From: Alex Rousskov
Date: Tue, 18 Mar 2014 00:51:48 +0000 (-0600)
Subject: Undo trunk r13266: "vector-refactor branch: align Vector API with std::vector"
X-Git-Tag: SQUID_3_5_0_1~170^2~15
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=793b824cb95ad733d39378265f5a7ea26d8db75a;p=thirdparty%2Fsquid.git

Undo trunk r13266: "vector-refactor branch: align Vector API with std::vector"

to avoid stability issues related to std::vector migration.
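For reference, a minimal sketch of the call-site idiom this revert restores. It is
not part of the change set: the element type Foo and the helper freeAll() are
illustrative placeholders, while Vector, size(), and pop_back() are as declared in
src/base/Vector.h after this commit. Unlike std::vector, the restored pop_back()
returns the removed element, so owning containers can delete while popping in a
single statement.

    // Sketch only; "Foo" and freeAll() are hypothetical names, not Squid code.
    #include "base/Vector.h"

    struct Foo {};

    static void freeAll(Vector<Foo *> &owned)
    {
        // Restored (pre-r13266) API: pop_back() hands back the removed element.
        while (owned.size())
            delete owned.pop_back();

        // The std::vector-style API removed by this commit has a void pop_back(),
        // so the same cleanup needs two calls:
        //     delete owned.back();
        //     owned.pop_back();
    }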
---

diff --git a/lib/MemPoolMalloc.cc b/lib/MemPoolMalloc.cc
index 5a442f4e20..4e25cc4207 100644
--- a/lib/MemPoolMalloc.cc
+++ b/lib/MemPoolMalloc.cc
@@ -122,7 +122,7 @@ MemPoolMalloc::~MemPoolMalloc()
 bool
 MemPoolMalloc::idleTrigger(int shift) const
 {
-    return freelist.size() >> (shift ? 8 : 0);
+    return freelist.count >> (shift ? 8 : 0);
 }
 
 void
diff --git a/src/ClientDelayConfig.cc b/src/ClientDelayConfig.cc
index a588fd4aea..0d95212f78 100644
--- a/src/ClientDelayConfig.cc
+++ b/src/ClientDelayConfig.cc
@@ -30,7 +30,7 @@ ClientDelayConfig::finalize()
 
 void ClientDelayConfig::freePoolCount()
 {
-    pools.clear();
+    pools.clean();
 }
 
 void ClientDelayConfig::dumpPoolCount(StoreEntry * entry, const char *name) const
diff --git a/src/ConfigOption.cc b/src/ConfigOption.cc
index 2b771479ee..92a5ede6af 100644
--- a/src/ConfigOption.cc
+++ b/src/ConfigOption.cc
@@ -35,7 +35,7 @@
 
 ConfigOptionVector::~ConfigOptionVector()
 {
-    while (!options.empty()) {
+    while (options.size()) {
         delete options.back();
         options.pop_back();
     }
diff --git a/src/DiskIO/DiskIOModule.cc b/src/DiskIO/DiskIOModule.cc
index 2992f2237d..0bc3278ac0 100644
--- a/src/DiskIO/DiskIOModule.cc
+++ b/src/DiskIO/DiskIOModule.cc
@@ -92,7 +92,7 @@ DiskIOModule::GetModules()
 void
 DiskIOModule::FreeAllModules()
 {
-    while (!GetModules().empty()) {
+    while (GetModules().size()) {
         DiskIOModule *fs = GetModules().back();
         GetModules().pop_back();
         fs->gracefulShutdown();
diff --git a/src/ExternalACLEntry.cc b/src/ExternalACLEntry.cc
index 6e3a969e16..a8a459cd07 100644
--- a/src/ExternalACLEntry.cc
+++ b/src/ExternalACLEntry.cc
@@ -70,7 +70,7 @@ ExternalACLEntry::update(ExternalACLEntryData const &someData)
     result = someData.result;
 
     // replace all notes. not combine
-    notes.entries.clear();
+    notes.entries.clean();
     notes.append(&someData.notes);
 
 #if USE_AUTH
diff --git a/src/FwdState.cc b/src/FwdState.cc
index 881cb94dad..6847fed133 100644
--- a/src/FwdState.cc
+++ b/src/FwdState.cc
@@ -112,7 +112,7 @@ FwdState::abort(void* d)
     } else {
         debugs(17, 7, HERE << "store entry aborted; no connection to close");
     }
-    fwd->serverDestinations.clear();
+    fwd->serverDestinations.clean();
     fwd->self = NULL;
 }
 
@@ -277,7 +277,7 @@ FwdState::~FwdState()
         serverConn->close();
     }
 
-    serverDestinations.clear();
+    serverDestinations.clean();
 
    debugs(17, 3, HERE << "FwdState destructor done");
 }
diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc
index 32d71f1fcf..bea0c7a42c 100644
--- a/src/HttpHdrRange.cc
+++ b/src/HttpHdrRange.cc
@@ -266,10 +266,8 @@ HttpHdrRange::parseInit(const String * range_spec)
          * at least one syntactically invalid byte-range-specs.
          */
        if (!spec) {
-            while (!specs.empty()) {
-                delete specs.back();
-                specs.pop_back();
-            }
+            while (!specs.empty())
+                delete specs.pop_back();
            debugs(64, 2, "ignoring invalid range field: '" << range_spec << "'");
            break;
        }
@@ -283,10 +281,8 @@ HttpHdrRange::parseInit(const String * range_spec)
 
 HttpHdrRange::~HttpHdrRange()
 {
-    while (!specs.empty()) {
-        delete specs.back();
-        specs.pop_back();
-    }
+    while (specs.size())
+        delete specs.pop_back();
 }
 
 HttpHdrRange::HttpHdrRange(HttpHdrRange const &old) :
@@ -345,7 +341,7 @@ void
 HttpHdrRange::merge (Vector<HttpHdrRangeSpec *> &basis)
 {
     /* reset old array */
-    specs.clear();
+    specs.clean();
     /* merge specs:
      * take one spec from "goods" and merge it with specs from
      * "specs" (if any) until there is no overlap */
@@ -354,8 +350,7 @@ HttpHdrRange::merge (Vector<HttpHdrRangeSpec *> &basis)
     while (i != basis.end()) {
         if (specs.size() && (*i)->mergeWith(specs.back())) {
             /* merged with current so get rid of the prev one */
-            delete specs.back();
-            specs.pop_back();
+            delete specs.pop_back();
             continue;	/* re-iterate */
         }
 
@@ -409,14 +404,14 @@ int
 HttpHdrRange::canonize (int64_t newClen)
 {
     clen = newClen;
-    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.size() <<
+    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.count <<
           " specs, clen: " << clen);
     Vector<HttpHdrRangeSpec*> goods;
     getCanonizedSpecs(goods);
     merge (goods);
-    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.size() <<
+    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.count <<
           " specs");
-    return specs.size() > 0; // fixme, should return bool
+    return specs.count > 0;
 }
 
 /* hack: returns true if range specs are too "complex" for Squid to handle */
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index f0f3e454ea..bd6c0eb08e 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -454,12 +454,12 @@ HttpHeader::clean()
          * has been used. As a hack, just never count zero-sized header
          * arrays.
          */
-        if (!entries.empty())
-            HttpHeaderStats[owner].hdrUCountDistr.count(entries.size());
+        if (0 != entries.count)
+            HttpHeaderStats[owner].hdrUCountDistr.count(entries.count);
 
         ++ HttpHeaderStats[owner].destroyedCount;
 
-        HttpHeaderStats[owner].busyDestroyedCount += entries.size() > 0;
+        HttpHeaderStats[owner].busyDestroyedCount += entries.count > 0;
     } // if (owner <= hoReply)
 
     while ((e = getEntry(&pos))) {
@@ -474,7 +474,7 @@ HttpHeader::clean()
             delete e;
         }
     }
-    entries.clear();
+    entries.clean();
     httpHeaderMaskInit(&mask, 0);
     len = 0;
     PROF_stop(HttpHeaderClean);
@@ -748,11 +748,11 @@ HttpHeaderEntry *
 HttpHeader::getEntry(HttpHeaderPos * pos) const
 {
     assert(pos);
-    assert(*pos >= HttpHeaderInitPos && *pos < static_cast<ssize_t>(entries.size()));
+    assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.count);
 
-    for (++(*pos); *pos < static_cast<ssize_t>(entries.size()); ++(*pos)) {
-        if (entries[*pos])
-            return static_cast<HttpHeaderEntry*>(entries[*pos]);
+    for (++(*pos); *pos < (ssize_t)entries.count; ++(*pos)) {
+        if (entries.items[*pos])
+            return (HttpHeaderEntry*)entries.items[*pos];
     }
 
     return NULL;
@@ -871,9 +871,9 @@ void
 HttpHeader::delAt(HttpHeaderPos pos, int &headers_deleted)
 {
     HttpHeaderEntry *e;
-    assert(pos >= HttpHeaderInitPos && pos < static_cast<ssize_t>(entries.size()));
-    e = static_cast<HttpHeaderEntry*>(entries[pos]);
-    entries[pos] = NULL;
+    assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.count);
+    e = (HttpHeaderEntry*)entries.items[pos];
+    entries.items[pos] = NULL;
     /* decrement header length, allow for ": " and crlf */
     len -= e->name.size() + 2 + e->value.size() + 2;
     assert(len >= 0);
@@ -914,7 +914,7 @@ HttpHeader::addEntry(HttpHeaderEntry * e)
     assert_eid(e->id);
     assert(e->name.size());
 
-    debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
+    debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
 
     if (CBIT_TEST(mask, e->id))
         ++ Headers[e->id].stat.repCount;
@@ -936,7 +936,7 @@ HttpHeader::insertEntry(HttpHeaderEntry * e)
     assert(e);
     assert_eid(e->id);
 
-    debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
+    debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
 
     if (CBIT_TEST(mask, e->id))
         ++ Headers[e->id].stat.repCount;
diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc
index 764e71f909..d43b23df46 100644
--- a/src/HttpRequest.cc
+++ b/src/HttpRequest.cc
@@ -479,7 +479,7 @@ HttpRequest::adaptHistoryImport(const HttpRequest &them)
 bool
 HttpRequest::multipartRangeRequest() const
 {
-    return (range && range->specs.size() > 1);
+    return (range && range->specs.count > 1);
 }
 
 bool
diff --git a/src/Notes.cc b/src/Notes.cc
index b35029c6b9..ad84e0ea6b 100644
--- a/src/Notes.cc
+++ b/src/Notes.cc
@@ -146,15 +146,13 @@ Notes::dump(StoreEntry *entry, const char *key)
 void
 Notes::clean()
 {
-    notes.clear();
+    notes.clean();
 }
 
 NotePairs::~NotePairs()
 {
-    while (!entries.empty()) {
-        delete entries.back();
-        entries.pop_back();
-    }
+    while (!entries.empty())
+        delete entries.pop_back();
 }
 
 const char *
diff --git a/src/Notes.h b/src/Notes.h
index 6490f5d24e..b6c60af009 100644
--- a/src/Notes.h
+++ b/src/Notes.h
@@ -74,7 +74,7 @@ public:
     Notes(const char *aDescr, const char **metasBlacklist, bool allowFormatted = false): descr(aDescr), blacklisted(metasBlacklist), formattedValues(allowFormatted) {}
     Notes(): descr(NULL), blacklisted(NULL) {}
-    ~Notes() { notes.clear(); }
+    ~Notes() { notes.clean(); }
     /**
      * Parse a notes line and returns a pointer to the
      * parsed Note object.
diff --git a/src/StoreFileSystem.cc b/src/StoreFileSystem.cc
index 293c37f4e2..7b3497ed22 100644
--- a/src/StoreFileSystem.cc
+++ b/src/StoreFileSystem.cc
@@ -87,7 +87,7 @@ StoreFileSystem::GetFileSystems()
 void
 StoreFileSystem::FreeAllFs()
 {
-    while (!GetFileSystems().empty()) {
+    while (GetFileSystems().size()) {
         StoreFileSystem *fs = GetFileSystems().back();
         GetFileSystems().pop_back();
         fs->done();
diff --git a/src/adaptation/AccessCheck.cc b/src/adaptation/AccessCheck.cc
index 19ee86346b..3e50fc67c8 100644
--- a/src/adaptation/AccessCheck.cc
+++ b/src/adaptation/AccessCheck.cc
@@ -102,7 +102,7 @@ Adaptation::AccessCheck::check()
         AccessRule *r = *i;
         if (isCandidate(*r)) {
             debugs(93, 5, HERE << "check: rule '" << r->id << "' is a candidate");
-            candidates.push_back(r->id);
+            candidates += r->id;
         }
     }
 
diff --git a/src/adaptation/Config.cc b/src/adaptation/Config.cc
index d133c493e6..2794a4cdd2 100644
--- a/src/adaptation/Config.cc
+++ b/src/adaptation/Config.cc
@@ -139,7 +139,7 @@ Adaptation::Config::clear()
     const ServiceConfigs& configs = serviceConfigs;
     for (SCI cfg = configs.begin(); cfg != configs.end(); ++cfg)
         removeService((*cfg)->key);
-    serviceConfigs.clear();
+    serviceConfigs.clean();
     debugs(93, 3, HERE << "rules: " << AllRules().size() << ", groups: " <<
           AllGroups().size() << ", services: " << serviceConfigs.size());
 }
@@ -163,7 +163,7 @@ Adaptation::Config::freeService()
 
     DetachServices();
 
-    serviceConfigs.clear();
+    serviceConfigs.clean();
 }
 
 void
@@ -210,7 +210,7 @@ Adaptation::Config::finalize()
     debugs(93,3, HERE << "Created " << created << " adaptation services");
 
     // services remember their configs; we do not have to
-    serviceConfigs.clear();
+    serviceConfigs.clean();
     return true;
 }
diff --git a/src/adaptation/DynamicGroupCfg.cc b/src/adaptation/DynamicGroupCfg.cc
index 5d06f78fdd..f5ebf4a3d7 100644
--- a/src/adaptation/DynamicGroupCfg.cc
+++ b/src/adaptation/DynamicGroupCfg.cc
@@ -18,5 +18,5 @@ void
 Adaptation::DynamicGroupCfg::clear()
 {
     id.clean();
-    services.clear();
+    services.clean();
 }
diff --git a/src/adaptation/Service.cc b/src/adaptation/Service.cc
index 10f1e642e8..33b60a8b9f 100644
--- a/src/adaptation/Service.cc
+++ b/src/adaptation/Service.cc
@@ -71,8 +71,6 @@ Adaptation::FindService(const Service::Id& key)
 void
 Adaptation::DetachServices()
 {
-    while (!AllServices().empty()) {
-        AllServices().back()->detach();
-        AllServices().pop_back();
-    }
+    while (!AllServices().empty())
+        AllServices().pop_back()->detach();
 }
diff --git a/src/adaptation/ServiceGroups.cc b/src/adaptation/ServiceGroups.cc
index 28e7d83763..ad196bc1a9 100644
--- a/src/adaptation/ServiceGroups.cc
+++ b/src/adaptation/ServiceGroups.cc
@@ -48,7 +48,7 @@ Adaptation::ServiceGroup::finalize()
         }
         s.cut(s.size() - 1);
         debugs(93, DBG_IMPORTANT, "Adaptation group '" << id << "' contains disabled member(s) after reconfiguration: " << s);
-        removedServices.clear();
+        removedServices.clean();
     }
 
     String baselineKey;
diff --git a/src/adaptation/icap/Options.cc b/src/adaptation/icap/Options.cc
index 9524a1ec49..e219a3751c 100644
--- a/src/adaptation/icap/Options.cc
+++ b/src/adaptation/icap/Options.cc
@@ -125,7 +125,7 @@ void Adaptation::Icap::Options::configure(const HttpReply *reply)
 void Adaptation::Icap::Options::cfgMethod(ICAP::Method m)
 {
     Must(m != ICAP::methodNone);
-    methods.push_back(m);
+    methods += m;
 }
 
 // TODO: HttpHeader should provide a general method for this type of conversion
diff --git a/src/adaptation/icap/ServiceRep.cc b/src/adaptation/icap/ServiceRep.cc
index ac295a767e..16b86f030b 100644
--- a/src/adaptation/icap/ServiceRep.cc
+++ b/src/adaptation/icap/ServiceRep.cc
@@ -375,8 +375,7 @@ void Adaptation::Icap::ServiceRep::noteTimeToNotify()
     Pointer us = NULL;
 
     while (!theClients.empty()) {
-        Client i = theClients.back();
-        theClients.pop_back();
+        Client i = theClients.pop_back();
         ScheduleCallHere(i.callback);
         i.callback = 0;
     }
diff --git a/src/auth/Gadgets.cc b/src/auth/Gadgets.cc
index 03c1c2e930..f31c9049cd 100644
--- a/src/auth/Gadgets.cc
+++ b/src/auth/Gadgets.cc
@@ -133,7 +133,7 @@ authenticateReset(void)
     authenticateRotate();
 
     /* free current global config details too. */
-    Auth::TheConfig.clear();
+    Auth::TheConfig.clean();
 }
 
 AuthUserHashPointer::AuthUserHashPointer(Auth::User::Pointer anAuth_user):
diff --git a/src/base/Vector.h b/src/base/Vector.h
index 05d58f1281..5f430955b0 100644
--- a/src/base/Vector.h
+++ b/src/base/Vector.h
@@ -32,7 +32,7 @@
 #define SQUID_ARRAY_H
 
 /**
- \todo remove this after replacing with STL
+ \todo CLEANUP: this file should be called Vector.h at least, and probably be replaced by STL Vector
 */
 
 #include "fatal.h"
@@ -88,8 +88,7 @@ public:
     typedef VectorIteratorBase<Vector<E> > iterator;
     typedef VectorIteratorBase<Vector<E> const> const_iterator;
     typedef ptrdiff_t difference_type;
-    friend class VectorIteratorBase<Vector<E> >;
-    friend class VectorIteratorBase<Vector<E> const>;
+
     void *operator new (size_t);
     void operator delete (void *);
@@ -97,31 +96,29 @@
     ~Vector();
     Vector(Vector const &);
     Vector &operator = (Vector const &);
-    void clear();
+    void clean();
     void reserve (size_t capacity);
     void push_back (E);
+    Vector &operator += (E item) {push_back(item); return *this;};
     void insert (E);
     const E &front() const;
     E &front();
     E &back();
-    void pop_back();
+    E pop_back();
     E shift();         // aka pop_front
     void prune(E);
    void preAppend(int app_count);
-    inline bool empty() const;
-    inline size_t size() const;
+    bool empty() const;
+    size_t size() const;
     iterator begin();
     const_iterator begin () const;
     iterator end();
     const_iterator end () const;
-    E& at(unsigned i);
-    const E& at(unsigned i) const;
-    inline E& operator [] (unsigned i);
-    inline const E& operator [] (unsigned i) const;
-    E* data() const { return items; }
+    E& operator [] (unsigned i);
+    const E& operator [] (unsigned i) const;
 
-protected:
+    /* Do not change these, until the entry C struct is removed */
     size_t capacity;
     size_t count;
     E *items;
@@ -148,12 +145,12 @@ Vector<E>::Vector() : capacity (0), count(0), items (NULL)
 {}
 
 template<class E>
 Vector<E>::~Vector()
 {
-    clear();
+    clean();
 }
 
 template<class E>
 void
-Vector<E>::clear()
+Vector<E>::clean()
 {
     /* could also warn if some objects are left */
     delete[] items;
@@ -243,12 +240,13 @@ Vector<E>::shift()
 }
 
 template<class E>
-void
+E
 Vector<E>::pop_back()
 {
     assert (size());
-    --count;
+    value_type result = items[--count];
     items[count] = value_type();
+    return result;
 }
 
 template<class E>
@@ -316,7 +314,7 @@ Vector<E> &
 Vector<E>::operator = (Vector const &old)
 {
-    clear();
+    clean();
     reserve (old.size());
 
     for (size_t counter = 0; counter < old.size(); ++counter)
@@ -367,26 +365,11 @@ Vector<E>::end() const
     return const_iterator(size(), *this);
 }
 
-template<class E>
-E &
-Vector<E>::at(unsigned i)
-{
-    assert (size() > i);
-    return operator[](i);
-}
-
-template<class E>
-const E &
-Vector<E>::at(unsigned i) const
-{
-    assert (size() > i);
-    return operator[](i);
-}
-
 template<class E>
 E &
 Vector<E>::operator [] (unsigned i)
 {
+    assert (size() > i);
     return items[i];
 }
 
@@ -394,6 +377,7 @@ template<class E>
 const E &
 Vector<E>::operator [] (unsigned i) const
 {
+    assert (size() > i);
     return items[i];
 }
diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index 7e24e9206d..9d05a90585 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -1867,7 +1867,12 @@ static void
 free_authparam(Auth::ConfigVector * cfg)
 {
     /* Wipe the Auth globals and Detach/Destruct component config + state. */
-    cfg->clear();
+    cfg->clean();
+
+    /* remove our pointers to the probably-dead sub-configs */
+    while (cfg->size()) {
+        cfg->pop_back();
+    }
 
     /* on reconfigure initialize new auth schemes for the new config. */
     if (reconfiguring) {
@@ -1888,7 +1893,7 @@ static int
 find_fstype(char *type)
 {
     for (size_t i = 0; i < StoreFileSystem::FileSystems().size(); ++i)
-        if (strcasecmp(type, StoreFileSystem::FileSystems().at(i)->type()) == 0)
+        if (strcasecmp(type, StoreFileSystem::FileSystems().items[i]->type()) == 0)
             return (int)i;
 
     return (-1);
@@ -1931,7 +1936,7 @@ parse_cachedir(SquidConfig::_cacheSwap * swap)
 
             sd = dynamic_cast<SwapDir *>(swap->swapDirs[i].getRaw());
 
-            if (strcmp(sd->type(), StoreFileSystem::FileSystems().at(fs)->type()) != 0) {
+            if (strcmp(sd->type(), StoreFileSystem::FileSystems().items[fs]->type()) != 0) {
                 debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
                       sd->type() << " " << sd->path << " to " << type_str << ". Restart required");
                 return;
@@ -1956,7 +1961,7 @@ parse_cachedir(SquidConfig::_cacheSwap * swap)
 
     allocate_new_swapdir(swap);
 
-    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().at(fs)->createSwapDir();
+    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().items[fs]->createSwapDir();
 
     sd = dynamic_cast<SwapDir *>(swap->swapDirs[swap->n_configured].getRaw());
diff --git a/src/client_side.cc b/src/client_side.cc
index df1fdb8eed..115393fc95 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -1353,7 +1353,7 @@ ClientSocketContext::buildRangeHeader(HttpReply * rep)
     bool replyMatchRequest = rep->content_range != NULL ?
                             request->range->contains(rep->content_range->spec) :
                             true;
-    const int spec_count = http->request->range->specs.size();
+    const int spec_count = http->request->range->specs.count;
     int64_t actual_clen = -1;
 
     debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 942588e91c..4d98ba2e18 100644
--- a/src/errorpage.cc
+++ b/src/errorpage.cc
@@ -204,7 +204,7 @@ errorInitialize(void)
             /** \par
              * Index any unknown file names used by deny_info.
              */
-            ErrorDynamicPageInfo *info = ErrorDynamicPages.at(i - ERR_MAX);
+            ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i - ERR_MAX];
             assert(info && info->id == i && info->page_name);
 
             const char *pg = info->page_name;
@@ -245,10 +245,8 @@ errorClean(void)
         safe_free(error_text);
     }
 
-    while (!ErrorDynamicPages.empty()) {
-        errorDynamicPageInfoDestroy(ErrorDynamicPages.back());
-        ErrorDynamicPages.pop_back();
-    }
+    while (ErrorDynamicPages.size())
+        errorDynamicPageInfoDestroy(ErrorDynamicPages.pop_back());
 
     error_page_count = 0;
 
@@ -533,7 +531,7 @@ errorPageId(const char *page_name)
     }
 
     for (size_t j = 0; j < ErrorDynamicPages.size(); ++j) {
-        if (strcmp(ErrorDynamicPages[j]->page_name, page_name) == 0)
+        if (strcmp(ErrorDynamicPages.items[j]->page_name, page_name) == 0)
             return j + ERR_MAX;
     }
 
@@ -563,7 +561,7 @@ errorPageName(int pageId)
         return err_type_str[pageId];
 
     if (pageId >= ERR_MAX && pageId - ERR_MAX < (ssize_t)ErrorDynamicPages.size())
-        return ErrorDynamicPages[pageId - ERR_MAX]->page_name;
+        return ErrorDynamicPages.items[pageId - ERR_MAX]->page_name;
 
     return "ERR_UNKNOWN";          /* should not happen */
 }
@@ -595,8 +593,8 @@ ErrorState::ErrorState(err_type t, Http::StatusCode status, HttpRequest * req) :
 {
     memset(&ftp, 0, sizeof(ftp));
 
-    if (page_id >= ERR_MAX && ErrorDynamicPages[page_id - ERR_MAX]->page_redirect != Http::scNone)
-        httpStatus = ErrorDynamicPages[page_id - ERR_MAX]->page_redirect;
+    if (page_id >= ERR_MAX && ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect != Http::scNone)
+        httpStatus = ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect;
 
     if (req != NULL) {
         request = req;
diff --git a/src/esi/CustomParser.cc b/src/esi/CustomParser.cc
index 986406240c..8f632ca79f 100644
--- a/src/esi/CustomParser.cc
+++ b/src/esi/CustomParser.cc
@@ -205,8 +205,7 @@ ESICustomParser::parse(char const *dataToParse, size_t const lengthOfData, bool
             attribute = end + 1;
         }
 
-        // TODO: after c++11, replace &attributes.front() with attributes.data()
-        theClient->start (tag + 1, const_cast<const char **>(&attributes.front()), attributes.size() >> 1);
+        theClient->start (tag + 1, (const char **)attributes.items, attributes.size() >> 1);
         /* TODO: attributes */
 
         if (*(tagEnd - 1) == '/')
diff --git a/src/esi/VarState.cc b/src/esi/VarState.cc
index 59eddddfab..13e4f73109 100644
--- a/src/esi/VarState.cc
+++ b/src/esi/VarState.cc
@@ -167,10 +167,8 @@ ESIVarState::~ESIVarState()
 {
     freeResources();
 
-    while (!variablesForCleanup.empty()) {
-        delete variablesForCleanup.back();
-        variablesForCleanup.pop_back();
-    }
+    while (variablesForCleanup.size())
+        delete variablesForCleanup.pop_back();
 
     delete defaultVariable;
 }
diff --git a/src/fs/ufs/UFSSwapDir.cc b/src/fs/ufs/UFSSwapDir.cc
index fdd49f30bd..db883a1728 100644
--- a/src/fs/ufs/UFSSwapDir.cc
+++ b/src/fs/ufs/UFSSwapDir.cc
@@ -226,10 +226,8 @@ Fs::Ufs::UFSSwapDir::changeIO(DiskIOModule *module)
     IO->io = anIO;
     /* Change the IO Options */
 
-    if (currentIOOptions && currentIOOptions->options.size() > 2) {
-        delete currentIOOptions->options.back();
-        currentIOOptions->options.pop_back();
-    }
+    if (currentIOOptions && currentIOOptions->options.size() > 2)
+        delete currentIOOptions->options.pop_back();
 
     /* TODO: factor out these 4 lines */
 
     ConfigOption *ioOptions = IO->io->getOptionTree();
diff --git a/src/ipc/Kids.cc b/src/ipc/Kids.cc
index ffb773a8c9..17c1465144 100644
--- a/src/ipc/Kids.cc
+++ b/src/ipc/Kids.cc
@@ -19,7 +19,8 @@ Kids::Kids()
 /// maintain n kids
 void Kids::init()
 {
-    storage.clear();
+    if (storage.size() > 0)
+        storage.clean();
 
     storage.reserve(NumberOfKids());
 
diff --git a/src/store.cc b/src/store.cc
index b46504f8b4..066cb456a2 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -1289,7 +1289,7 @@ storeLateRelease(void *unused)
     }
 
     for (i = 0; i < 10; ++i) {
-        e = LateReleaseStack.empty() ? NULL : LateReleaseStack.pop();
+        e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
 
         if (e == NULL) {
             /* done! */
diff --git a/src/store_dir.cc b/src/store_dir.cc
index 24a0ffdb6b..82d6d8324c 100644
--- a/src/store_dir.cc
+++ b/src/store_dir.cc
@@ -1366,7 +1366,7 @@ StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
 bool
 StoreSearchHashIndex::next()
 {
-    if (!entries.empty())
+    if (entries.size())
         entries.pop_back();
 
     while (!isDone() && !entries.size())
diff --git a/src/tests/test_http_range.cc b/src/tests/test_http_range.cc
index 160a96beb2..bbf0544512 100644
--- a/src/tests/test_http_range.cc
+++ b/src/tests/test_http_range.cc
@@ -86,7 +86,7 @@ testRangeParser(char const *rangestring)
 
     HttpHdrRange copy(*range);
 
-    assert (copy.specs.size() == range->specs.size());
+    assert (copy.specs.count == range->specs.count);
 
     HttpHdrRange::iterator pos = range->begin();
 
@@ -111,7 +111,7 @@ void
 testRangeIter ()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.size() == 3);
+    assert (range->specs.count == 3);
     size_t counter = 0;
 
     HttpHdrRange::iterator i = range->begin();
@@ -132,7 +132,7 @@ void
 testRangeCanonization()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.size() == 3);
+    assert (range->specs.count == 3);
 
     /* 0-3 needs a content length of 4 */
     /* This passes in the extant code - but should it? */
@@ -140,13 +140,13 @@ testRangeCanonization()
     if (!range->canonize(3))
         exit(1);
 
-    assert (range->specs.size() == 3);
+    assert (range->specs.count == 3);
 
     delete range;
 
     range=rangeFromString("bytes=0-3, 1-, -2");
 
-    assert (range->specs.size() == 3);
+    assert (range->specs.count == 3);
 
     /* 0-3 needs a content length of 4 */
     if (!range->canonize(4))
@@ -156,7 +156,7 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.size() == 1);
+    assert (range->specs.count == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (range->canonize(3))
@@ -166,7 +166,7 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.size() == 1);
+    assert (range->specs.count == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (!range->canonize(4))
@@ -176,12 +176,12 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=1-1,2-3");
 
-    assert (range->specs.size()== 2);
+    assert (range->specs.count == 2);
 
     if (!range->canonize(4))
         exit(1);
 
-    assert (range->specs.size() == 2);
+    assert (range->specs.count == 2);
 
     delete range;
 }
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 98fe5a433d..0ec27ee721 100644
--- a/src/tunnel.cc
+++ b/src/tunnel.cc
@@ -234,7 +234,7 @@ TunnelStateData::~TunnelStateData()
     debugs(26, 3, "TunnelStateData destructed this=" << this);
     assert(noConnections());
     xfree(url);
-    serverDestinations.clear();
+    serverDestinations.clean();
     delete connectRespBuf;
 }