to avoid stability issues related to the std::vector migration.
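
For reviewers who have not touched the legacy container, the hunks below all follow one mechanical mapping: std::vector's clear() becomes the old Vector's clean(), size()/empty() are often read straight off the public count member, element access goes through the raw items pointer, operator+= appends, and pop_back() once again returns the removed element so callers can write delete v.pop_back(). The following sketch is only a reading aid under those assumptions; ToyVector is a hypothetical stand-in, not the real Vector<E> from src/Array.h, and it ignores details such as the real class's iterators and allocation policy.

    // Illustration only: ToyVector is a hypothetical stand-in for the legacy
    // Vector<E> in src/Array.h; it mimics the API shape this patch restores
    // (clean(), public count/items, operator+=, value-returning pop_back()).
    #include <cassert>
    #include <cstddef>

    template <class E>
    class ToyVector
    {
    public:
        typedef E value_type;

        ToyVector() : capacity(0), count(0), items(NULL) {}
        ~ToyVector() { clean(); }

        // clean() frees the backing array; the rough counterpart of clear()
        void clean() { delete[] items; items = NULL; capacity = count = 0; }

        size_t size() const { return count; }
        bool empty() const { return count == 0; }

        void push_back(E e) {
            if (count >= capacity)
                grow();
            items[count++] = e;
        }

        // append via +=, as in "candidates += r->id"
        ToyVector &operator += (E e) { push_back(e); return *this; }

        // unlike std::vector, pop_back() hands the removed element back,
        // which is what allows the "delete v.pop_back()" drain idiom
        E pop_back() {
            assert(count);
            value_type result = items[--count];
            items[count] = value_type();
            return result;
        }

        // exposed members, read directly as specs.count, entries.items[pos], ...
        size_t capacity;
        size_t count;
        E *items;

    private:
        void grow() {
            const size_t newCapacity = capacity ? capacity * 2 : 4;
            E *newItems = new E[newCapacity]();
            for (size_t i = 0; i < count; ++i)
                newItems[i] = items[i];
            delete[] items;
            items = newItems;
            capacity = newCapacity;
        }
    };

    int main()
    {
        ToyVector<int *> owners;
        owners += new int(42);          // operator+= appends one element
        while (owners.size())
            delete owners.pop_back();   // drain-and-delete, as in the destructor hunks
        owners.clean();                 // release the (now empty) backing array
        return 0;
    }

With that shape in mind, idioms such as specs.count, entries.items[pos], candidates += r->id and while (x.size()) delete x.pop_back(); in the hunks below read naturally.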
bool
MemPoolMalloc::idleTrigger(int shift) const
{
- return freelist.size() >> (shift ? 8 : 0);
+ return freelist.count >> (shift ? 8 : 0);
}
void
void ClientDelayConfig::freePoolCount()
{
- pools.clear();
+ pools.clean();
}
void ClientDelayConfig::dumpPoolCount(StoreEntry * entry, const char *name) const
ConfigOptionVector::~ConfigOptionVector()
{
- while (!options.empty()) {
+ while (options.size()) {
delete options.back();
options.pop_back();
}
void
DiskIOModule::FreeAllModules()
{
- while (!GetModules().empty()) {
+ while (GetModules().size()) {
DiskIOModule *fs = GetModules().back();
GetModules().pop_back();
fs->gracefulShutdown();
result = someData.result;
// replace all notes, do not combine them
- notes.entries.clear();
+ notes.entries.clean();
notes.append(&someData.notes);
#if USE_AUTH
} else {
debugs(17, 7, HERE << "store entry aborted; no connection to close");
}
- fwd->serverDestinations.clear();
+ fwd->serverDestinations.clean();
fwd->self = NULL;
}
serverConn->close();
}
- serverDestinations.clear();
+ serverDestinations.clean();
debugs(17, 3, HERE << "FwdState destructor done");
}
* at least one syntactically invalid byte-range-spec.
*/
if (!spec) {
- while (!specs.empty()) {
- delete specs.back();
- specs.pop_back();
- }
+ while (!specs.empty())
+ delete specs.pop_back();
debugs(64, 2, "ignoring invalid range field: '" << range_spec << "'");
break;
}
HttpHdrRange::~HttpHdrRange()
{
- while (!specs.empty()) {
- delete specs.back();
- specs.pop_back();
- }
+ while (specs.size())
+ delete specs.pop_back();
}
HttpHdrRange::HttpHdrRange(HttpHdrRange const &old) :
HttpHdrRange::merge (Vector<HttpHdrRangeSpec *> &basis)
{
/* reset old array */
- specs.clear();
+ specs.clean();
/* merge specs:
* take one spec from "goods" and merge it with specs from
* "specs" (if any) until there is no overlap */
while (i != basis.end()) {
if (specs.size() && (*i)->mergeWith(specs.back())) {
/* merged with current so get rid of the prev one */
- delete specs.back();
- specs.pop_back();
+ delete specs.pop_back();
continue; /* re-iterate */
}
HttpHdrRange::canonize (int64_t newClen)
{
clen = newClen;
- debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.size() <<
+ debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.count <<
" specs, clen: " << clen);
Vector<HttpHdrRangeSpec*> goods;
getCanonizedSpecs(goods);
merge (goods);
- debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.size() <<
+ debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.count <<
" specs");
- return specs.size() > 0; // fixme, should return bool
+ return specs.count > 0;
}
/* hack: returns true if range specs are too "complex" for Squid to handle */
* has been used. As a hack, just never count zero-sized header
* arrays.
*/
- if (!entries.empty())
- HttpHeaderStats[owner].hdrUCountDistr.count(entries.size());
+ if (0 != entries.count)
+ HttpHeaderStats[owner].hdrUCountDistr.count(entries.count);
++ HttpHeaderStats[owner].destroyedCount;
- HttpHeaderStats[owner].busyDestroyedCount += entries.size() > 0;
+ HttpHeaderStats[owner].busyDestroyedCount += entries.count > 0;
} // if (owner <= hoReply)
while ((e = getEntry(&pos))) {
delete e;
}
}
- entries.clear();
+ entries.clean();
httpHeaderMaskInit(&mask, 0);
len = 0;
PROF_stop(HttpHeaderClean);
HttpHeader::getEntry(HttpHeaderPos * pos) const
{
assert(pos);
- assert(*pos >= HttpHeaderInitPos && *pos < static_cast<ssize_t>(entries.size()));
+ assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.count);
- for (++(*pos); *pos < static_cast<ssize_t>(entries.size()); ++(*pos)) {
- if (entries[*pos])
- return static_cast<HttpHeaderEntry*>(entries[*pos]);
+ for (++(*pos); *pos < (ssize_t)entries.count; ++(*pos)) {
+ if (entries.items[*pos])
+ return (HttpHeaderEntry*)entries.items[*pos];
}
return NULL;
HttpHeader::delAt(HttpHeaderPos pos, int &headers_deleted)
{
HttpHeaderEntry *e;
- assert(pos >= HttpHeaderInitPos && pos < static_cast<ssize_t>(entries.size()));
- e = static_cast<HttpHeaderEntry*>(entries[pos]);
- entries[pos] = NULL;
+ assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.count);
+ e = (HttpHeaderEntry*)entries.items[pos];
+ entries.items[pos] = NULL;
/* decrement header length, allow for ": " and crlf */
len -= e->name.size() + 2 + e->value.size() + 2;
assert(len >= 0);
assert_eid(e->id);
assert(e->name.size());
- debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
+ debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
if (CBIT_TEST(mask, e->id))
++ Headers[e->id].stat.repCount;
assert(e);
assert_eid(e->id);
- debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
+ debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
if (CBIT_TEST(mask, e->id))
++ Headers[e->id].stat.repCount;
bool
HttpRequest::multipartRangeRequest() const
{
- return (range && range->specs.size() > 1);
+ return (range && range->specs.count > 1);
}
bool
void
Notes::clean()
{
- notes.clear();
+ notes.clean();
}
NotePairs::~NotePairs()
{
- while (!entries.empty()) {
- delete entries.back();
- entries.pop_back();
- }
+ while (!entries.empty())
+ delete entries.pop_back();
}
const char *
Notes(const char *aDescr, const char **metasBlacklist, bool allowFormatted = false): descr(aDescr), blacklisted(metasBlacklist), formattedValues(allowFormatted) {}
Notes(): descr(NULL), blacklisted(NULL) {}
- ~Notes() { notes.clear(); }
+ ~Notes() { notes.clean(); }
/**
* Parses a notes line and returns a pointer to the
* parsed Note object.
void
StoreFileSystem::FreeAllFs()
{
- while (!GetFileSystems().empty()) {
+ while (GetFileSystems().size()) {
StoreFileSystem *fs = GetFileSystems().back();
GetFileSystems().pop_back();
fs->done();
AccessRule *r = *i;
if (isCandidate(*r)) {
debugs(93, 5, HERE << "check: rule '" << r->id << "' is a candidate");
- candidates.push_back(r->id);
+ candidates += r->id;
}
}
const ServiceConfigs& configs = serviceConfigs;
for (SCI cfg = configs.begin(); cfg != configs.end(); ++cfg)
removeService((*cfg)->key);
- serviceConfigs.clear();
+ serviceConfigs.clean();
debugs(93, 3, HERE << "rules: " << AllRules().size() << ", groups: " <<
AllGroups().size() << ", services: " << serviceConfigs.size());
}
DetachServices();
- serviceConfigs.clear();
+ serviceConfigs.clean();
}
void
debugs(93,3, HERE << "Created " << created << " adaptation services");
// services remember their configs; we do not have to
- serviceConfigs.clear();
+ serviceConfigs.clean();
return true;
}
Adaptation::DynamicGroupCfg::clear()
{
id.clean();
- services.clear();
+ services.clean();
}
void Adaptation::DetachServices()
{
- while (!AllServices().empty()) {
- AllServices().back()->detach();
- AllServices().pop_back();
- }
+ while (!AllServices().empty())
+ AllServices().pop_back()->detach();
}
}
s.cut(s.size() - 1);
debugs(93, DBG_IMPORTANT, "Adaptation group '" << id << "' contains disabled member(s) after reconfiguration: " << s);
- removedServices.clear();
+ removedServices.clean();
}
String baselineKey;
void Adaptation::Icap::Options::cfgMethod(ICAP::Method m)
{
Must(m != ICAP::methodNone);
- methods.push_back(m);
+ methods += m;
}
// TODO: HttpHeader should provide a general method for this type of conversion
Pointer us = NULL;
while (!theClients.empty()) {
- Client i = theClients.back();
- theClients.pop_back();
+ Client i = theClients.pop_back();
ScheduleCallHere(i.callback);
i.callback = 0;
}
authenticateRotate();
/* free current global config details too. */
- Auth::TheConfig.clear();
+ Auth::TheConfig.clean();
}
AuthUserHashPointer::AuthUserHashPointer(Auth::User::Pointer anAuth_user):
#define SQUID_ARRAY_H
/**
- \todo remove this after replacing with STL
+ \todo CLEANUP: this file should be called Vector.h at least, and probably be replaced by STL Vector<C>
*/
#include "fatal.h"
typedef VectorIteratorBase<Vector<E> > iterator;
typedef VectorIteratorBase<Vector<E> const> const_iterator;
typedef ptrdiff_t difference_type;
- friend class VectorIteratorBase<Vector<E> >;
- friend class VectorIteratorBase<Vector<E> const>;
+
void *operator new (size_t);
void operator delete (void *);
~Vector();
Vector(Vector const &);
Vector &operator = (Vector const &);
- void clear();
+ void clean();
void reserve (size_t capacity);
void push_back (E);
+ Vector &operator += (E item) {push_back(item); return *this;};
void insert (E);
const E &front() const;
E &front();
E &back();
- void pop_back();
+ E pop_back();
E shift(); // aka pop_front
void prune(E);
void preAppend(int app_count);
- inline bool empty() const;
- inline size_t size() const;
+ bool empty() const;
+ size_t size() const;
iterator begin();
const_iterator begin () const;
iterator end();
const_iterator end () const;
- E& at(unsigned i);
- const E& at(unsigned i) const;
- inline E& operator [] (unsigned i);
- inline const E& operator [] (unsigned i) const;
- E* data() const { return items; }
+ E& operator [] (unsigned i);
+ const E& operator [] (unsigned i) const;
-protected:
+ /* Do not change these, until the entry C struct is removed */
size_t capacity;
size_t count;
E *items;
template<class E>
Vector<E>::~Vector()
{
- clear();
+ clean();
}
template<class E>
void
-Vector<E>::clear()
+Vector<E>::clean()
{
/* could also warn if some objects are left */
delete[] items;
}
template<class E>
-void
+E
Vector<E>::pop_back()
{
assert (size());
- --count;
+ value_type result = items[--count];
items[count] = value_type();
+ return result;
}
template<class E>
Vector<E> &
Vector<E>::operator = (Vector<E> const &old)
{
- clear();
+ clean();
reserve (old.size());
for (size_t counter = 0; counter < old.size(); ++counter)
return const_iterator(size(), *this);
}
-template<class E>
-E &
-Vector<E>::at(unsigned i)
-{
- assert (size() > i);
- return operator[](i);
-}
-
-template<class E>
-const E &
-Vector<E>::at(unsigned i) const
-{
- assert (size() > i);
- return operator[](i);
-}
-
template<class E>
E &
Vector<E>::operator [] (unsigned i)
{
+ assert (size() > i);
return items[i];
}
const E &
Vector<E>::operator [] (unsigned i) const
{
+ assert (size() > i);
return items[i];
}
free_authparam(Auth::ConfigVector * cfg)
{
/* Wipe the Auth globals and Detach/Destruct component config + state. */
- cfg->clear();
+ cfg->clean();
+
+ /* remove our pointers to the probably-dead sub-configs */
+ while (cfg->size()) {
+ cfg->pop_back();
+ }
/* on reconfigure initialize new auth schemes for the new config. */
if (reconfiguring) {
find_fstype(char *type)
{
for (size_t i = 0; i < StoreFileSystem::FileSystems().size(); ++i)
- if (strcasecmp(type, StoreFileSystem::FileSystems().at(i)->type()) == 0)
+ if (strcasecmp(type, StoreFileSystem::FileSystems().items[i]->type()) == 0)
return (int)i;
return (-1);
sd = dynamic_cast<SwapDir *>(swap->swapDirs[i].getRaw());
- if (strcmp(sd->type(), StoreFileSystem::FileSystems().at(fs)->type()) != 0) {
+ if (strcmp(sd->type(), StoreFileSystem::FileSystems().items[fs]->type()) != 0) {
debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
sd->type() << " " << sd->path << " to " << type_str << ". Restart required");
return;
allocate_new_swapdir(swap);
- swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().at(fs)->createSwapDir();
+ swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().items[fs]->createSwapDir();
sd = dynamic_cast<SwapDir *>(swap->swapDirs[swap->n_configured].getRaw());
bool replyMatchRequest = rep->content_range != NULL ?
request->range->contains(rep->content_range->spec) :
true;
- const int spec_count = http->request->range->specs.size();
+ const int spec_count = http->request->range->specs.count;
int64_t actual_clen = -1;
debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
/** \par
* Index any unknown file names used by deny_info.
*/
- ErrorDynamicPageInfo *info = ErrorDynamicPages.at(i - ERR_MAX);
+ ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i - ERR_MAX];
assert(info && info->id == i && info->page_name);
const char *pg = info->page_name;
safe_free(error_text);
}
- while (!ErrorDynamicPages.empty()) {
- errorDynamicPageInfoDestroy(ErrorDynamicPages.back());
- ErrorDynamicPages.pop_back();
- }
+ while (ErrorDynamicPages.size())
+ errorDynamicPageInfoDestroy(ErrorDynamicPages.pop_back());
error_page_count = 0;
}
for (size_t j = 0; j < ErrorDynamicPages.size(); ++j) {
- if (strcmp(ErrorDynamicPages[j]->page_name, page_name) == 0)
+ if (strcmp(ErrorDynamicPages.items[j]->page_name, page_name) == 0)
return j + ERR_MAX;
}
return err_type_str[pageId];
if (pageId >= ERR_MAX && pageId - ERR_MAX < (ssize_t)ErrorDynamicPages.size())
- return ErrorDynamicPages[pageId - ERR_MAX]->page_name;
+ return ErrorDynamicPages.items[pageId - ERR_MAX]->page_name;
return "ERR_UNKNOWN"; /* should not happen */
}
{
memset(&ftp, 0, sizeof(ftp));
- if (page_id >= ERR_MAX && ErrorDynamicPages[page_id - ERR_MAX]->page_redirect != Http::scNone)
- httpStatus = ErrorDynamicPages[page_id - ERR_MAX]->page_redirect;
+ if (page_id >= ERR_MAX && ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect != Http::scNone)
+ httpStatus = ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect;
if (req != NULL) {
request = req;
attribute = end + 1;
}
- // TODO: after c++11, replace &attributes.front() with attributes.data()
- theClient->start (tag + 1, const_cast<const char **>(&attributes.front()), attributes.size() >> 1);
+ theClient->start (tag + 1, (const char **)attributes.items, attributes.size() >> 1);
/* TODO: attributes */
if (*(tagEnd - 1) == '/')
{
freeResources();
- while (!variablesForCleanup.empty()) {
- delete variablesForCleanup.back();
- variablesForCleanup.pop_back();
- }
+ while (variablesForCleanup.size())
+ delete variablesForCleanup.pop_back();
delete defaultVariable;
}
IO->io = anIO;
/* Change the IO Options */
- if (currentIOOptions && currentIOOptions->options.size() > 2) {
- delete currentIOOptions->options.back();
- currentIOOptions->options.pop_back();
- }
+ if (currentIOOptions && currentIOOptions->options.size() > 2)
+ delete currentIOOptions->options.pop_back();
/* TODO: factor out these 4 lines */
ConfigOption *ioOptions = IO->io->getOptionTree();
/// maintain n kids
void Kids::init()
{
- storage.clear();
+ if (storage.size() > 0)
+ storage.clean();
storage.reserve(NumberOfKids());
}
for (i = 0; i < 10; ++i) {
- e = LateReleaseStack.empty() ? NULL : LateReleaseStack.pop();
+ e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
if (e == NULL) {
/* done! */
bool
StoreSearchHashIndex::next()
{
- if (!entries.empty())
+ if (entries.size())
entries.pop_back();
while (!isDone() && !entries.size())
HttpHdrRange copy(*range);
- assert (copy.specs.size() == range->specs.size());
+ assert (copy.specs.count == range->specs.count);
HttpHdrRange::iterator pos = range->begin();
testRangeIter ()
{
HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.size() == 3);
+ assert (range->specs.count == 3);
size_t counter = 0;
HttpHdrRange::iterator i = range->begin();
testRangeCanonization()
{
HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.size() == 3);
+ assert (range->specs.count == 3);
/* 0-3 needs a content length of 4 */
/* This passes in the extant code - but should it? */
if (!range->canonize(3))
exit(1);
- assert (range->specs.size() == 3);
+ assert (range->specs.count == 3);
delete range;
range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.size() == 3);
+ assert (range->specs.count == 3);
/* 0-3 needs a content length of 4 */
if (!range->canonize(4))
range=rangeFromString("bytes=3-6");
- assert (range->specs.size() == 1);
+ assert (range->specs.count == 1);
/* 3-6 needs a content length of 4 or more */
if (range->canonize(3))
range=rangeFromString("bytes=3-6");
- assert (range->specs.size() == 1);
+ assert (range->specs.count == 1);
/* 3-6 needs a content length of 4 or more */
if (!range->canonize(4))
range=rangeFromString("bytes=1-1,2-3");
- assert (range->specs.size()== 2);
+ assert (range->specs.count == 2);
if (!range->canonize(4))
exit(1);
- assert (range->specs.size() == 2);
+ assert (range->specs.count == 2);
delete range;
}
debugs(26, 3, "TunnelStateData destructed this=" << this);
assert(noConnections());
xfree(url);
- serverDestinations.clear();
+ serverDestinations.clean();
delete connectRespBuf;
}