bool
MemPoolMalloc::idleTrigger(int shift) const
{
- return freelist.count >> (shift ? 8 : 0);
+ // Nonzero (true) when the freelist holds any objects (shift == 0)
+ // or at least 256 objects (shift != 0): size() >> 8 is nonzero only
+ // for size() >= 256.
+ return freelist.size() >> (shift ? 8 : 0);
}
void
HttpHdrRange::canonize (int64_t newClen)
{
clen = newClen;
- debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.count <<
+ debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.size() <<
" specs, clen: " << clen);
Vector<HttpHdrRangeSpec*> goods;
getCanonizedSpecs(goods);
merge (goods);
- debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.count <<
+ debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.size() <<
" specs");
- return specs.count > 0;
+ return specs.size() > 0; // fixme, should return bool
}
/* hack: returns true if range specs are too "complex" for Squid to handle */
* has been used. As a hack, just never count zero-sized header
* arrays.
*/
- if (0 != entries.count)
- HttpHeaderStats[owner].hdrUCountDistr.count(entries.count);
+ if (!entries.empty())
+ HttpHeaderStats[owner].hdrUCountDistr.count(entries.size());
++ HttpHeaderStats[owner].destroyedCount;
- HttpHeaderStats[owner].busyDestroyedCount += entries.count > 0;
+ HttpHeaderStats[owner].busyDestroyedCount += !entries.empty();
} // if (owner <= hoReply)
while ((e = getEntry(&pos))) {
HttpHeader::getEntry(HttpHeaderPos * pos) const
{
assert(pos);
- assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.count);
+ assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.size());
- for (++(*pos); *pos < (ssize_t)entries.count; ++(*pos)) {
- if (entries.items[*pos])
- return (HttpHeaderEntry*)entries.items[*pos];
+ for (++(*pos); *pos < (ssize_t)entries.size(); ++(*pos)) {
+ if (entries[*pos])
+ return (HttpHeaderEntry*)entries[*pos];
}
return NULL;
HttpHeader::delAt(HttpHeaderPos pos, int &headers_deleted)
{
HttpHeaderEntry *e;
- assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.count);
- e = (HttpHeaderEntry*)entries.items[pos];
- entries.items[pos] = NULL;
+ assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.size());
+ e = (HttpHeaderEntry*)entries[pos];
+ entries[pos] = NULL;
/* decrement header length, allow for ": " and crlf */
len -= e->name.size() + 2 + e->value.size() + 2;
assert(len >= 0);
assert_eid(e->id);
assert(e->name.size());
- debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
+ debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
if (CBIT_TEST(mask, e->id))
++ Headers[e->id].stat.repCount;
bool
HttpRequest::multipartRangeRequest() const
{
- return (range && range->specs.count > 1);
+ // A request is multipart only when it asks for MORE THAN ONE range;
+ // !specs.empty() would wrongly match single-range requests too.
+ return (range && range->specs.size() > 1);
}
bool
AccessRule *r = *i;
if (isCandidate(*r)) {
debugs(93, 5, HERE << "check: rule '" << r->id << "' is a candidate");
- candidates += r->id;
+ candidates.push_back(r->id);
}
}
void Adaptation::Icap::Options::cfgMethod(ICAP::Method m)
{
Must(m != ICAP::methodNone);
- methods += m;
+ methods.push_back(m);
}
// TODO: HttpHeader should provide a general method for this type of conversion
const_iterator begin () const;
iterator end();
const_iterator end () const;
+ E& at(unsigned i);
+ const E& at(unsigned i) const;
E& operator [] (unsigned i);
const E& operator [] (unsigned i) const;
template<class E>
E &
-Vector<E>::operator [] (unsigned i)
+Vector<E>::at(unsigned i)
{
assert (size() > i);
return items[i];
template<class E>
const E &
-Vector<E>::operator [] (unsigned i) const
+Vector<E>::at(unsigned i) const
{
assert (size() > i);
return items[i];
}
+// NOTE(review): operator[] is now unchecked (the bounds assert moved to
+// at()), mirroring std::vector semantics. Existing callers that relied
+// on the old asserting operator[] silently lose the check -- confirm
+// this is intended for all call sites.
+template<class E>
+E &
+Vector<E>::operator [] (unsigned i)
+{
+ return items[i];
+}
+
+template<class E>
+const E &
+Vector<E>::operator [] (unsigned i) const
+{
+ return items[i];
+}
+
template<class C>
VectorIteratorBase<C>::VectorIteratorBase() : pos(0), theVector(NULL)
{}
find_fstype(char *type)
{
for (size_t i = 0; i < StoreFileSystem::FileSystems().size(); ++i)
- if (strcasecmp(type, StoreFileSystem::FileSystems().items[i]->type()) == 0)
+ if (strcasecmp(type, StoreFileSystem::FileSystems().at(i)->type()) == 0)
return (int)i;
return (-1);
sd = dynamic_cast<SwapDir *>(swap->swapDirs[i].getRaw());
- if (strcmp(sd->type(), StoreFileSystem::FileSystems().items[fs]->type()) != 0) {
+ if (strcmp(sd->type(), StoreFileSystem::FileSystems().at(fs)->type()) != 0) {
debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
sd->type() << " " << sd->path << " to " << type_str << ". Restart required");
return;
allocate_new_swapdir(swap);
- swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().items[fs]->createSwapDir();
+ swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().at(fs)->createSwapDir();
sd = dynamic_cast<SwapDir *>(swap->swapDirs[swap->n_configured].getRaw());
bool replyMatchRequest = rep->content_range != NULL ?
request->range->contains(rep->content_range->spec) :
true;
- const int spec_count = http->request->range->specs.count;
+ const int spec_count = http->request->range->specs.size();
int64_t actual_clen = -1;
debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
/** \par
* Index any unknown file names used by deny_info.
*/
- ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i - ERR_MAX];
+ ErrorDynamicPageInfo *info = ErrorDynamicPages[i - ERR_MAX];
assert(info && info->id == i && info->page_name);
const char *pg = info->page_name;
}
for (size_t j = 0; j < ErrorDynamicPages.size(); ++j) {
- if (strcmp(ErrorDynamicPages.items[j]->page_name, page_name) == 0)
+ if (strcmp(ErrorDynamicPages[j]->page_name, page_name) == 0)
return j + ERR_MAX;
}
return err_type_str[pageId];
if (pageId >= ERR_MAX && pageId - ERR_MAX < (ssize_t)ErrorDynamicPages.size())
- return ErrorDynamicPages.items[pageId - ERR_MAX]->page_name;
+ return ErrorDynamicPages[pageId - ERR_MAX]->page_name;
return "ERR_UNKNOWN"; /* should not happen */
}
{
memset(&ftp, 0, sizeof(ftp));
- if (page_id >= ERR_MAX && ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect != Http::scNone)
- httpStatus = ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect;
+ if (page_id >= ERR_MAX && ErrorDynamicPages[page_id - ERR_MAX]->page_redirect != Http::scNone)
+ httpStatus = ErrorDynamicPages[page_id - ERR_MAX]->page_redirect;
if (req != NULL) {
request = req;
}
for (i = 0; i < 10; ++i) {
- e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
+ e = LateReleaseStack.empty() ? NULL : LateReleaseStack.pop();
if (e == NULL) {
/* done! */
HttpHdrRange copy(*range);
- assert (copy.specs.count == range->specs.count);
+ assert (copy.specs.size() == range->specs.size());
HttpHdrRange::iterator pos = range->begin();
testRangeIter ()
{
HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.count == 3);
+ assert (range->specs.size() == 3);
size_t counter = 0;
HttpHdrRange::iterator i = range->begin();
testRangeCanonization()
{
HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.count == 3);
+ assert (range->specs.size() == 3);
/* 0-3 needs a content length of 4 */
/* This passes in the extant code - but should it? */
if (!range->canonize(3))
exit(1);
- assert (range->specs.count == 3);
+ assert (range->specs.size() == 3);
delete range;
range=rangeFromString("bytes=0-3, 1-, -2");
- assert (range->specs.count == 3);
+ assert (range->specs.size() == 3);
/* 0-3 needs a content length of 4 */
if (!range->canonize(4))
range=rangeFromString("bytes=3-6");
- assert (range->specs.count == 1);
+ assert (range->specs.size() == 1);
/* 3-6 needs a content length of 4 or more */
if (range->canonize(3))
range=rangeFromString("bytes=3-6");
- assert (range->specs.count == 1);
+ assert (range->specs.size() == 1);
/* 3-6 needs a content length of 4 or more */
if (!range->canonize(4))
range=rangeFromString("bytes=1-1,2-3");
- assert (range->specs.count == 2);
+ assert (range->specs.size() == 2);
if (!range->canonize(4))
exit(1);
- assert (range->specs.count == 2);
+ assert (range->specs.size() == 2);
delete range;
}