* struct stat and squidaio_xstrdup use explicit pool alloc()/freeOne().
* XXX: convert to MEMPROXY_CLASS() API
*/
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include <cerrno>
/* digest_nonce_h still uses explicit alloc()/freeOne() MemPool calls.
* XXX: convert to MEMPROXY_CLASS() API
*/
+#include "mem/Allocator.h"
#include "mem/Pool.h"
static AUTHSSTATS authenticateDigestStats;
#include "squid.h"
#include "cbdata.h"
#include "Generic.h"
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include "mgr/Registration.h"
#include "Store.h"
#include "base/TypeTraits.h"
#include "mem/forward.h"
+#include "mem/Meter.h"
namespace Mem
{
class Allocator : public Interface
{
public:
- explicit Allocator(const char * const aLabel): label(aLabel) {}
+    /// flush accumulated counters into 'meter' after this many alloc() calls
+ static const size_t FlushLimit = 1000;
+
+ Allocator(const char * const aLabel, const size_t sz):
+ label(aLabel),
+ objectSize(RoundedSize(sz))
+ {}
// TODO make this method const
/**
*/
virtual size_t getStats(PoolStats &) = 0;
- virtual PoolMeter const &getMeter() const = 0;
-
/// provide (and reserve) memory suitable for storing one object
- virtual void *alloc() = 0;
+ void *alloc() {
+ if (++countAlloc == FlushLimit)
+ flushCounters();
+ return allocate();
+ }
/// return memory reserved by alloc()
- virtual void freeOne(void *) = 0;
-
- /// brief description of objects returned by alloc()
- virtual char const *objectType() const { return label; }
-
- /// the size (in bytes) of objects managed by this allocator
- virtual size_t objectSize() const = 0;
+ void freeOne(void *obj) {
+ assert(obj != nullptr);
+ (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, objectSize);
+ deallocate(obj);
+ ++countFreeOne;
+ }
/// the difference between the number of alloc() and freeOne() calls
- virtual int getInUseCount() = 0;
+ int getInUseCount() const { return meter.inuse.currentLevel(); }
/// \see doZero
void zeroBlocks(const bool doIt) { doZero = doIt; }
- int inUseCount() { return getInUseCount(); } // XXX: drop redundant?
-
/// XXX: Misplaced -- not all allocators have a notion of a "chunk". See MemPoolChunked.
virtual void setChunkSize(size_t) {}
+ virtual bool idleTrigger(int shift) const = 0;
+
+ virtual void clean(time_t maxage) = 0;
+
+ /**
+ * Flush temporary counter values into the statistics held in 'meter'.
+ */
+ void flushCounters() {
+ if (countFreeOne) {
+ meter.gb_freed.update(countFreeOne, objectSize);
+ countFreeOne = 0;
+ }
+ if (countAlloc) {
+ meter.gb_allocated.update(countAlloc, objectSize);
+ countAlloc = 0;
+ }
+ if (countSavedAllocs) {
+ meter.gb_saved.update(countSavedAllocs, objectSize);
+ countSavedAllocs = 0;
+ }
+ }
+
/**
* \param minSize Minimum size needed to be allocated.
* \retval n Smallest size divisible by sizeof(void*)
*/
static size_t RoundedSize(const size_t minSize) { return ((minSize + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*); }
+public:
+
+ /// the number of calls to Mem::Allocator::alloc() since last flush
+ size_t countAlloc = 0;
+
+ /// the number of malloc()/calloc() calls avoided since last flush
+ size_t countSavedAllocs = 0;
+
+ /// the number of calls to Mem::Allocator::freeOne() since last flush
+ size_t countFreeOne = 0;
+
+ // XXX: no counter for the number of free() calls avoided
+
+ /// brief description of objects returned by alloc()
+ const char *const label;
+
+ /// the size (in bytes) of objects managed by this allocator
+ const size_t objectSize;
+
+ /// statistics tracked for this allocator
+ PoolMeter meter;
+
protected:
+ /// \copydoc void *alloc()
+ virtual void *allocate() = 0;
+ /// \copydoc void freeOne(void *)
+ virtual void deallocate(void *) = 0;
+
/**
* Whether to zero memory on initial allocation and on return to the pool.
*
* When possible, set this to false to avoid zeroing overheads.
*/
bool doZero = true;
-
-private:
- const char *label = nullptr;
};
} // namespace Mem
*/
#include "squid.h"
+#include "mem/Allocator.h"
#include "mem/AllocatorProxy.h"
-#include "mem/Meter.h"
#include "mem/Pool.h"
#include "mem/Stats.h"
if (!theAllocator)
return 0;
else
- return theAllocator->inUseCount();
+ return theAllocator->getInUseCount();
}
void
Mem::PoolMeter const &
Mem::AllocatorProxy::getMeter() const
{
- return getAllocator()->getMeter();
+ return getAllocator()->meter;
}
size_t
{
public:
/// Object to track per-pool cumulative counters
- struct mgb_t {
+ class mgb_t
+ {
+ public:
+ mgb_t &operator +=(const mgb_t &o) {
+ count += o.count;
+ bytes += o.bytes;
+ return *this;
+ }
+
+        /// account for 'items' objects of 'itemSize' bytes each being processed
+ void update(size_t items, size_t itemSize) {
+ count += items;
+ bytes += (items * itemSize);
+ }
+
+ public:
double count = 0.0;
double bytes = 0.0;
};
*/
#include "squid.h"
+#include "mem/Pool.h"
#include "mem/PoolChunked.h"
#include "mem/PoolMalloc.h"
#include "mem/Stats.h"
#include <cassert>
#include <cstring>
-#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
-
extern time_t squid_curtime;
Mem::PoolMeter TheMeter;
defaultIsChunked = atoi(cfg);
}
-MemImplementingAllocator *
+Mem::Allocator *
MemPools::create(const char *label, size_t obj_size)
{
// TODO Use ref-counted Pointer for pool lifecycle management
// that is complicated by all the global static pool pointers.
// For now leak these Allocator descendants on shutdown.
- MemImplementingAllocator *newPool;
+ Mem::Allocator *newPool;
if (defaultIsChunked)
newPool = new MemPoolChunked(label, obj_size);
else
defaultIsChunked = aBool;
}
-void
-MemImplementingAllocator::flushMeters()
-{
- size_t calls;
-
- calls = free_calls;
- if (calls) {
- meter.gb_freed.count += calls;
- free_calls = 0;
- }
- calls = alloc_calls;
- if (calls) {
- meter.gb_allocated.count += calls;
- alloc_calls = 0;
- }
- calls = saved_calls;
- if (calls) {
- meter.gb_saved.count += calls;
- saved_calls = 0;
- }
-}
-
-void
-MemImplementingAllocator::flushMetersFull()
-{
- flushMeters();
- getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
- getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
- getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
-}
-
/*
* Updates all pool counters, and recreates TheMeter totals from all pools
*/
void
MemPools::flushMeters()
{
+    // Resets the historic gb_* counters in TheMeter.
+ // This is okay as they get regenerated from pool historic counters.
TheMeter.flush();
for (const auto pool: pools) {
- pool->flushMetersFull();
- // are these TheMeter grow() operations or accumulated volumes ?
- TheMeter.alloc += pool->getMeter().alloc.currentLevel() * pool->obj_size;
- TheMeter.inuse += pool->getMeter().inuse.currentLevel() * pool->obj_size;
- TheMeter.idle += pool->getMeter().idle.currentLevel() * pool->obj_size;
-
- TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
- TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
- TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
- TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
- TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
- TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
+        // ensure the pool's meter reflects the latest alloc()/freeOne() calls
+ pool->flushCounters();
+
+ // Accumulate current volumes (in bytes) across all pools.
+ TheMeter.alloc += pool->meter.alloc.currentLevel() * pool->objectSize;
+ TheMeter.inuse += pool->meter.inuse.currentLevel() * pool->objectSize;
+ TheMeter.idle += pool->meter.idle.currentLevel() * pool->objectSize;
+ // We cannot calculate the global peak because individual pools peak at different times.
+
+ // regenerate gb_* values from original pool stats
+ TheMeter.gb_allocated += pool->meter.gb_allocated;
+ TheMeter.gb_saved += pool->meter.gb_saved;
+ TheMeter.gb_freed += pool->meter.gb_freed;
}
}
-void *
-MemImplementingAllocator::alloc()
-{
- if (++alloc_calls == FLUSH_LIMIT)
- flushMeters();
-
- return allocate();
-}
-
-void
-MemImplementingAllocator::freeOne(void *obj)
-{
- assert(obj != nullptr);
- (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
- deallocate(obj, MemPools::GetInstance().idleLimit() == 0);
- ++free_calls;
-}
-
/*
* Returns all cached frees to their home chunks
* If chunks unreferenced age is over, destroys Idle chunk
pool->clean(maxage);
}
}
-
-MemImplementingAllocator::MemImplementingAllocator(char const * const aLabel, const size_t aSize):
- Mem::Allocator(aLabel),
- alloc_calls(0),
- free_calls(0),
- saved_calls(0),
- obj_size(RoundedSize(aSize))
-{
- assert(aLabel != nullptr && aSize);
-}
-
-Mem::PoolMeter const &
-MemImplementingAllocator::getMeter() const
-{
- return meter;
-}
-
-Mem::PoolMeter &
-MemImplementingAllocator::getMeter()
-{
- return meter;
-}
-
-size_t
-MemImplementingAllocator::objectSize() const
-{
- return obj_size;
-}
-
* might be the way to go.
*/
-#include "mem/Allocator.h"
+#include "mem/forward.h"
#include "mem/Meter.h"
#include "util.h"
/// \ingroup MemPoolsAPI
#define toKB(size) ( (size + 1024 - 1) / 1024 )
-/// \ingroup MemPoolsAPI
-#define MEM_PAGE_SIZE 4096
-/// \ingroup MemPoolsAPI
-#define MEM_MIN_FREE 32
-/// \ingroup MemPoolsAPI
-#define MEM_MAX_FREE 65535 /* unsigned short is max number of items per chunk */
-
-class MemImplementingAllocator;
-
/// memory usage totals as of latest MemPools::flushMeters() event
extern Mem::PoolMeter TheMeter;
* Create an allocator with given name to allocate fixed-size objects
* of the specified size.
*/
- MemImplementingAllocator *create(const char *, size_t);
+ Mem::Allocator *create(const char *, size_t);
/**
* Sets upper limit in bytes to amount of free ram kept in pools. This is
void setDefaultPoolChunking(bool const &);
- std::list<MemImplementingAllocator *> pools;
+ std::list<Mem::Allocator *> pools;
bool defaultIsChunked = false;
private:
ssize_t idleLimit_ = (2 << 20);
};
-/// \ingroup MemPoolsAPI
-class MemImplementingAllocator : public Mem::Allocator
-{
-public:
- typedef Mem::PoolMeter PoolMeter; // TODO remove
-
- MemImplementingAllocator(char const *aLabel, size_t aSize);
-
- virtual PoolMeter &getMeter();
- virtual void flushMetersFull();
- virtual void flushMeters();
- virtual bool idleTrigger(int shift) const = 0;
- virtual void clean(time_t maxage) = 0;
-
- /* Mem::Allocator API */
- PoolMeter const &getMeter() const override;
- void *alloc() override;
- void freeOne(void *) override;
- size_t objectSize() const override;
- int getInUseCount() override = 0;
-
-protected:
- virtual void *allocate() = 0;
- virtual void deallocate(void *, bool aggressive) = 0;
- PoolMeter meter;
-public:
- size_t alloc_calls;
- size_t free_calls;
- size_t saved_calls;
- size_t obj_size;
-};
-
/// Creates a named MemPool of elements with the given size
#define memPoolCreate MemPools::GetInstance().create
#include <cstring>
#define MEM_MAX_MMAP_CHUNKS 2048
+#define MEM_PAGE_SIZE 4096
+#define MEM_MIN_FREE 32
+#define MEM_MAX_FREE 65535 /* unsigned short is max number of items per chunk */
/*
* Old way:
void **Free = (void **)freeList;
for (int i = 1; i < pool->chunk_capacity; ++i) {
- *Free = (void *) ((char *) Free + pool->obj_size);
+ *Free = (void *) ((char *) Free + pool->objectSize);
void **nextFree = (void **)*Free;
- (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
+ (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->objectSize);
Free = nextFree;
}
nextFreeChunk = pool->nextFreeChunk;
pool->nextFreeChunk = this;
- pool->getMeter().alloc += pool->chunk_capacity;
- pool->getMeter().idle += pool->chunk_capacity;
+ pool->meter.alloc += pool->chunk_capacity;
+ pool->meter.idle += pool->chunk_capacity;
++pool->chunkCount;
lastref = squid_curtime;
pool->allChunks.insert(this, memCompChunks);
}
MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
- MemImplementingAllocator(aLabel, aSize), chunk_size(0),
+ Mem::Allocator(aLabel, aSize),
+ chunk_size(0),
chunk_capacity(0), chunkCount(0), freeCache(nullptr), nextFreeChunk(nullptr),
Chunks(nullptr), allChunks(Splay<MemChunk *>())
{
MemChunk::~MemChunk()
{
- pool->getMeter().alloc -= pool->chunk_capacity;
- pool->getMeter().idle -= pool->chunk_capacity;
+ pool->meter.alloc -= pool->chunk_capacity;
+ pool->meter.idle -= pool->chunk_capacity;
-- pool->chunkCount;
pool->allChunks.remove(this, memCompChunks);
xfree(objCache);
* the object size here, but such condition is not safe.
*/
if (doZero)
- memset(obj, 0, obj_size);
+ memset(obj, 0, objectSize);
Free = (void **)obj;
*Free = freeCache;
freeCache = obj;
- (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
+ (void) VALGRIND_MAKE_MEM_NOACCESS(obj, objectSize);
}
/*
{
void **Free;
- ++saved_calls;
+ ++countSavedAllocs;
/* first, try cache */
if (freeCache) {
Free = (void **)freeCache;
- (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
+ (void) VALGRIND_MAKE_MEM_DEFINED(Free, objectSize);
freeCache = *Free;
*Free = nullptr;
return Free;
/* then try perchunk freelist chain */
if (nextFreeChunk == nullptr) {
/* no chunk with frees, so create new one */
- -- saved_calls; // compensate for the ++ above
+ --countSavedAllocs; // compensate for the ++ above
createChunk();
}
/* now we have some in perchunk freelist chain */
/* last free in this chunk, so remove us from perchunk freelist chain */
nextFreeChunk = chunk->nextFreeChunk;
}
- (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
+ (void) VALGRIND_MAKE_MEM_DEFINED(Free, objectSize);
return Free;
}
return;
csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
- cap = csize / obj_size;
+ cap = csize / objectSize;
if (cap < MEM_MIN_FREE)
cap = MEM_MIN_FREE;
- if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
- cap = MEM_CHUNK_MAX_SIZE / obj_size;
+ if (cap * objectSize > MEM_CHUNK_MAX_SIZE)
+ cap = MEM_CHUNK_MAX_SIZE / objectSize;
if (cap > MEM_MAX_FREE)
cap = MEM_MAX_FREE;
if (cap < 1)
cap = 1;
- csize = cap * obj_size;
+ csize = cap * objectSize;
csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
- cap = csize / obj_size;
+ cap = csize / objectSize;
chunk_capacity = cap;
chunk_size = csize;
{
MemChunk *chunk, *fchunk;
- flushMetersFull();
+ flushCounters();
clean(0);
- assert(meter.inuse.currentLevel() == 0);
+ assert(getInUseCount() == 0);
chunk = Chunks;
while ( (fchunk = chunk) != nullptr) {
}
-int
-MemPoolChunked::getInUseCount()
-{
- return meter.inuse.currentLevel();
-}
-
void *
MemPoolChunked::allocate()
{
}
void
-MemPoolChunked::deallocate(void *obj, bool)
+MemPoolChunked::deallocate(void *obj)
{
push(obj);
assert(meter.inuse.currentLevel() > 0);
if (!Chunks)
return;
- flushMetersFull();
+ flushCounters();
convertFreeCacheToChunkFreeCache();
/* Now we have all chunks in this pool cleared up, all free items returned to their home */
/* We start now checking all chunks to see if we can release any */
clean((time_t) 555555); /* don't want to get chunks released before reporting */
stats.pool = this;
- stats.label = objectType();
+ stats.label = label;
stats.meter = &meter;
- stats.obj_size = obj_size;
+ stats.obj_size = objectSize;
stats.chunk_capacity = chunk_capacity;
/* gather stats for each Chunk */
stats.items_inuse += meter.inuse.currentLevel();
stats.items_idle += meter.idle.currentLevel();
- stats.overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;
+ stats.overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(label) + 1;
- return meter.inuse.currentLevel();
+ return getInUseCount();
}
#ifndef _MEM_POOL_CHUNKED_H_
#define _MEM_POOL_CHUNKED_H_
-#include "mem/Pool.h"
+#include "mem/Allocator.h"
#include "splay.h"
#define MEM_CHUNK_SIZE 4 * 4096 /* 16KB ... 4 * VM_PAGE_SZ */
class MemChunk;
/// \ingroup MemPoolsAPI
-class MemPoolChunked : public MemImplementingAllocator
+class MemPoolChunked : public Mem::Allocator
{
public:
friend class MemChunk;
MemPoolChunked(const char *label, size_t obj_size);
~MemPoolChunked() override;
void convertFreeCacheToChunkFreeCache();
- void clean(time_t maxage) override;
void createChunk();
void *get();
void push(void *obj);
/* Mem::Allocator API */
size_t getStats(Mem::PoolStats &) override;
- int getInUseCount() override;
void setChunkSize(size_t) override;
+ bool idleTrigger(int) const override;
+ void clean(time_t) override;
protected:
+ /* Mem::Allocator API */
void *allocate() override;
- void deallocate(void *, bool aggressive) override;
-public:
- bool idleTrigger(int shift) const override;
+ void deallocate(void *) override;
+public:
size_t chunk_size;
int chunk_capacity;
int chunkCount;
*/
#include "squid.h"
+#include "mem/Pool.h"
#include "mem/PoolMalloc.h"
#include "mem/Stats.h"
}
if (obj) {
--meter.idle;
- ++saved_calls;
+ ++countSavedAllocs;
} else {
if (doZero)
- obj = xcalloc(1, obj_size);
+ obj = xcalloc(1, objectSize);
else
- obj = xmalloc(obj_size);
+ obj = xmalloc(objectSize);
++meter.alloc;
}
++meter.inuse;
}
void
-MemPoolMalloc::deallocate(void *obj, bool aggressive)
+MemPoolMalloc::deallocate(void *obj)
{
--meter.inuse;
- if (aggressive) {
+ if (MemPools::GetInstance().idleLimit() == 0) {
xfree(obj);
--meter.alloc;
} else {
if (doZero)
- memset(obj, 0, obj_size);
+ memset(obj, 0, objectSize);
++meter.idle;
freelist.push(obj);
}
MemPoolMalloc::getStats(Mem::PoolStats &stats)
{
stats.pool = this;
- stats.label = objectType();
+ stats.label = label;
stats.meter = &meter;
- stats.obj_size = obj_size;
+ stats.obj_size = objectSize;
stats.chunk_capacity = 0;
- stats.chunks_alloc += 0;
- stats.chunks_inuse += 0;
- stats.chunks_partial += 0;
- stats.chunks_free += 0;
-
stats.items_alloc += meter.alloc.currentLevel();
stats.items_inuse += meter.inuse.currentLevel();
stats.items_idle += meter.idle.currentLevel();
- stats.overhead += sizeof(MemPoolMalloc) + strlen(objectType()) + 1;
-
- return meter.inuse.currentLevel();
-}
+ stats.overhead += sizeof(*this) + strlen(label) + 1;
-int
-MemPoolMalloc::getInUseCount()
-{
- return meter.inuse.currentLevel();
+ return getInUseCount();
}
-MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize)
+MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) :
+ Mem::Allocator(aLabel, aSize)
{
}
MemPoolMalloc::~MemPoolMalloc()
{
- assert(meter.inuse.currentLevel() == 0);
+ assert(getInUseCount() == 0);
clean(0);
}
* might be the way to go.
*/
-#include "mem/Pool.h"
+#include "mem/Allocator.h"
#include <stack>
/// \ingroup MemPoolsAPI
-class MemPoolMalloc : public MemImplementingAllocator
+class MemPoolMalloc : public Mem::Allocator
{
public:
MemPoolMalloc(char const *label, size_t aSize);
~MemPoolMalloc() override;
- bool idleTrigger(int shift) const override;
- void clean(time_t maxage) override;
/* Mem::Allocator API */
size_t getStats(Mem::PoolStats &) override;
- int getInUseCount() override;
+ bool idleTrigger(int) const override;
+ void clean(time_t) override;
protected:
+ /* Mem::Allocator API */
void *allocate() override;
- void deallocate(void *, bool aggressive) override;
+ void deallocate(void *) override;
+
private:
std::stack<void *> freelist;
};
*/
#include "squid.h"
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include "mem/Stats.h"
#include "fs_io.h"
#include "icmp/net_db.h"
#include "md5.h"
-#include "mem/forward.h"
-#include "mem/Meter.h"
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include "mem/Stats.h"
#include "MemBuf.h"
strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
strPools[i]->zeroBlocks(false);
- if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
+ if (strPools[i]->objectSize != PoolAttrs[i].obj_size)
debugs(13, DBG_IMPORTANT, "WARNING: " << PoolAttrs[i].name <<
- " is " << strPools[i]->objectSize() <<
+ " is " << strPools[i]->objectSize <<
" bytes instead of requested " <<
PoolAttrs[i].obj_size << " bytes");
}
{
for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
auto &pool = GetStrPool(i);
- if (fuzzy && net_size < pool.objectSize())
+ if (fuzzy && net_size < pool.objectSize)
return &pool;
- if (net_size == pool.objectSize())
+ if (net_size == pool.objectSize)
return &pool;
}
return nullptr;
for (i = 0; i < mem_str_pool_count; ++i) {
const auto &pool = GetStrPool(i);
- const auto plevel = pool.getMeter().inuse.currentLevel();
- stream << std::setw(20) << std::left << pool.objectType();
+ const auto plevel = pool.meter.inuse.currentLevel();
+ stream << std::setw(20) << std::left << pool.label;
stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
- stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
+ stream << "\t " << xpercentInt(plevel * pool.objectSize, StrVolumeMeter.currentLevel()) << "\n";
pooled_count += plevel;
- pooled_volume += plevel * pool.objectSize();
+ pooled_volume += plevel * pool.objectSize;
}
/* malloc strings */
assert(gross_size);
if (const auto pool = memFindStringPool(net_size, true)) {
- *gross_size = pool->objectSize();
+ *gross_size = pool->objectSize;
assert(*gross_size >= net_size);
++StrCountMeter;
StrVolumeMeter += *gross_size;
if (const auto pool = memFindStringPool(net_size, true)) {
++StrCountMeter;
- StrVolumeMeter += pool->objectSize();
+ StrVolumeMeter += pool->objectSize;
return pool->alloc();
}
size_t result = 0;
for (int counter = 0; counter < mem_str_pool_count; ++counter)
- result += GetStrPool(counter).inUseCount();
+ result += GetStrPool(counter).getInUseCount();
return result;
}
if (const auto pool = memFindStringPool(net_size, true)) {
pool->freeOne(buf);
- StrVolumeMeter -= pool->objectSize();
+ StrVolumeMeter -= pool->objectSize;
--StrCountMeter;
return;
}
int
memInUse(mem_type type)
{
- return GetPool(type)->inUseCount();
+ return GetPool(type)->getInUseCount();
}
/* ick */
PoolStats mp_stats;
pool->getStats(mp_stats);
- if (mp_stats.pool->getMeter().gb_allocated.count > 0)
+ if (mp_stats.pool->meter.gb_allocated.count > 0)
usedPools.emplace_back(mp_stats);
else
++not_used;
/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
* XXX: convert to MEMPROXY_CLASS() API
*/
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include <climits>
void *Mem::AllocatorProxy::alloc() {return xmalloc(64*1024);}
void Mem::AllocatorProxy::freeOne(void *address) {xfree(address);}
int Mem::AllocatorProxy::inUseCount() const {return 0;}
-//Mem::PoolMeter const &Mem::AllocatorProxy::getMeter() const STUB_RETSTATREF(PoolMeter)
size_t Mem::AllocatorProxy::getStats(PoolStats &) STUB_RETVAL(0)
#include "mem/forward.h"
MemPools &MemPools::GetInstance() {return tmpMemPools;}
MemPools::MemPools() STUB_NOP
void MemPools::flushMeters() STUB
-MemImplementingAllocator * MemPools::create(const char *, size_t) STUB_RETVAL(nullptr);
+Mem::Allocator * MemPools::create(const char *, size_t) STUB_RETVAL(nullptr);
void MemPools::clean(time_t) STUB
void MemPools::setDefaultPoolChunking(bool const &) STUB
-//MemImplementingAllocator::MemImplementingAllocator(char const *, size_t) STUB_NOP
-Mem::PoolMeter const &MemImplementingAllocator::getMeter() const STUB_RETSTATREF(PoolMeter)
-Mem::PoolMeter &MemImplementingAllocator::getMeter() STUB_RETSTATREF(PoolMeter)
-void MemImplementingAllocator::flushMetersFull() STUB
-void MemImplementingAllocator::flushMeters() STUB
-void *MemImplementingAllocator::alloc() STUB_RETVAL(nullptr)
-void MemImplementingAllocator::freeOne(void *) STUB
-size_t MemImplementingAllocator::objectSize() const { return obj_size; }
-
#include "mem/Stats.h"
size_t Mem::GlobalStats(PoolStats &) STUB_RETVAL(0)
*/
#include "squid.h"
-#include "mem/forward.h"
+#include "mem/Allocator.h"
#include "mem/Pool.h"
#include "tests/testMem.h"
#include "unitTestMain.h"