From: Amos Jeffries
Date: Sat, 28 Jan 2023 01:40:22 +0000 (+0000)
Subject: Merge MemImplementingAllocator and Mem::Allocator (#1211)
X-Git-Tag: SQUID_6_0_1~31
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7ae0a0c5b4e32cf58fa4a97dc41fa08256a78122;p=thirdparty%2Fsquid.git

Merge MemImplementingAllocator and Mem::Allocator (#1211)
---

diff --git a/src/DiskIO/DiskThreads/aiops.cc b/src/DiskIO/DiskThreads/aiops.cc
index c7b52a906b..077e5f21e5 100644
--- a/src/DiskIO/DiskThreads/aiops.cc
+++ b/src/DiskIO/DiskThreads/aiops.cc
@@ -22,6 +22,7 @@
  * struct stat and squidaio_xstrdup use explicit pool alloc()/freeOne().
  * XXX: convert to MEMPROXY_CLASS() API
  */
+#include "mem/Allocator.h"
 #include "mem/Pool.h"

 #include
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index 5d1599e48d..f00e2ba682 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -41,6 +41,7 @@
 /* digest_nonce_h still uses explicit alloc()/freeOne() MemPool calls.
  * XXX: convert to MEMPROXY_CLASS() API
  */
+#include "mem/Allocator.h"
 #include "mem/Pool.h"

 static AUTHSSTATS authenticateDigestStats;
diff --git a/src/cbdata.cc b/src/cbdata.cc
index dc585ceabc..58ade16eeb 100644
--- a/src/cbdata.cc
+++ b/src/cbdata.cc
@@ -11,6 +11,7 @@
 #include "squid.h"
 #include "cbdata.h"
 #include "Generic.h"
+#include "mem/Allocator.h"
 #include "mem/Pool.h"
 #include "mgr/Registration.h"
 #include "Store.h"
diff --git a/src/mem/Allocator.h b/src/mem/Allocator.h
index c3dd3d9d1f..a554881cc7 100644
--- a/src/mem/Allocator.h
+++ b/src/mem/Allocator.h
@@ -11,6 +11,7 @@

 #include "base/TypeTraits.h"
 #include "mem/forward.h"
+#include "mem/Meter.h"

 namespace Mem
 {
@@ -20,7 +21,13 @@ namespace Mem
 class Allocator : public Interface
 {
 public:
-    explicit Allocator(const char * const aLabel): label(aLabel) {}
+    /// Flush counters into 'meter' after every FlushLimit allocations
+    static const size_t FlushLimit = 1000;
+
+    Allocator(const char * const aLabel, const size_t sz):
+        label(aLabel),
+        objectSize(RoundedSize(sz))
+    {}

     // TODO make this method const
     /**
@@ -29,38 +36,86 @@ public:
      */
     virtual size_t getStats(PoolStats &) = 0;

-    virtual PoolMeter const &getMeter() const = 0;
-
     /// provide (and reserve) memory suitable for storing one object
-    virtual void *alloc() = 0;
+    void *alloc() {
+        if (++countAlloc == FlushLimit)
+            flushCounters();
+        return allocate();
+    }

     /// return memory reserved by alloc()
-    virtual void freeOne(void *) = 0;
-
-    /// brief description of objects returned by alloc()
-    virtual char const *objectType() const { return label; }
-
-    /// the size (in bytes) of objects managed by this allocator
-    virtual size_t objectSize() const = 0;
+    void freeOne(void *obj) {
+        assert(obj != nullptr);
+        (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, objectSize);
+        deallocate(obj);
+        ++countFreeOne;
+    }

     /// the difference between the number of alloc() and freeOne() calls
-    virtual int getInUseCount() = 0;
+    int getInUseCount() const { return meter.inuse.currentLevel(); }

     /// \see doZero
     void zeroBlocks(const bool doIt) { doZero = doIt; }

-    int inUseCount() { return getInUseCount(); } // XXX: drop redundant?
-
     /// XXX: Misplaced -- not all allocators have a notion of a "chunk". See MemPoolChunked.
     virtual void setChunkSize(size_t) {}

+    virtual bool idleTrigger(int shift) const = 0;
+
+    virtual void clean(time_t maxage) = 0;
+
+    /**
+     * Flush temporary counter values into the statistics held in 'meter'.
+     */
+    void flushCounters() {
+        if (countFreeOne) {
+            meter.gb_freed.update(countFreeOne, objectSize);
+            countFreeOne = 0;
+        }
+        if (countAlloc) {
+            meter.gb_allocated.update(countAlloc, objectSize);
+            countAlloc = 0;
+        }
+        if (countSavedAllocs) {
+            meter.gb_saved.update(countSavedAllocs, objectSize);
+            countSavedAllocs = 0;
+        }
+    }
+
     /**
      * \param minSize Minimum size needed to be allocated.
      * \retval n Smallest size divisible by sizeof(void*)
      */
     static size_t RoundedSize(const size_t minSize) { return ((minSize + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*); }

+public:
+
+    /// the number of calls to Mem::Allocator::alloc() since last flush
+    size_t countAlloc = 0;
+
+    /// the number of malloc()/calloc() calls avoided since last flush
+    size_t countSavedAllocs = 0;
+
+    /// the number of calls to Mem::Allocator::freeOne() since last flush
+    size_t countFreeOne = 0;
+
+    // XXX: no counter for the number of free() calls avoided
+
+    /// brief description of objects returned by alloc()
+    const char *const label;
+
+    /// the size (in bytes) of objects managed by this allocator
+    const size_t objectSize;
+
+    /// statistics tracked for this allocator
+    PoolMeter meter;
+
 protected:
+    /// \copydoc void *alloc()
+    virtual void *allocate() = 0;
+
+    /// \copydoc void freeOne(void *)
+    virtual void deallocate(void *) = 0;
+
     /**
      * Whether to zero memory on initial allocation and on return to the pool.
      *
@@ -69,9 +124,6 @@ protected:
      * When possible, set this to false to avoid zeroing overheads.
      */
     bool doZero = true;
-
-private:
-    const char *label = nullptr;
 };

 } // namespace Mem
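With the merge, alloc() and freeOne() become plain non-virtual methods that bump cheap per-pool counters and defer the PoolMeter bookkeeping until every FlushLimit-th allocation. A minimal standalone sketch of that amortization pattern (ToyMeter and ToyAllocator are illustrative names, not Squid API):

    #include <cstddef>
    #include <new>

    struct ToyMeter { double count = 0, bytes = 0; };   // stand-in for Mem::PoolMeter totals

    class ToyAllocator {
    public:
        static const size_t FlushLimit = 1000;
        void *alloc() {
            if (++countAlloc == FlushLimit)
                flushCounters();                // heavy bookkeeping runs once per 1000 calls
            return ::operator new(objectSize);
        }
        void flushCounters() {
            meter.count += countAlloc;          // fold the cheap counter into the meter
            meter.bytes += countAlloc * double(objectSize);
            countAlloc = 0;
        }
        size_t countAlloc = 0;
        size_t objectSize = 64;
        ToyMeter meter;
    };
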
diff --git a/src/mem/AllocatorProxy.cc b/src/mem/AllocatorProxy.cc
index b945245cf3..92de07b691 100644
--- a/src/mem/AllocatorProxy.cc
+++ b/src/mem/AllocatorProxy.cc
@@ -7,8 +7,8 @@
  */

 #include "squid.h"
+#include "mem/Allocator.h"
 #include "mem/AllocatorProxy.h"
-#include "mem/Meter.h"
 #include "mem/Pool.h"
 #include "mem/Stats.h"

@@ -43,7 +43,7 @@ Mem::AllocatorProxy::inUseCount() const
     if (!theAllocator)
         return 0;
     else
-        return theAllocator->inUseCount();
+        return theAllocator->getInUseCount();
 }

 void
@@ -55,7 +55,7 @@ Mem::AllocatorProxy::zeroBlocks(bool doIt)
 Mem::PoolMeter const &
 Mem::AllocatorProxy::getMeter() const
 {
-    return getAllocator()->getMeter();
+    return getAllocator()->meter;
 }

 size_t
diff --git a/src/mem/Meter.h b/src/mem/Meter.h
index 68b9e374ba..066627b5be 100644
--- a/src/mem/Meter.h
+++ b/src/mem/Meter.h
@@ -55,7 +55,22 @@ class PoolMeter
 {
 public:
     /// Object to track per-pool cumulative counters
-    struct mgb_t {
+    class mgb_t
+    {
+    public:
+        mgb_t &operator +=(const mgb_t &o) {
+            count += o.count;
+            bytes += o.bytes;
+            return *this;
+        }
+
+        /// account for memory actions taking place
+        void update(size_t items, size_t itemSize) {
+            count += items;
+            bytes += (items * itemSize);
+        }
+
+    public:
         double count = 0.0;
         double bytes = 0.0;
     };
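The new mgb_t::update() keeps count and bytes in step, replacing the old split where flushMeters() added counts and flushMetersFull() recomputed bytes afterwards. For example, assuming a pool of 64-byte objects flushing 1000 allocations:

    Mem::PoolMeter::mgb_t perPool, global;
    perPool.update(1000, 64);   // count += 1000, bytes += 64000
    global += perPool;          // how MemPools::flushMeters() folds pool totals into TheMeter
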
diff --git a/src/mem/Pool.cc b/src/mem/Pool.cc
index 96b0993a3f..73a0be37da 100644
--- a/src/mem/Pool.cc
+++ b/src/mem/Pool.cc
@@ -11,6 +11,7 @@
  */

 #include "squid.h"
+#include "mem/Pool.h"
 #include "mem/PoolChunked.h"
 #include "mem/PoolMalloc.h"
 #include "mem/Stats.h"
@@ -18,8 +19,6 @@
 #include
 #include

-#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
-
 extern time_t squid_curtime;

 Mem::PoolMeter TheMeter;
@@ -44,14 +43,14 @@ MemPools::MemPools()
         defaultIsChunked = atoi(cfg);
 }

-MemImplementingAllocator *
+Mem::Allocator *
 MemPools::create(const char *label, size_t obj_size)
 {
     // TODO Use ref-counted Pointer for pool lifecycle management
     // that is complicated by all the global static pool pointers.
     // For now leak these Allocator descendants on shutdown.

-    MemImplementingAllocator *newPool;
+    Mem::Allocator *newPool;
     if (defaultIsChunked)
         newPool = new MemPoolChunked(label, obj_size);
     else
@@ -66,79 +65,33 @@ MemPools::setDefaultPoolChunking(bool const &aBool)
     defaultIsChunked = aBool;
 }

-void
-MemImplementingAllocator::flushMeters()
-{
-    size_t calls;
-
-    calls = free_calls;
-    if (calls) {
-        meter.gb_freed.count += calls;
-        free_calls = 0;
-    }
-    calls = alloc_calls;
-    if (calls) {
-        meter.gb_allocated.count += calls;
-        alloc_calls = 0;
-    }
-    calls = saved_calls;
-    if (calls) {
-        meter.gb_saved.count += calls;
-        saved_calls = 0;
-    }
-}
-
-void
-MemImplementingAllocator::flushMetersFull()
-{
-    flushMeters();
-    getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
-    getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
-    getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
-}
-
 /*
  * Updates all pool counters, and recreates TheMeter totals from all pools
  */
 void
 MemPools::flushMeters()
 {
+    // Does reset of the historic gb_* counters in TheMeter.
+    // This is okay as they get regenerated from pool historic counters.
     TheMeter.flush();

     for (const auto pool: pools) {
-        pool->flushMetersFull();
-        // are these TheMeter grow() operations or accumulated volumes ?
-        TheMeter.alloc += pool->getMeter().alloc.currentLevel() * pool->obj_size;
-        TheMeter.inuse += pool->getMeter().inuse.currentLevel() * pool->obj_size;
-        TheMeter.idle += pool->getMeter().idle.currentLevel() * pool->obj_size;
-
-        TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
-        TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
-        TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
-        TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
-        TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
-        TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
+        // ensure the pool's meter reflects the latest calls
+        pool->flushCounters();
+
+        // Accumulate current volumes (in bytes) across all pools.
+        TheMeter.alloc += pool->meter.alloc.currentLevel() * pool->objectSize;
+        TheMeter.inuse += pool->meter.inuse.currentLevel() * pool->objectSize;
+        TheMeter.idle += pool->meter.idle.currentLevel() * pool->objectSize;
+        // We cannot calculate the global peak because individual pools peak at different times.
+
+        // regenerate gb_* values from original pool stats
+        TheMeter.gb_allocated += pool->meter.gb_allocated;
+        TheMeter.gb_saved += pool->meter.gb_saved;
+        TheMeter.gb_freed += pool->meter.gb_freed;
     }
 }

-void *
-MemImplementingAllocator::alloc()
-{
-    if (++alloc_calls == FLUSH_LIMIT)
-        flushMeters();
-
-    return allocate();
-}
-
-void
-MemImplementingAllocator::freeOne(void *obj)
-{
-    assert(obj != nullptr);
-    (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
-    deallocate(obj, MemPools::GetInstance().idleLimit() == 0);
-    ++free_calls;
-}
-
 /*
  * Returns all cached frees to their home chunks
  * If chunks unreferenced age is over, destroys Idle chunk
@@ -164,32 +117,3 @@ MemPools::clean(time_t maxage)
         pool->clean(maxage);
     }
 }
-
-MemImplementingAllocator::MemImplementingAllocator(char const * const aLabel, const size_t aSize):
-    Mem::Allocator(aLabel),
-    alloc_calls(0),
-    free_calls(0),
-    saved_calls(0),
-    obj_size(RoundedSize(aSize))
-{
-    assert(aLabel != nullptr && aSize);
-}
-
-Mem::PoolMeter const &
-MemImplementingAllocator::getMeter() const
-{
-    return meter;
-}
-
-Mem::PoolMeter &
-MemImplementingAllocator::getMeter()
-{
-    return meter;
-}
-
-size_t
-MemImplementingAllocator::objectSize() const
-{
-    return obj_size;
-}
-
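Callers are unaffected by the return-type change: memPoolCreate still hands back a pooled allocator, now typed as Mem::Allocator *. A sketch of the usual call-site pattern ('Thing', thingPool, newThing and deleteThing are illustrative, not part of this commit):

    #include "mem/Allocator.h"
    #include "mem/Pool.h"

    struct Thing { char data[40]; };
    static Mem::Allocator *thingPool = nullptr;

    void *newThing() {
        if (!thingPool)
            thingPool = memPoolCreate("Thing", sizeof(Thing)); // MemPools::GetInstance().create(...)
        return thingPool->alloc();
    }

    void deleteThing(void *obj) {
        thingPool->freeOne(obj);   // alloc()/freeOne() calls must stay paired per pool
    }
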
diff --git a/src/mem/Pool.h b/src/mem/Pool.h
index 3e2b154b82..970eb88ddb 100644
--- a/src/mem/Pool.h
+++ b/src/mem/Pool.h
@@ -28,7 +28,7 @@
  * might be the way to go.
  */

-#include "mem/Allocator.h"
+#include "mem/forward.h"
 #include "mem/Meter.h"
 #include "util.h"

@@ -53,15 +53,6 @@
 /// \ingroup MemPoolsAPI
 #define toKB(size) ( (size + 1024 - 1) / 1024 )

-/// \ingroup MemPoolsAPI
-#define MEM_PAGE_SIZE 4096
-/// \ingroup MemPoolsAPI
-#define MEM_MIN_FREE 32
-/// \ingroup MemPoolsAPI
-#define MEM_MAX_FREE 65535 /* unsigned short is max number of items per chunk */
-
-class MemImplementingAllocator;
-
 /// memory usage totals as of latest MemPools::flushMeters() event
 extern Mem::PoolMeter TheMeter;

@@ -77,7 +68,7 @@ public:
      * Create an allocator with given name to allocate fixed-size objects
      * of the specified size.
      */
-    MemImplementingAllocator *create(const char *, size_t);
+    Mem::Allocator *create(const char *, size_t);

     /**
      * Sets upper limit in bytes to amount of free ram kept in pools. This is
@@ -118,7 +109,7 @@ public:

     void setDefaultPoolChunking(bool const &);

-    std::list<MemImplementingAllocator *> pools;
+    std::list<Mem::Allocator *> pools;

     bool defaultIsChunked = false;

 private:
@@ -128,38 +119,6 @@
     ssize_t idleLimit_ = (2 << 20);
 };

-/// \ingroup MemPoolsAPI
-class MemImplementingAllocator : public Mem::Allocator
-{
-public:
-    typedef Mem::PoolMeter PoolMeter; // TODO remove
-
-    MemImplementingAllocator(char const *aLabel, size_t aSize);
-
-    virtual PoolMeter &getMeter();
-    virtual void flushMetersFull();
-    virtual void flushMeters();
-    virtual bool idleTrigger(int shift) const = 0;
-    virtual void clean(time_t maxage) = 0;
-
-    /* Mem::Allocator API */
-    PoolMeter const &getMeter() const override;
-    void *alloc() override;
-    void freeOne(void *) override;
-    size_t objectSize() const override;
-    int getInUseCount() override = 0;
-
-protected:
-    virtual void *allocate() = 0;
-    virtual void deallocate(void *, bool aggressive) = 0;
-    PoolMeter meter;
-
-public:
-    size_t alloc_calls;
-    size_t free_calls;
-    size_t saved_calls;
-    size_t obj_size;
-};
-
 /// Creates a named MemPool of elements with the given size
 #define memPoolCreate MemPools::GetInstance().create
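Several files above carry "XXX: convert to MEMPROXY_CLASS() API" notes. That macro (declared in mem/AllocatorProxy.h) gives a class its own pooled operator new/delete, so callers stop invoking alloc()/freeOne() by hand. A sketch of the intended conversion, assuming the macro's usual placement; DigestNonce here is illustrative:

    #include "mem/AllocatorProxy.h"

    class DigestNonce
    {
        MEMPROXY_CLASS(DigestNonce);  // class-local new/delete backed by a lazily created pool
    public:
        DigestNonce() {}
    };

    // 'new DigestNonce' and 'delete noncePtr' now go through the proxy pool automatically.
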
diff --git a/src/mem/PoolChunked.cc b/src/mem/PoolChunked.cc
index 43b0180105..c397d00ffc 100644
--- a/src/mem/PoolChunked.cc
+++ b/src/mem/PoolChunked.cc
@@ -18,6 +18,9 @@
 #include

 #define MEM_MAX_MMAP_CHUNKS 2048
+#define MEM_PAGE_SIZE 4096
+#define MEM_MIN_FREE 32
+#define MEM_MAX_FREE 65535 /* unsigned short is max number of items per chunk */

 /*
  * Old way:
@@ -120,23 +123,24 @@ MemChunk::MemChunk(MemPoolChunked *aPool)
     void **Free = (void **)freeList;

     for (int i = 1; i < pool->chunk_capacity; ++i) {
-        *Free = (void *) ((char *) Free + pool->obj_size);
+        *Free = (void *) ((char *) Free + pool->objectSize);
         void **nextFree = (void **)*Free;
-        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
+        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->objectSize);
         Free = nextFree;
     }
     nextFreeChunk = pool->nextFreeChunk;
     pool->nextFreeChunk = this;

-    pool->getMeter().alloc += pool->chunk_capacity;
-    pool->getMeter().idle += pool->chunk_capacity;
+    pool->meter.alloc += pool->chunk_capacity;
+    pool->meter.idle += pool->chunk_capacity;
     ++pool->chunkCount;
     lastref = squid_curtime;
     pool->allChunks.insert(this, memCompChunks);
 }

 MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
-    MemImplementingAllocator(aLabel, aSize), chunk_size(0),
+    Mem::Allocator(aLabel, aSize),
+    chunk_size(0),
     chunk_capacity(0), chunkCount(0), freeCache(nullptr), nextFreeChunk(nullptr),
     Chunks(nullptr), allChunks(Splay<MemChunk *>())
 {
@@ -149,8 +153,8 @@ MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :

 MemChunk::~MemChunk()
 {
-    pool->getMeter().alloc -= pool->chunk_capacity;
-    pool->getMeter().idle -= pool->chunk_capacity;
+    pool->meter.alloc -= pool->chunk_capacity;
+    pool->meter.idle -= pool->chunk_capacity;
     -- pool->chunkCount;
     pool->allChunks.remove(this, memCompChunks);
     xfree(objCache);
@@ -166,11 +170,11 @@ MemPoolChunked::push(void *obj)
      * the object size here, but such condition is not safe.
      */
     if (doZero)
-        memset(obj, 0, obj_size);
+        memset(obj, 0, objectSize);
     Free = (void **)obj;
     *Free = freeCache;
     freeCache = obj;
-    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
+    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, objectSize);
 }

 /*
@@ -184,12 +188,12 @@ MemPoolChunked::get()
 {
     void **Free;

-    ++saved_calls;
+    ++countSavedAllocs;

     /* first, try cache */
     if (freeCache) {
         Free = (void **)freeCache;
-        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
+        (void) VALGRIND_MAKE_MEM_DEFINED(Free, objectSize);
         freeCache = *Free;
         *Free = nullptr;
         return Free;
@@ -197,7 +201,7 @@ MemPoolChunked::get()
     /* then try perchunk freelist chain */
     if (nextFreeChunk == nullptr) {
         /* no chunk with frees, so create new one */
-        -- saved_calls; // compensate for the ++ above
+        --countSavedAllocs; // compensate for the ++ above
         createChunk();
     }
     /* now we have some in perchunk freelist chain */
@@ -213,7 +217,7 @@ MemPoolChunked::get()
         /* last free in this chunk, so remove us from perchunk freelist chain */
         nextFreeChunk = chunk->nextFreeChunk;
     }
-    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
+    (void) VALGRIND_MAKE_MEM_DEFINED(Free, objectSize);
     return Free;
 }
@@ -269,20 +273,20 @@ MemPoolChunked::setChunkSize(size_t chunksize)
         return;

     csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
-    cap = csize / obj_size;
+    cap = csize / objectSize;

     if (cap < MEM_MIN_FREE)
         cap = MEM_MIN_FREE;

-    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
-        cap = MEM_CHUNK_MAX_SIZE / obj_size;
+    if (cap * objectSize > MEM_CHUNK_MAX_SIZE)
+        cap = MEM_CHUNK_MAX_SIZE / objectSize;

     if (cap > MEM_MAX_FREE)
         cap = MEM_MAX_FREE;

     if (cap < 1)
         cap = 1;

-    csize = cap * obj_size;
+    csize = cap * objectSize;
     csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */
-    cap = csize / obj_size;
+    cap = csize / objectSize;

     chunk_capacity = cap;
     chunk_size = csize;
@@ -296,9 +300,9 @@ MemPoolChunked::~MemPoolChunked()
 {
     MemChunk *chunk, *fchunk;

-    flushMetersFull();
+    flushCounters();
     clean(0);
-    assert(meter.inuse.currentLevel() == 0);
+    assert(getInUseCount() == 0);

     chunk = Chunks;
     while ( (fchunk = chunk) != nullptr) {
@@ -309,12 +313,6 @@ MemPoolChunked::~MemPoolChunked()

 }

-int
-MemPoolChunked::getInUseCount()
-{
-    return meter.inuse.currentLevel();
-}
-
 void *
 MemPoolChunked::allocate()
 {
@@ -326,7 +324,7 @@ MemPoolChunked::allocate()
 }

 void
-MemPoolChunked::deallocate(void *obj, bool)
+MemPoolChunked::deallocate(void *obj)
 {
     push(obj);
     assert(meter.inuse.currentLevel() > 0);
@@ -369,7 +367,7 @@ MemPoolChunked::clean(time_t maxage)
     if (!Chunks)
         return;

-    flushMetersFull();
+    flushCounters();
     convertFreeCacheToChunkFreeCache();
     /* Now we have all chunks in this pool cleared up, all free items returned to their home */
     /* We start now checking all chunks to see if we can release any */
@@ -440,9 +438,9 @@ MemPoolChunked::getStats(Mem::PoolStats &stats)
     clean((time_t) 555555); /* don't want to get chunks released before reporting */

     stats.pool = this;
-    stats.label = objectType();
+    stats.label = label;
     stats.meter = &meter;
-    stats.obj_size = obj_size;
+    stats.obj_size = objectSize;
     stats.chunk_capacity = chunk_capacity;

     /* gather stats for each Chunk */
@@ -464,8 +462,8 @@ MemPoolChunked::getStats(Mem::PoolStats &stats)
     stats.items_inuse += meter.inuse.currentLevel();
     stats.items_idle += meter.idle.currentLevel();

-    stats.overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;
+    stats.overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(label) + 1;

-    return meter.inuse.currentLevel();
+    return getInUseCount();
 }
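The setChunkSize() arithmetic above is easiest to follow with concrete numbers. Take objectSize = 40 and a requested chunksize of 16384: csize is already a multiple of MEM_PAGE_SIZE (4096), so cap = 16384 / 40 = 409, which sits inside the MEM_MIN_FREE (32) to MEM_MAX_FREE (65535) bounds; recomputing csize = 409 * 40 = 16360 and rounding back up to a page multiple gives 16384, so the pool ends up with chunk_capacity = 409 and chunk_size = 16384.
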
diff --git a/src/mem/PoolChunked.h b/src/mem/PoolChunked.h
index 835f67b838..71592806bc 100644
--- a/src/mem/PoolChunked.h
+++ b/src/mem/PoolChunked.h
@@ -9,7 +9,7 @@
 #ifndef _MEM_POOL_CHUNKED_H_
 #define _MEM_POOL_CHUNKED_H_

-#include "mem/Pool.h"
+#include "mem/Allocator.h"
 #include "splay.h"

 #define MEM_CHUNK_SIZE 4 * 4096 /* 16KB ... 4 * VM_PAGE_SZ */
@@ -18,29 +18,29 @@
 class MemChunk;

 /// \ingroup MemPoolsAPI
-class MemPoolChunked : public MemImplementingAllocator
+class MemPoolChunked : public Mem::Allocator
 {
 public:
     friend class MemChunk;
     MemPoolChunked(const char *label, size_t obj_size);
     ~MemPoolChunked() override;
     void convertFreeCacheToChunkFreeCache();
-    void clean(time_t maxage) override;
     void createChunk();
     void *get();
     void push(void *obj);

     /* Mem::Allocator API */
     size_t getStats(Mem::PoolStats &) override;
-    int getInUseCount() override;
     void setChunkSize(size_t) override;
+    bool idleTrigger(int) const override;
+    void clean(time_t) override;

 protected:
+    /* Mem::Allocator API */
     void *allocate() override;
-    void deallocate(void *, bool aggressive) override;
-public:
-    bool idleTrigger(int shift) const override;
+    void deallocate(void *) override;

+public:
     size_t chunk_size;
     int chunk_capacity;
     int chunkCount;
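MemChunk's constructor (in PoolChunked.cc above) threads the free list through the chunk itself: each free slot's first word holds the address of the next free slot, so idle objects cost no extra memory. A self-contained sketch of that layout (the capacity and objectSize values are arbitrary):

    #include <cstdlib>

    int main() {
        const int capacity = 8;
        const size_t objectSize = 32;      // assume pointer-aligned, as RoundedSize() guarantees
        char *chunk = static_cast<char *>(malloc(capacity * objectSize));
        void **slot = reinterpret_cast<void **>(chunk);
        for (int i = 1; i < capacity; ++i) {
            *slot = chunk + i * objectSize;        // slot i-1 stores the address of slot i
            slot = reinterpret_cast<void **>(*slot);
        }
        *slot = nullptr;                           // last slot ends the chain
        free(chunk);
        return 0;
    }
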
diff --git a/src/mem/PoolMalloc.cc b/src/mem/PoolMalloc.cc
index f5601c78ad..7a33eab94c 100644
--- a/src/mem/PoolMalloc.cc
+++ b/src/mem/PoolMalloc.cc
@@ -11,6 +11,7 @@
  */

 #include "squid.h"
+#include "mem/Pool.h"
 #include "mem/PoolMalloc.h"
 #include "mem/Stats.h"

@@ -29,12 +30,12 @@ MemPoolMalloc::allocate()
     }
     if (obj) {
         --meter.idle;
-        ++saved_calls;
+        ++countSavedAllocs;
     } else {
         if (doZero)
-            obj = xcalloc(1, obj_size);
+            obj = xcalloc(1, objectSize);
         else
-            obj = xmalloc(obj_size);
+            obj = xmalloc(objectSize);
         ++meter.alloc;
     }
     ++meter.inuse;
     return obj;
 }

 void
-MemPoolMalloc::deallocate(void *obj, bool aggressive)
+MemPoolMalloc::deallocate(void *obj)
 {
     --meter.inuse;
-    if (aggressive) {
+    if (MemPools::GetInstance().idleLimit() == 0) {
         xfree(obj);
         --meter.alloc;
     } else {
         if (doZero)
-            memset(obj, 0, obj_size);
+            memset(obj, 0, objectSize);
         ++meter.idle;
         freelist.push(obj);
     }
 }
@@ -61,38 +62,28 @@
 size_t
 MemPoolMalloc::getStats(Mem::PoolStats &stats)
 {
     stats.pool = this;
-    stats.label = objectType();
+    stats.label = label;
     stats.meter = &meter;
-    stats.obj_size = obj_size;
+    stats.obj_size = objectSize;
     stats.chunk_capacity = 0;

-    stats.chunks_alloc += 0;
-    stats.chunks_inuse += 0;
-    stats.chunks_partial += 0;
-    stats.chunks_free += 0;
-
     stats.items_alloc += meter.alloc.currentLevel();
     stats.items_inuse += meter.inuse.currentLevel();
     stats.items_idle += meter.idle.currentLevel();

-    stats.overhead += sizeof(MemPoolMalloc) + strlen(objectType()) + 1;
-
-    return meter.inuse.currentLevel();
-}
+    stats.overhead += sizeof(*this) + strlen(label) + 1;

-int
-MemPoolMalloc::getInUseCount()
-{
-    return meter.inuse.currentLevel();
+    return getInUseCount();
 }

-MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize)
+MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) :
+    Mem::Allocator(aLabel, aSize)
 {
 }

 MemPoolMalloc::~MemPoolMalloc()
 {
-    assert(meter.inuse.currentLevel() == 0);
+    assert(getInUseCount() == 0);
     clean(0);
 }
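Dropping the bool parameter moves a policy decision: freeOne() no longer computes an "aggressive" flag on every call; MemPoolMalloc::deallocate() now queries MemPools itself, which is why PoolMalloc.cc gains the mem/Pool.h include. The shape of that decision, simplified (cacheAsIdle is a hypothetical stand-in for the freelist push above):

    void returnObject(void *obj) {
        if (MemPools::GetInstance().idleLimit() == 0)
            xfree(obj);        // idle caching disabled: release to the system immediately
        else
            cacheAsIdle(obj);  // otherwise keep the object for reuse
    }
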
diff --git a/src/mem/PoolMalloc.h b/src/mem/PoolMalloc.h
index 5b33b168e0..9a8d764c3b 100644
--- a/src/mem/PoolMalloc.h
+++ b/src/mem/PoolMalloc.h
@@ -28,26 +28,27 @@
  * might be the way to go.
  */

-#include "mem/Pool.h"
+#include "mem/Allocator.h"

 #include <stack>

 /// \ingroup MemPoolsAPI
-class MemPoolMalloc : public MemImplementingAllocator
+class MemPoolMalloc : public Mem::Allocator
 {
 public:
     MemPoolMalloc(char const *label, size_t aSize);
     ~MemPoolMalloc() override;
-    bool idleTrigger(int shift) const override;
-    void clean(time_t maxage) override;

     /* Mem::Allocator API */
     size_t getStats(Mem::PoolStats &) override;
-    int getInUseCount() override;
+    bool idleTrigger(int) const override;
+    void clean(time_t) override;

 protected:
+    /* Mem::Allocator API */
     void *allocate() override;
-    void deallocate(void *, bool aggressive) override;
+    void deallocate(void *) override;
+
 private:
     std::stack<void *> freelist;
 };
diff --git a/src/mem/Stats.cc b/src/mem/Stats.cc
index a3f4cd2784..418557d0cf 100644
--- a/src/mem/Stats.cc
+++ b/src/mem/Stats.cc
@@ -7,6 +7,7 @@
  */

 #include "squid.h"
+#include "mem/Allocator.h"
 #include "mem/Pool.h"
 #include "mem/Stats.h"
diff --git a/src/mem/old_api.cc b/src/mem/old_api.cc
index 9fe397aa15..2524b016d7 100644
--- a/src/mem/old_api.cc
+++ b/src/mem/old_api.cc
@@ -16,8 +16,7 @@
 #include "fs_io.h"
 #include "icmp/net_db.h"
 #include "md5.h"
-#include "mem/forward.h"
-#include "mem/Meter.h"
+#include "mem/Allocator.h"
 #include "mem/Pool.h"
 #include "mem/Stats.h"
 #include "MemBuf.h"
@@ -100,9 +99,9 @@ GetStrPool(size_t type)
             strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
             strPools[i]->zeroBlocks(false);

-            if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
+            if (strPools[i]->objectSize != PoolAttrs[i].obj_size)
                 debugs(13, DBG_IMPORTANT, "WARNING: " << PoolAttrs[i].name <<
-                       " is " << strPools[i]->objectSize() <<
+                       " is " << strPools[i]->objectSize <<
                        " bytes instead of requested " <<
                        PoolAttrs[i].obj_size << " bytes");
         }
@@ -119,9 +118,9 @@ memFindStringPool(size_t net_size, bool fuzzy)
 {
     for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
         auto &pool = GetStrPool(i);
-        if (fuzzy && net_size < pool.objectSize())
+        if (fuzzy && net_size < pool.objectSize)
             return &pool;
-        if (net_size == pool.objectSize())
+        if (net_size == pool.objectSize)
             return &pool;
     }
     return nullptr;
@@ -139,12 +138,12 @@ memStringStats(std::ostream &stream)

     for (i = 0; i < mem_str_pool_count; ++i) {
         const auto &pool = GetStrPool(i);
-        const auto plevel = pool.getMeter().inuse.currentLevel();
-        stream << std::setw(20) << std::left << pool.objectType();
+        const auto plevel = pool.meter.inuse.currentLevel();
+        stream << std::setw(20) << std::left << pool.label;
         stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
-        stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
+        stream << "\t " << xpercentInt(plevel * pool.objectSize, StrVolumeMeter.currentLevel()) << "\n";
         pooled_count += plevel;
-        pooled_volume += plevel * pool.objectSize();
+        pooled_volume += plevel * pool.objectSize;
     }

     /* malloc strings */
@@ -232,7 +231,7 @@ memAllocString(size_t net_size, size_t * gross_size)
     assert(gross_size);

     if (const auto pool = memFindStringPool(net_size, true)) {
-        *gross_size = pool->objectSize();
+        *gross_size = pool->objectSize;
         assert(*gross_size >= net_size);
         ++StrCountMeter;
         StrVolumeMeter += *gross_size;
@@ -252,7 +251,7 @@ memAllocRigid(size_t net_size)

     if (const auto pool = memFindStringPool(net_size, true)) {
         ++StrCountMeter;
-        StrVolumeMeter += pool->objectSize();
+        StrVolumeMeter += pool->objectSize;
         return pool->alloc();
     }

@@ -267,7 +266,7 @@ memStringCount()
     size_t result = 0;

     for (int counter = 0; counter < mem_str_pool_count; ++counter)
-        result += GetStrPool(counter).inUseCount();
+        result += GetStrPool(counter).getInUseCount();

     return result;
 }
@@ -294,7 +293,7 @@ memFreeRigid(void *buf, size_t net_size)

     if (const auto pool = memFindStringPool(net_size, true)) {
         pool->freeOne(buf);
-        StrVolumeMeter -= pool->objectSize();
+        StrVolumeMeter -= pool->objectSize;
         --StrCountMeter;
         return;
     }
@@ -501,7 +500,7 @@ memClean(void)
 int
 memInUse(mem_type type)
 {
-    return GetPool(type)->inUseCount();
+    return GetPool(type)->getInUseCount();
 }

 /* ick */
@@ -689,7 +688,7 @@ Mem::Report(std::ostream &stream)
         PoolStats mp_stats;
         pool->getStats(mp_stats);

-        if (mp_stats.pool->getMeter().gb_allocated.count > 0)
+        if (mp_stats.pool->meter.gb_allocated.count > 0)
             usedPools.emplace_back(mp_stats);
         else
             ++not_used;
diff --git a/src/store.cc b/src/store.cc
index 3f87d36f7f..44b866582e 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -59,6 +59,7 @@
 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
  * XXX: convert to MEMPROXY_CLASS() API
  */
+#include "mem/Allocator.h"
 #include "mem/Pool.h"

 #include
diff --git a/src/tests/stub_libmem.cc b/src/tests/stub_libmem.cc
index 5900b77d74..5976a79e63 100644
--- a/src/tests/stub_libmem.cc
+++ b/src/tests/stub_libmem.cc
@@ -19,7 +19,6 @@
 void *Mem::AllocatorProxy::alloc() {return xmalloc(64*1024);}
 void Mem::AllocatorProxy::freeOne(void *address) {xfree(address);}
 int Mem::AllocatorProxy::inUseCount() const {return 0;}
-//Mem::PoolMeter const &Mem::AllocatorProxy::getMeter() const STUB_RETSTATREF(PoolMeter)
 size_t Mem::AllocatorProxy::getStats(PoolStats &) STUB_RETVAL(0)

 #include "mem/forward.h"
@@ -82,18 +81,9 @@
 static MemPools tmpMemPools;
 MemPools &MemPools::GetInstance() {return tmpMemPools;}
 MemPools::MemPools() STUB_NOP
 void MemPools::flushMeters() STUB
-MemImplementingAllocator * MemPools::create(const char *, size_t) STUB_RETVAL(nullptr);
+Mem::Allocator * MemPools::create(const char *, size_t) STUB_RETVAL(nullptr);
 void MemPools::clean(time_t) STUB
 void MemPools::setDefaultPoolChunking(bool const &) STUB
-//MemImplementingAllocator::MemImplementingAllocator(char const *, size_t) STUB_NOP
-Mem::PoolMeter const &MemImplementingAllocator::getMeter() const STUB_RETSTATREF(PoolMeter)
-Mem::PoolMeter &MemImplementingAllocator::getMeter() STUB_RETSTATREF(PoolMeter)
-void MemImplementingAllocator::flushMetersFull() STUB
-void MemImplementingAllocator::flushMeters() STUB
-void *MemImplementingAllocator::alloc() STUB_RETVAL(nullptr)
-void MemImplementingAllocator::freeOne(void *) STUB
-size_t MemImplementingAllocator::objectSize() const { return obj_size; }
-
 #include "mem/Stats.h"
 size_t Mem::GlobalStats(PoolStats &) STUB_RETVAL(0)
diff --git a/src/tests/testMem.cc b/src/tests/testMem.cc
index 44926c6d8e..6fce58c3a3 100644
--- a/src/tests/testMem.cc
+++ b/src/tests/testMem.cc
@@ -7,7 +7,7 @@
  */

 #include "squid.h"
-#include "mem/forward.h"
+#include "mem/Allocator.h"
 #include "mem/Pool.h"
 #include "tests/testMem.h"
 #include "unitTestMain.h"