From: Henrik Nordstrom
Date: Sat, 29 May 2010 00:59:35 +0000 (+0200)
Subject: Simple freelist in malloc based pool allocator
X-Git-Tag: SQUID_3_2_0_1~179^2~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2be7332c3477fbd73444b14db884ed2f86baf8f5;p=thirdparty%2Fsquid.git

Simple freelist in malloc based pool allocator
---

diff --git a/include/MemPool.h b/include/MemPool.h
index f18b6207b1..111e25cbea 100644
--- a/include/MemPool.h
+++ b/include/MemPool.h
@@ -320,7 +320,7 @@ class MemImplementingAllocator : public MemAllocator
 {
 public:
     MemImplementingAllocator(char const *aLabel, size_t aSize);
-    ~MemImplementingAllocator();
+    virtual ~MemImplementingAllocator();
     virtual MemPoolMeter const &getMeter() const;
     virtual MemPoolMeter &getMeter();
     virtual void flushMetersFull();
@@ -342,7 +342,7 @@ public:
     virtual int getInUseCount() = 0;
 protected:
     virtual void *allocate() = 0;
-    virtual void deallocate(void *) = 0;
+    virtual void deallocate(void *, bool aggressive) = 0;
     MemPoolMeter meter;
     int memPID;
 public:
diff --git a/include/MemPoolChunked.h b/include/MemPoolChunked.h
index b3b82c023f..00657908db 100644
--- a/include/MemPoolChunked.h
+++ b/include/MemPoolChunked.h
@@ -38,7 +38,7 @@ public:
     virtual int getInUseCount();
 protected:
     virtual void *allocate();
-    virtual void deallocate(void *);
+    virtual void deallocate(void *, bool aggressive);
 public:
     /**
      * Allows you tune chunk size of pooling. Objects are allocated in chunks
diff --git a/include/MemPoolMalloc.h b/include/MemPoolMalloc.h
index cef967ec1d..51a15c75c1 100644
--- a/include/MemPoolMalloc.h
+++ b/include/MemPoolMalloc.h
@@ -27,6 +27,7 @@ class MemPoolMalloc : public MemImplementingAllocator
 {
 public:
     MemPoolMalloc(char const *label, size_t aSize);
+    ~MemPoolMalloc();
 
     virtual bool idleTrigger(int shift) const;
     virtual void clean(time_t maxage);
@@ -39,8 +40,9 @@ public:
     virtual int getInUseCount();
 protected:
     virtual void *allocate();
-    virtual void deallocate(void *);
+    virtual void deallocate(void *, bool aggressive);
 private:
+    Stack<> freelist;
 };
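These header changes are the entire new surface area: deallocate() on every concrete pool now takes an `aggressive` hint, and MemPoolMalloc keeps idle objects on a LIFO `Stack<> freelist` instead of returning them to the system. The sketch below restates the mechanism in isolation; it is a minimal illustration that substitutes std::vector for Squid's own Stack<> container, and the FreelistPool name is hypothetical, not part of this patch.

    // Minimal sketch of the freelist scheme introduced above.
    // std::vector stands in for Squid's Stack<>; FreelistPool is a
    // hypothetical name used for illustration only.
    #include <cstdlib>
    #include <cstring>
    #include <vector>

    class FreelistPool {
    public:
        explicit FreelistPool(size_t objSize) : objSize_(objSize) {}

        void *allocate() {
            if (!freelist_.empty()) {          // reuse an idle object (LIFO)
                void *obj = freelist_.back();
                freelist_.pop_back();
                return obj;
            }
            return std::calloc(1, objSize_);   // grow on demand, zero-filled
        }

        void deallocate(void *obj, bool aggressive) {
            if (aggressive) {
                std::free(obj);                // idle caching not wanted
            } else {
                std::memset(obj, 0, objSize_); // next allocate() sees zeroed memory
                freelist_.push_back(obj);      // park for reuse
            }
        }

    private:
        size_t objSize_;
        std::vector<void *> freelist_;
    };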
diff --git a/lib/MemPool.cc b/lib/MemPool.cc
index 07c2ac2362..8481278534 100644
--- a/lib/MemPool.cc
+++ b/lib/MemPool.cc
@@ -33,54 +33,6 @@
  *
  */
 
-/*
- * Old way:
- *   xmalloc each item separately, upon free stack into idle pool array.
- *   each item is individually malloc()ed from system, imposing libmalloc
- *   overhead, and additionally we add our overhead of pointer size per item
- *   as we keep a list of pointer to free items.
- *
- * Chunking:
- *   xmalloc Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
- *   limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
- *   MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size.
- *   Minimum Chunk size is MEM_CHUNK_SIZE (16K).
- *   A number of items fits into a single chunk, depending on item size.
- *   Maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
- *
- *   We populate Chunk with a linkedlist, each node at first word of item,
- *   and pointing at next free item. Chunk->FreeList is pointing at first
- *   free node. Thus we stuff free housekeeping into the Chunk itself, and
- *   omit pointer overhead per item.
- *
- *   Chunks are created on demand, and new chunks are inserted into linklist
- *   of chunks so that Chunks with smaller pointer value are placed closer
- *   to the linklist head. Head is a hotspot, servicing most of requests, so
- *   slow sorting occurs and Chunks in highest memory tend to become idle
- *   and freeable.
- *
- *   event is registered that runs every 15 secs and checks reference time
- *   of each idle chunk. If a chunk is not referenced for 15 secs, it is
- *   released.
- *
- *   [If mem_idle_limit is exceeded with pools, every chunk that becomes
- *   idle is immediately considered for release, unless this is the only
- *   chunk with free items in it.] (not implemented)
- *
- *   In cachemgr output, there are new columns for chunking. Special item,
- *   Frag, is shown to estimate approximately fragmentation of chunked
- *   pools. Fragmentation is calculated by taking amount of items in use,
- *   calculating needed amount of chunks to fit all, and then comparing to
- *   actual amount of chunks in use. Frag number, in percent, is showing
- *   how many percent of chunks are in use excessively. 100% meaning that
- *   twice the needed amount of chunks are in use.
- *   "part" item shows number of chunks partially filled. This shows how
- *   badly fragmentation is spread across all chunks.
- *
- * Andres Kroonmaa.
- * Copyright (c) 2003, Robert Collins
- */
-
 #include "config.h"
 #if HAVE_ASSERT_H
 #include <assert.h>
@@ -91,7 +43,6 @@
 #include "MemPoolMalloc.h"
 
 #define FLUSH_LIMIT 1000	/* Flush memPool counters to memMeters after flush limit calls */
-#define MEM_MAX_MMAP_CHUNKS 2048
 
 #if HAVE_STRING_H
 #include <string.h>
@@ -173,9 +124,6 @@ MemPools::MemPools() : pools(NULL), mem_idle_limit(2 * MB),
     char *cfg = getenv("MEMPOOLS");
     if (cfg)
         defaultIsChunked = atoi(cfg);
-#if HAVE_MALLOPT && M_MMAP_MAX
-    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
-#endif
 }
 
 MemImplementingAllocator *
@@ -299,7 +247,7 @@ MemImplementingAllocator::freeOne(void *obj)
 {
     assert(obj != NULL);
     (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
-    deallocate(obj);
+    deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
     ++free_calls;
 }
diff --git a/lib/MemPoolChunked.cc b/lib/MemPoolChunked.cc
index 7492b17ade..ec3ca9bf07 100644
--- a/lib/MemPoolChunked.cc
+++ b/lib/MemPoolChunked.cc
@@ -88,7 +88,6 @@
 #include "MemPoolChunked.h"
 
-#define FLUSH_LIMIT 1000	/* Flush memPool counters to memMeters after flush limit calls */
 #define MEM_MAX_MMAP_CHUNKS 2048
 
 #if HAVE_STRING_H
 #include <string.h>
@@ -175,6 +174,10 @@ MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) : MemImplementi
     next = 0;
 
     setChunkSize(MEM_CHUNK_SIZE);
+
+#if HAVE_MALLOPT && M_MMAP_MAX
+    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
+#endif
 }
 
 MemChunk::~MemChunk()
@@ -318,7 +321,7 @@ MemPoolChunked::~MemPoolChunked()
     flushMetersFull();
     clean(0);
-    assert(getMeter().inuse.level == 0 && "While trying to destroy pool");
+    assert(meter.inuse.level == 0 && "While trying to destroy pool");
 
     chunk = Chunks;
     while ( (fchunk = chunk) != NULL) {
@@ -332,7 +335,7 @@ MemPoolChunked::~MemPoolChunked()
 int
 MemPoolChunked::getInUseCount()
 {
-    return getMeter().inuse.level;
+    return meter.inuse.level;
 }
 
 void *
@@ -346,7 +349,7 @@ MemPoolChunked::allocate()
 }
 
 void
-MemPoolChunked::deallocate(void *obj)
+MemPoolChunked::deallocate(void *obj, bool aggressive)
 {
     push(obj);
     assert(meter.inuse.level > 0);
@@ -449,7 +452,7 @@ MemPoolChunked::clean(time_t maxage)
 bool
 MemPoolChunked::idleTrigger(int shift) const
 {
-    return getMeter().idle.level > (chunk_capacity << shift);
+    return meter.idle.level > (chunk_capacity << shift);
 }
 
 /*
@@ -469,7 +472,7 @@ MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
 
     stats->pool = this;
     stats->label = objectType();
-    stats->meter = &getMeter();
+    stats->meter = &meter;
     stats->obj_size = obj_size;
     stats->chunk_capacity = chunk_capacity;
@@ -488,11 +491,11 @@ MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
     stats->chunks_partial += chunks_partial;
     stats->chunks_free += chunks_free;
 
-    stats->items_alloc += getMeter().alloc.level;
-    stats->items_inuse += getMeter().inuse.level;
-    stats->items_idle += getMeter().idle.level;
+    stats->items_alloc += meter.alloc.level;
+    stats->items_inuse += meter.inuse.level;
+    stats->items_idle += meter.idle.level;
 
     stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;
 
-    return getMeter().inuse.level;
+    return meter.inuse.level;
 }
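One detail worth pulling out before the malloc pool changes below: the `aggressive` flag is computed in MemImplementingAllocator::freeOne() as `mem_idle_limit == 0`, so deallocation bypasses the freelist exactly when idle memory pooling is disabled. Both pool flavours must keep the meters consistent, with alloc.level == inuse.level + idle.level at all times. A compact restatement of those transitions, using illustrative names rather than Squid's memMeter API:

    // Meter bookkeeping implied by the new code paths; names are
    // illustrative, not Squid's. Invariant: alloc == inuse + idle.
    struct Meters { int alloc, inuse, idle; };

    void onAllocate(Meters &m, bool reusedFromFreelist) {
        if (reusedFromFreelist)
            --m.idle;       // moved from the freelist to the caller
        else
            ++m.alloc;      // fresh object obtained via xcalloc()
        ++m.inuse;
    }

    void onDeallocate(Meters &m, bool aggressive) {
        --m.inuse;
        if (aggressive)
            --m.alloc;      // returned to the system via xfree()
        else
            ++m.idle;       // parked on the freelist
    }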
diff --git a/lib/MemPoolMalloc.cc b/lib/MemPoolMalloc.cc
index fbb19991e9..3f07510199 100644
--- a/lib/MemPoolMalloc.cc
+++ b/lib/MemPoolMalloc.cc
@@ -3,7 +3,7 @@
  * $Id$
  *
  * DEBUG: section 63    Low Level Memory Pool Management
- * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
+ * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins, Henrik Nordstrom
  *
  * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
  * ----------------------------------------------------------
@@ -54,17 +54,30 @@ extern time_t squid_curtime;
 void *
 MemPoolMalloc::allocate()
 {
-    memMeterInc(meter.alloc);
+    void *obj = freelist.pop();
+    if (obj) {
+        memMeterDec(meter.idle);
+    } else {
+        obj = xcalloc(1, obj_size);
+        memMeterInc(meter.alloc);
+    }
     memMeterInc(meter.inuse);
-    return xcalloc(1, obj_size);
+    return obj;
 }
 
 void
-MemPoolMalloc::deallocate(void *obj)
+MemPoolMalloc::deallocate(void *obj, bool aggressive)
 {
     memMeterDec(meter.inuse);
-    memMeterDec(meter.alloc);
-    xfree(obj);
+    if (aggressive) {
+        xfree(obj);
+        memMeterDec(meter.alloc);
+    } else {
+        if (doZeroOnPush)
+            memset(obj, 0, obj_size);
+        memMeterInc(meter.idle);
+        freelist.push_back(obj);
+    }
 }
 
 /* TODO extract common logic to MemAllocate */
@@ -76,7 +89,7 @@ MemPoolMalloc::getStats(MemPoolStats * stats, int accumulate)
 
     stats->pool = this;
     stats->label = objectType();
-    stats->meter = &getMeter();
+    stats->meter = &meter;
     stats->obj_size = obj_size;
     stats->chunk_capacity = 0;
@@ -104,14 +117,25 @@ MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) : MemImplementing
 {
 }
 
+MemPoolMalloc::~MemPoolMalloc()
+{
+    assert(meter.inuse.level == 0 && "While trying to destroy pool");
+    clean(0);
+}
+
 bool
 MemPoolMalloc::idleTrigger(int shift) const
 {
-    return false;
+    return freelist.count >> (shift ? 8 : 0);
 }
 
 void
 MemPoolMalloc::clean(time_t maxage)
 {
+    while (void *obj = freelist.pop()) {
+        memMeterDec(meter.idle);
+        memMeterDec(meter.alloc);
+        xfree(obj);
+    }
 }
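Taken together, the two maintenance hooks behave as follows, assuming only what is visible in this diff: idleTrigger(0) is true whenever anything sits on the freelist, while any shift >= 1 demands roughly 256 idle objects (`freelist.count >> 8`; unlike the chunked pool's `chunk_capacity << shift` test, the magnitude of shift beyond 1 is ignored). clean() then drains the freelist unconditionally, ignoring maxage. A hypothetical driver showing the intended interplay; maintainPool() is not part of this patch:

    // Hypothetical periodic maintenance, sketched against the public
    // pool interface used in this patch.
    #include "MemPoolMalloc.h"

    void maintainPool(MemPoolMalloc &pool, bool memoryPressure) {
        // Under pressure, reclaim as soon as anything is idle;
        // otherwise wait until ~256 idle objects have accumulated.
        const int shift = memoryPressure ? 0 : 1;
        if (pool.idleTrigger(shift))
            pool.clean(0);  // xfree() every idle object, adjusting the meters
    }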