{
public:
MemImplementingAllocator(char const *aLabel, size_t aSize);
- ~MemImplementingAllocator();
+ virtual ~MemImplementingAllocator();
virtual MemPoolMeter const &getMeter() const;
virtual MemPoolMeter &getMeter();
virtual void flushMetersFull();
virtual int getInUseCount() = 0;
protected:
virtual void *allocate() = 0;
- virtual void deallocate(void *) = 0;
+ virtual void deallocate(void *, bool aggressive) = 0;
MemPoolMeter meter;
int memPID;
public:
virtual int getInUseCount();
protected:
virtual void *allocate();
- virtual void deallocate(void *);
+ virtual void deallocate(void *, bool aggressive);
public:
/**
* Allows you to tune the chunk size of pooling. Objects are allocated in chunks
{
public:
MemPoolMalloc(char const *label, size_t aSize);
+ ~MemPoolMalloc();
virtual bool idleTrigger(int shift) const;
virtual void clean(time_t maxage);
virtual int getInUseCount();
protected:
virtual void *allocate();
- virtual void deallocate(void *);
+ virtual void deallocate(void *, bool aggressive);
private:
+ Stack<void *> freelist;
};
*
*/
-/*
- * Old way:
- * xmalloc each item separately; upon free, stack it into an idle pool array.
- * Each item is individually malloc()ed from the system, imposing libmalloc
- * overhead, and we additionally add our own overhead of one pointer per item,
- * as we keep a list of pointers to free items.
- *
- * Chunking:
- * xmalloc Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
- * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
- * MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size.
- * Minimum Chunk size is MEM_CHUNK_SIZE (16K).
- * A number of items fits into a single chunk, depending on item size.
- * Maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
- *
- * We populate the Chunk with a linked list: each node sits in the first
- * word of an item and points at the next free item. Chunk->FreeList points
- * at the first free node. Thus we stuff the free-list housekeeping into the
- * Chunk itself and omit the per-item pointer overhead (see the sketch after
- * this comment).
- *
- * Chunks are created on demand, and new chunks are inserted into the linked
- * list of chunks so that Chunks with smaller pointer values are placed
- * closer to the list head. The head is a hotspot, servicing most requests,
- * so slow sorting occurs and Chunks at the highest addresses tend to become
- * idle and freeable.
- *
- * An event is registered that runs every 15 seconds and checks the
- * reference time of each idle chunk. If a chunk has not been referenced
- * for 15 seconds, it is released.
- *
- * [If mem_idle_limit is exceeded with pools, every chunk that becomes
- * idle is immediately considered for release, unless this is the only
- * chunk with free items in it.] (not implemented)
- *
- * In cachemgr output, there are new columns for chunking. A special item,
- * Frag, is shown to give a rough estimate of the fragmentation of chunked
- * pools. Fragmentation is calculated by taking the number of items in use,
- * computing the number of chunks needed to fit them all, and then comparing
- * that to the actual number of chunks in use. The Frag number, in percent,
- * shows by what percentage the chunks in use exceed what is needed; 100%
- * means that twice the needed number of chunks are in use.
- * The "part" item shows the number of partially filled chunks, which shows
- * how badly fragmentation is spread across all chunks.
- *
- * Andres Kroonmaa.
- * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
- */
-
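To make the chunking description above concrete, here is a minimal, illustrative sketch of the two ideas it relies on: rounding a chunk size against MEM_PAGE_SIZE, MEM_CHUNK_SIZE, MEM_CHUNK_MAX_SIZE, MEM_MIN_FREE and MEM_MAX_FREE, and threading the free list through the first word of each item so that only a single Chunk->FreeList-style head pointer is needed. The function names and exact rounding rules are assumptions for illustration, not the actual MemPoolChunked/MemChunk code.

#include <cstddef>

static const size_t MEM_PAGE_SIZE      = 4096;        /* 4K   */
static const size_t MEM_CHUNK_SIZE     = 16 * 1024;   /* 16K  */
static const size_t MEM_CHUNK_MAX_SIZE = 256 * 1024;  /* 256K */
static const size_t MEM_MIN_FREE       = 32;
static const size_t MEM_MAX_FREE       = 65535;

/* Hypothetical: pick a chunk size for items of obj_size bytes, following
 * the rules in the comment above (fit at least MEM_MIN_FREE items, round
 * up to MEM_PAGE_SIZE, stay within MEM_CHUNK_SIZE..MEM_CHUNK_MAX_SIZE,
 * and never exceed MEM_MAX_FREE items per chunk). */
static size_t
exampleChunkSize(size_t obj_size)
{
    size_t csize = obj_size * MEM_MIN_FREE;
    if (csize < MEM_CHUNK_SIZE)
        csize = MEM_CHUNK_SIZE;
    if (csize > MEM_CHUNK_MAX_SIZE)
        csize = MEM_CHUNK_MAX_SIZE;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;
    if (csize / obj_size > MEM_MAX_FREE)
        csize = obj_size * MEM_MAX_FREE;
    return csize;
}

/* Hypothetical: thread a free list through a freshly allocated chunk.
 * The first word of every item points at the next free item, so the chunk
 * needs only one head pointer (the Chunk->FreeList role) and no per-item
 * bookkeeping outside the chunk. Requires obj_size >= sizeof(void *). */
static void *
exampleThreadFreeList(void *chunkMem, size_t obj_size, size_t capacity)
{
    if (capacity == 0)
        return NULL;
    char *item = static_cast<char *>(chunkMem);
    for (size_t i = 0; i + 1 < capacity; ++i, item += obj_size)
        *reinterpret_cast<void **>(item) = item + obj_size; /* point at the next item */
    *reinterpret_cast<void **>(item) = NULL;                /* last item ends the list */
    return chunkMem;                                        /* head of the free list */
}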
#include "config.h"
#if HAVE_ASSERT_H
#include <assert.h>
#include "MemPoolMalloc.h"
#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
-#define MEM_MAX_MMAP_CHUNKS 2048
#if HAVE_STRING_H
#include <string.h>
char *cfg = getenv("MEMPOOLS");
if (cfg)
defaultIsChunked = atoi(cfg);
-#if HAVE_MALLOPT && M_MMAP_MAX
- mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
-#endif
}
MemImplementingAllocator *
{
assert(obj != NULL);
(void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
- deallocate(obj);
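+ /* When idle pooling is disabled (mem_idle_limit == 0), free aggressively;
+ * otherwise the pool may keep the freed object on its idle list. */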
+ deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
++free_calls;
}
#include "MemPoolChunked.h"
-#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
#define MEM_MAX_MMAP_CHUNKS 2048
#if HAVE_STRING_H
next = 0;
setChunkSize(MEM_CHUNK_SIZE);
+
+#if HAVE_MALLOPT && M_MMAP_MAX
+ mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
+#endif
}
MemChunk::~MemChunk()
flushMetersFull();
clean(0);
- assert(getMeter().inuse.level == 0 && "While trying to destroy pool");
+ assert(meter.inuse.level == 0 && "While trying to destroy pool");
chunk = Chunks;
while ( (fchunk = chunk) != NULL) {
int
MemPoolChunked::getInUseCount()
{
- return getMeter().inuse.level;
+ return meter.inuse.level;
}
void *
}
void
-MemPoolChunked::deallocate(void *obj)
+MemPoolChunked::deallocate(void *obj, bool aggressive)
{
push(obj);
assert(meter.inuse.level > 0);
bool
MemPoolChunked::idleTrigger(int shift) const
{
- return getMeter().idle.level > (chunk_capacity << shift);
+ return meter.idle.level > (chunk_capacity << shift);
}
/*
stats->pool = this;
stats->label = objectType();
- stats->meter = &getMeter();
+ stats->meter = &meter;
stats->obj_size = obj_size;
stats->chunk_capacity = chunk_capacity;
stats->chunks_partial += chunks_partial;
stats->chunks_free += chunks_free;
- stats->items_alloc += getMeter().alloc.level;
- stats->items_inuse += getMeter().inuse.level;
- stats->items_idle += getMeter().idle.level;
+ stats->items_alloc += meter.alloc.level;
+ stats->items_inuse += meter.inuse.level;
+ stats->items_idle += meter.idle.level;
stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;
- return getMeter().inuse.level;
+ return meter.inuse.level;
}
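The Frag estimate described in the removed overview can be derived from the numbers getStats() fills in above (items in use, chunk capacity, chunks in use). The helper below is a hypothetical sketch of that arithmetic, not the cachemgr reporting code; the name exampleFragPercent and the integer rounding are assumptions.

/* Hypothetical sketch: fragmentation as the percentage of chunks in use
 * beyond the minimum needed to hold all in-use items. 100% means twice
 * as many chunks are in use as would be needed. */
static int
exampleFragPercent(int items_inuse, int chunk_capacity, int chunks_inuse)
{
    if (chunk_capacity <= 0 || chunks_inuse <= 0)
        return 0;
    int needed = (items_inuse + chunk_capacity - 1) / chunk_capacity;
    if (needed < 1)
        needed = 1;                          /* at least one chunk is always needed */
    return ((chunks_inuse - needed) * 100) / needed;
}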
* $Id$
*
* DEBUG: section 63 Low Level Memory Pool Management
- * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
+ * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins, Henrik Nordstrom
*
* SQUID Internet Object Cache http://squid.nlanr.net/Squid/
* ----------------------------------------------------------
void *
MemPoolMalloc::allocate()
{
- memMeterInc(meter.alloc);
+ void *obj = freelist.pop();
+ if (obj) {
+ memMeterDec(meter.idle);
+ } else {
+ obj = xcalloc(1, obj_size);
+ memMeterInc(meter.alloc);
+ }
memMeterInc(meter.inuse);
- return xcalloc(1, obj_size);
+ return obj;
}
void
-MemPoolMalloc::deallocate(void *obj)
+MemPoolMalloc::deallocate(void *obj, bool aggressive)
{
memMeterDec(meter.inuse);
- memMeterDec(meter.alloc);
- xfree(obj);
+ if (aggressive) {
+ xfree(obj);
+ memMeterDec(meter.alloc);
+ } else {
+ if (doZeroOnPush)
+ memset(obj, 0, obj_size);
+ memMeterInc(meter.idle);
+ freelist.push_back(obj);
+ }
}
/* TODO extract common logic to MemAllocate */
stats->pool = this;
stats->label = objectType();
- stats->meter = &getMeter();
+ stats->meter = &meter;
stats->obj_size = obj_size;
stats->chunk_capacity = 0;
{
}
+MemPoolMalloc::~MemPoolMalloc()
+{
+ assert(meter.inuse.level == 0 && "While trying to destroy pool");
+ clean(0);
+}
+
bool
MemPoolMalloc::idleTrigger(int shift) const
{
- return false;
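+ /* Any idle object reports idle pressure at shift 0; with a non-zero shift,
+ * at least 256 idle objects are required (freelist.count >> 8). */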
+ return freelist.count >> (shift ? 8 : 0);
}
void
MemPoolMalloc::clean(time_t maxage)
{
+ while (void *obj = freelist.pop()) {
+ memMeterDec(meter.idle);
+ memMeterDec(meter.alloc);
+ xfree(obj);
+ }
}