/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * DEBUG: section 63    Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */
#include "squid.h"
#include "mem/PoolChunked.h"

#include <cassert>
#include <cstring>

#define MEM_MAX_MMAP_CHUNKS 2048
/*
 * Old way:
 *   xmalloc each item separately; upon free, stack it into the idle pool array.
 *   Each item is individually malloc()ed from the system, imposing libmalloc
 *   overhead, and we additionally add our own overhead of one pointer per
 *   item, as we keep a list of pointers to free items.
 *
 * Chunking:
 *   xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array,
 *   but limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded
 *   up to MEM_PAGE_SIZE (4K), trying to keep chunks at multiples of the VM
 *   page size. The minimum Chunk size is MEM_CHUNK_SIZE (16K).
 *   The number of items that fit into a single chunk depends on the item
 *   size; the maximum number of items per chunk is MEM_MAX_FREE (65535).
 *
 *   We populate each Chunk with a linked list whose nodes live in the first
 *   word of each item and point at the next free item. Chunk->FreeList points
 *   at the first free node, as illustrated below. Thus we stuff the free-list
 *   housekeeping into the Chunk itself and avoid any per-item pointer
 *   overhead.
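 *
 *   For illustration only (hypothetical numbers, not code from this file):
 *   with obj_size 1024 and a fresh chunk arena starting at address A, the
 *   MemChunk constructor below threads the list through the items
 *   themselves:
 *
 *       first word of item at A+0    == A+1024
 *       first word of item at A+1024 == A+2048
 *       ...
 *       Chunk->FreeList              == A+0
 *
 *   so idle items cost no memory beyond the arena they already occupy.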
 *
 *   Chunks are created on demand, and new chunks are inserted into the
 *   linked list of chunks so that Chunks with smaller pointer values sit
 *   closer to the list head. The head is a hotspot servicing most of the
 *   requests, so a slow sorting occurs and Chunks in the highest memory
 *   tend to become idle and freeable.
 *
 *   An event is registered that runs every 15 seconds and checks the
 *   reference time of each idle chunk. If a chunk has not been referenced
 *   for 15 seconds, it is released.
 *
 *   [If mem_idle_limit is exceeded with pools, every chunk that becomes
 *   idle is immediately considered for release, unless this is the only
 *   chunk with free items in it.] (not implemented)
 *
 *   In cachemgr output, there are new columns for chunking. A special item,
 *   Frag, is shown to roughly estimate the fragmentation of chunked pools.
 *   Fragmentation is calculated by taking the number of items in use,
 *   computing the number of chunks needed to fit them all, and comparing
 *   that to the actual number of chunks in use. The Frag number, in percent,
 *   shows by how much the chunks in use exceed the need; 100% means that
 *   twice the needed number of chunks is in use.
 *   The "part" item shows the number of partially filled chunks, i.e. how
 *   badly the fragmentation is spread across all chunks.
 *
 *   Andres Kroonmaa.
 */
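/*
 * Worked example of the Frag figure above (hypothetical numbers): with
 * chunk_capacity 100 and 150 items in use, two chunks would suffice; if
 * four chunks are actually in use, Frag = (4 - 2) / 2 = 100%, i.e. twice
 * the needed number of chunks is occupied.
 */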
extern time_t squid_curtime;
/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}
/* Compare object to chunk */
static int
memCompObjChunks(void * const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunk's arena */
    if (obj < chunk->objCache)
        return -1;

    /* object is within the pool */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;

    /* object is above the pool */
    return 1;
}
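/*
 * Together, the two comparators let the pool keep its chunks in a splay
 * tree keyed by arena address: memCompChunks orders chunks among
 * themselves, while memCompObjChunks lets a lookup such as
 *
 *     allChunks.find(obj, memCompObjChunks)
 *
 * map an arbitrary object pointer back to the chunk whose arena contains
 * it, as convertFreeCacheToChunkFreeCache() does below.
 */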
MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks's first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    if (pool->doZero)
        objCache = xcalloc(1, pool->chunk_size);
    else
        objCache = xmalloc(pool->chunk_size);

    freeList = objCache;
    void **Free = (void **)freeList;

    /* thread the in-chunk free list through the first word of each item */
    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    pool->getMeter().alloc += pool->chunk_capacity;
    pool->getMeter().idle += pool->chunk_capacity;
    ++pool->chunkCount;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}
MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
    MemImplementingAllocator(aLabel, aSize), chunk_size(0),
    chunk_capacity(0), chunkCount(0), freeCache(0), nextFreeChunk(0),
    Chunks(0), allChunks(Splay<MemChunk *>())
{
    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}
MemChunk::~MemChunk()
{
    pool->getMeter().alloc -= pool->chunk_capacity;
    pool->getMeter().idle -= pool->chunk_capacity;
    -- pool->chunkCount;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}
void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as used by MemBuf do
     * not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZero)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}
/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of the lowest-ram chunk, making it preferred
 * in the future, resulting in slow compaction towards the lowest ram area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    ++saved_calls;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try perchunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with frees, so create new one */
        -- saved_calls; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in perchunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free in this chunk, so remove us from perchunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}
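/*
 * A freed object thus travels through two levels of free list. A hedged
 * sketch of the round trip, using only calls defined in this file:
 *
 *     void *p = pool->allocate();   // get(): pop freeCache, else a chunk's freeList
 *     pool->deallocate(p, false);   // push(): prepend p to the pool-global freeCache
 *     pool->clean(0);               // convertFreeCacheToChunkFreeCache(): return p
 *                                   // to the freeList of its owning chunk
 */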
/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {    /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in chain, add as last */
    chunk->next = newChunk;
}
void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks)     /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}
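/*
 * Worked example (hypothetical sizes): for obj_size = 40 and the default
 * MEM_CHUNK_SIZE (16K) request, csize stays 16384 after page rounding, so
 * cap = 16384 / 40 = 409; 409 * 40 = 16360 rounds back up to 16384, and
 * the pool ends up with chunk_size = 16384 and chunk_capacity = 409.
 */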
/*
 * warning: we do not clean this entry from Pools, assuming destruction
 * is used only at the end of the program
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.currentLevel() == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */
}
int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.currentLevel();
}
void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.currentLevel() > 0);
    --meter.idle;
    ++meter.inuse;
    return p;
}
void
MemPoolChunked::deallocate(void *obj, bool)
{
    push(obj);
    assert(meter.inuse.currentLevel() > 0);
    --meter.inuse;
    ++meter.idle;
}
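/*
 * Usage sketch (hypothetical caller; in Squid, pools are normally obtained
 * through the MemPools factory rather than constructed directly):
 *
 *     MemPoolChunked *pool = new MemPoolChunked("demo", 128);
 *     void *obj = pool->allocate();   // a 128-byte object, zero-filled when doZero is set
 *     pool->deallocate(obj, false);   // returns obj to the pool, not to malloc
 */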
void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through all the global freeCache and find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList;   /* stuff into chunks freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }
}
/* removes empty Chunks from pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so the first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0) {
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put the chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from the 2nd chunk. If the first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;
}
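/*
 * Example of the resulting order (hypothetical fill counts): with
 * chunk_capacity 32 and the chunk chain A -> B -> C holding 30, 12 and 30
 * items in use respectively, the rebuilt nextFreeChunk chain is A, C, B:
 * the first chunk stays on top, and the rest follow fullest-first, so
 * allocations drain towards already-busy chunks while the emptiest ones
 * are left to age out and be released.
 */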
bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.currentLevel() > (chunk_capacity << shift);
}
/*
 * Update MemPoolStats struct for a single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate)    /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.currentLevel();
    stats->items_inuse += meter.inuse.currentLevel();
    stats->items_idle += meter.idle.currentLevel();

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.currentLevel();
}