/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * DEBUG: section 63 Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"
#include "mem/PoolChunked.h"

#include <cassert>
#include <cstring>

#define MEM_MAX_MMAP_CHUNKS 2048

/*
 * Old way:
 * xmalloc each item separately, upon free stack into idle pool array.
 * Each item is individually malloc()ed from the system, imposing libmalloc
 * overhead, and additionally we add our own overhead of one pointer per item
 * as we keep a list of pointers to free items.
 *
 * Chunking:
 * xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
 * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
 * MEM_PAGE_SIZE (4K), trying to have chunks in multiples of the VM page size.
 * The minimum Chunk size is MEM_CHUNK_SIZE (16K).
 * A number of items fits into a single chunk, depending on the item size.
 * The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 * (A worked sizing example follows this comment.)
 *
 * We populate a Chunk with a linked list, each node at the first word of an
 * item and pointing at the next free item. Chunk->FreeList points at the
 * first free node. Thus we stuff the free-list housekeeping into the Chunk
 * itself and omit the per-item pointer overhead.
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed closer
 * to the list head. The head is a hotspot, servicing most of the requests, so
 * slow sorting occurs and Chunks in the highest memory tend to become idle
 * and freeable.
 *
 * An event is registered that runs every 15 secs and checks the reference
 * time of each idle chunk. If a chunk has not been referenced for 15 secs,
 * it is released.
 *
 * [If mem_idle_limit is exceeded with pools, every chunk that becomes
 * idle is immediately considered for release, unless it is the only
 * chunk with free items in it.] (not implemented)
 *
 * In cachemgr output, there are new columns for chunking. A special item,
 * Frag, is shown to roughly estimate the fragmentation of chunked pools.
 * Fragmentation is calculated by taking the number of items in use,
 * calculating the number of chunks needed to fit them all, and then comparing
 * that to the actual number of chunks in use. The Frag number, in percent,
 * shows by how much the chunks in use exceed what is needed; 100% means that
 * twice the needed number of chunks are in use. (A worked Frag example also
 * follows this comment.)
 * The "part" item shows the number of partially filled chunks. This shows how
 * badly the fragmentation is spread across all chunks.
 *
 * Andres Kroonmaa.
 */
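
/*
 * Worked sizing example (illustrative only; a 40-byte obj_size is a
 * hypothetical value, and the constants are the defaults named above):
 * setChunkSize(MEM_CHUNK_SIZE) starts from 16K, which is already a multiple
 * of the 4K page size, so cap = 16384 / 40 = 409 items. 409 is above
 * MEM_MIN_FREE (32), 409 * 40 = 16360 bytes stays below MEM_CHUNK_MAX_SIZE
 * (256K), and 409 is below MEM_MAX_FREE (65535), so no clamping applies.
 * 16360 is then rounded back up to the page size, giving chunk_size = 16384
 * and chunk_capacity = 409.
 */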
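
/*
 * Worked Frag example (numbers are illustrative only): with a chunk_capacity
 * of 409 and 1000 items in use, 3 chunks would be enough to hold them all;
 * if 6 chunks are actually in use, twice the needed amount, Frag is reported
 * as 100%.
 */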

extern time_t squid_curtime;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

/* Compare chunks */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare object to chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunk's arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the chunk's arena */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the chunk's arena */
    return 1;
}

MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks' first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    if (pool->doZero)
        objCache = xcalloc(1, pool->chunk_size);
    else
        objCache = xmalloc(pool->chunk_size);

    freeList = objCache;
    void **Free = (void **)freeList;

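    /* thread the embedded free list: each free item's first word points at the next free item in the chunk */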
    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    memMeterAdd(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterAdd(pool->getMeter().idle, pool->chunk_capacity);
    ++pool->chunkCount;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}

MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize)
{
    chunk_size = 0;
    chunk_capacity = 0;
    chunkCount = 0;
    freeCache = 0;
    nextFreeChunk = 0;
    Chunks = 0;
    next = 0;

    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

MemChunk::~MemChunk()
{
    memMeterDel(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterDel(pool->getMeter().idle, pool->chunk_capacity);
    -- pool->chunkCount;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf do
     * not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZero)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of the lowest-ram chunk, making it preferred
 * in future, and resulting in slow compaction towards the lowest ram area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    ++saved_calls;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try the per-chunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with free items, so create a new one */
        -- saved_calls; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in the per-chunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free item in this chunk, so remove us from the per-chunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {    /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are the lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in the chain, add as last */
    chunk->next = newChunk;
}

void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks)    /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;    /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;    /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}

/*
 * warning: we do not clean this entry from Pools, assuming destruction
 * is used only at the end of the program
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.level == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */

}

int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.level;
}

void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.level > 0);
    memMeterDec(meter.idle);
    memMeterInc(meter.inuse);
    return p;
}

void
MemPoolChunked::deallocate(void *obj, bool aggressive)
{
    push(obj);
    assert(meter.inuse.level > 0);
    memMeterDec(meter.inuse);
    memMeterInc(meter.idle);
}
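
/*
 * Minimal allocation/deallocation sketch (a hypothetical caller; real Squid
 * code typically reaches this allocator through higher-level pool wrappers
 * rather than constructing MemPoolChunked directly):
 *
 *   MemPoolChunked *pool = new MemPoolChunked("demo", sizeof(SomeObject));
 *   void *obj = pool->allocate();   // served from a chunk's embedded free list
 *   // ... use obj ...
 *   pool->deallocate(obj, false);   // pushed onto the pool's freeCache
 */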

void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through the entire global freeCache, find the Chunk
     * that any given Free item belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free;    /* remove from global cache */
        *(void **)Free = chunk->freeList;    /* stuff into the chunk's freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so the first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put the chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from the 2nd chunk. If the first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}

bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.level > (chunk_capacity << shift);
}

/*
 * Update MemPoolStats struct for a single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate)    /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555);    /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.level;
    stats->items_inuse += meter.inuse.level;
    stats->items_idle += meter.idle.level;

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.level;
}