/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * DEBUG: section 63    Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"
#include "mem/PoolChunked.h"

#include <cassert>
#include <cstring>

#define MEM_MAX_MMAP_CHUNKS 2048

/*
 * Old way:
 *   xmalloc each item separately; upon free, stack it into an idle pool array.
 *   Each item is individually malloc()ed from the system, imposing libmalloc
 *   overhead, and we additionally add our own overhead of one pointer per item
 *   because we keep a list of pointers to free items.
 *
 * Chunking:
 *   xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
 *   limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
 *   MEM_PAGE_SIZE (4K), trying to keep chunks in multiples of the VM page size.
 *   The minimum Chunk size is MEM_CHUNK_SIZE (16K).
 *   The number of items that fit into a single chunk depends on the item size.
 *   The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 *
 *   We populate each Chunk with a linked list, placing each node in the first
 *   word of an item and pointing it at the next free item. Chunk->freeList
 *   points at the first free node. Thus we stuff the free-list housekeeping
 *   into the Chunk itself and avoid per-item pointer overhead.
 *
 *   Chunks are created on demand, and new chunks are inserted into the linked
 *   list of chunks so that Chunks with smaller pointer values are placed closer
 *   to the list head. The head is a hotspot, servicing most of the requests, so
 *   slow sorting occurs and Chunks in the highest memory tend to become idle
 *   and freeable.
 *
 *   An event is registered that runs every 15 seconds and checks the reference
 *   time of each idle chunk. If a chunk has not been referenced for 15 seconds,
 *   it is released.
 *
 *   [If mem_idle_limit is exceeded with pools, every chunk that becomes
 *   idle is immediately considered for release, unless it is the only
 *   chunk with free items in it.] (not implemented)
 *
 *   In cachemgr output, there are new columns for chunking. A special item,
 *   Frag, is shown to roughly estimate the fragmentation of chunked pools.
 *   Fragmentation is calculated by taking the number of items in use,
 *   calculating the number of chunks needed to fit them all, and then comparing
 *   that to the actual number of chunks in use. The Frag number, in percent,
 *   shows how many chunks are in use beyond what is needed; 100% means that
 *   twice the needed number of chunks are in use.
 *   The "part" item shows the number of partially filled chunks, indicating how
 *   badly fragmentation is spread across all chunks.
 *
 * Andres Kroonmaa.
 */
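
/*
 * A rough worked example of the Frag figure described above (illustrative
 * numbers only): with chunk_capacity == 100 and 8,000 items in use, 80 chunks
 * would be enough to hold every used item. If 120 chunks actually hold at
 * least one used item, then
 *
 *   Frag = (chunks_in_use - chunks_needed) / chunks_needed * 100%
 *        = (120 - 80) / 80 * 100% = 50%
 *
 * meaning half again as many chunks are pinned as the in-use items require.
 */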

extern time_t squid_curtime;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

/* Compare chunks */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare object to chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunks arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the pool */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the pool */
    return 1;
}

MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks' first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    if (pool->doZero)
        objCache = xcalloc(1, pool->chunk_size);
    else
        objCache = xmalloc(pool->chunk_size);

    freeList = objCache;
    void **Free = (void **)freeList;

    /* thread the embedded free list through the chunk */
    for (int i = 1; i < pool->chunk_capacity; ++i) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    pool->getMeter().alloc += pool->chunk_capacity;
    pool->getMeter().idle += pool->chunk_capacity;
    ++pool->chunkCount;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}
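
/*
 * A sketch of the embedded free list built by the constructor above, assuming
 * for illustration a chunk_capacity of 4:
 *
 *   objCache/freeList -> [item 0] -> [item 1] -> [item 2] -> [item 3]
 *
 * Each "->" is stored in the first word of the item it leaves from, so free
 * items are tracked without any memory beyond the chunk itself.
 */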

MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) :
    MemImplementingAllocator(aLabel, aSize), chunk_size(0),
    chunk_capacity(0), chunkCount(0), freeCache(0), nextFreeChunk(0),
    Chunks(0), allChunks(Splay<MemChunk *>())
{
    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

MemChunk::~MemChunk()
{
    pool->getMeter().alloc -= pool->chunk_capacity;
    pool->getMeter().idle -= pool->chunk_capacity;
    -- pool->chunkCount;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf
     * do not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZero)
        memset(obj, 0, obj_size);
    /* prepend the object to the pool-global freeCache list */
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of the lowest-RAM chunk, making it preferred
 * in the future, and resulting in slow compaction towards the lowest RAM area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    ++saved_calls;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try perchunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with frees, so create new one */
        -- saved_calls; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in perchunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    ++chunk->inuse_count;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free in this chunk, so remove us from perchunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {    /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in chain, add as last */
    chunk->next = newChunk;
}

void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks) /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}
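
/*
 * A worked example of the sizing above, using the constant values quoted in
 * the header comment (MEM_PAGE_SIZE 4K, MEM_CHUNK_SIZE 16K) and a hypothetical
 * obj_size of 40 bytes:
 *
 *   csize = round_up(16384, 4096)    = 16384
 *   cap   = 16384 / 40               = 409   (within MEM_MIN_FREE..MEM_MAX_FREE)
 *   csize = round_up(409 * 40, 4096) = 16384
 *   cap   = 16384 / 40               = 409
 *
 * so such a pool would use 16 KB chunks holding 409 items each.
 */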

/*
 * Warning: we do not remove this pool's entry from Pools, assuming destruction
 * happens only at the end of the program.
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.currentLevel() == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */

}

int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.currentLevel();
}

void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.currentLevel() > 0);
    --meter.idle;
    ++meter.inuse;
    return p;
}

void
MemPoolChunked::deallocate(void *obj, bool)
{
    push(obj);
    assert(meter.inuse.currentLevel() > 0);
    --meter.inuse;
    ++meter.idle;
}
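
/*
 * A minimal usage sketch (illustrative only; within Squid, pools are normally
 * obtained through the MemPools factory rather than constructed directly):
 *
 *   MemPoolChunked *pool = new MemPoolChunked("demo", 40); // hypothetical 40-byte objects
 *   void *obj = pool->allocate();  // meter.idle--, meter.inuse++
 *   // ... use obj ...
 *   pool->deallocate(obj, false);  // meter.inuse--, meter.idle++; obj parked on freeCache
 */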

void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through all the global freeCache and find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        -- chunk->inuse_count;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList;   /* stuff into chunks freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from 2nd chunk. If first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}

bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.currentLevel() > (chunk_capacity << shift);
}

/*
 * Update MemPoolStats struct for a single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate)    /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            ++chunks_free;
        else if (chunk->inuse_count < chunk_capacity)
            ++chunks_partial;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.currentLevel();
    stats->items_inuse += meter.inuse.currentLevel();
    stats->items_idle += meter.idle.currentLevel();

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.currentLevel();
}