/*
 * $Id$
 *
 * DEBUG: section 63    Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 *
 * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from the
 * Internet community.  Development is led by Duane Wessels of the
 * National Laboratory for Applied Network Research and funded by the
 * National Science Foundation.  Squid is Copyrighted (C) 1998 by
 * the Regents of the University of California.  Please see the
 * COPYRIGHT file for full details.  Squid incorporates software
 * developed and/or copyrighted by other sources.  Please see the
 * CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

/*
 * Old way:
 * xmalloc each item separately; upon free, stack it into an idle pool array.
 * Each item is individually malloc()ed from the system, imposing libmalloc
 * overhead, and we additionally add our own overhead of one pointer per item,
 * since we keep a list of pointers to free items.
 *
 * Chunking:
 * xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
 * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
 * MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size.
 * Minimum Chunk size is MEM_CHUNK_SIZE (16K).
 * The number of items that fit into a single chunk depends on the item size.
 * The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 *
 * We populate each Chunk with a linked list of free items: each node lives in
 * the first word of an item and points at the next free item, and
 * Chunk->FreeList points at the first free node. Thus we stuff the free-list
 * housekeeping into the Chunk itself and avoid any pointer overhead per item
 * (see the illustrative sketch below).
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed closer
 * to the list head. The head is a hotspot servicing most requests, so slow
 * sorting occurs and Chunks in the highest memory tend to become idle and
 * freeable.
 *
 * An event is registered that runs every 15 secs and checks the reference
 * time of each idle chunk. If a chunk has not been referenced for 15 secs,
 * it is released.
 *
 * [If mem_idle_limit is exceeded with pools, every chunk that becomes
 * idle is immediately considered for release, unless it is the only
 * chunk with free items in it.] (not implemented)
 *
 * In cachemgr output, there are new columns for chunking. A special item,
 * Frag, is shown to roughly estimate the fragmentation of chunked pools.
 * Fragmentation is calculated by taking the number of items in use,
 * calculating the number of chunks needed to fit them all, and comparing
 * that to the actual number of chunks in use. The Frag number, in percent,
 * shows how many chunks are in use beyond what is needed; 100% means that
 * twice the needed number of chunks are in use.
 * The "part" item shows the number of partially filled chunks, i.e. how
 * badly fragmentation is spread across all chunks.
 *
 * Andres Kroonmaa.
 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
 */

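/*
 * Illustrative sketch only, excluded from the build with "#if 0": it shows
 * the idea described above of threading the free list through the chunk's
 * own storage, using the first word of every free item as the "next free"
 * pointer. It is not part of the pool implementation, and the demo_* names
 * are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void
demo_chunk_freelist(size_t obj_size, int capacity)
{
    /* the chunk's arena, zero-filled like objCache in MemChunk::MemChunk() */
    void *arena = calloc(capacity, obj_size);
    void *freeListHead = arena;
    void **Free = (void **) freeListHead;

    /* thread item i to item i+1; the last item keeps its calloc'ed NULL */
    for (int i = 1; i < capacity; ++i) {
        *Free = (void *) ((char *) Free + obj_size);
        Free = (void **) *Free;
    }

    /* pop one item, as MemPoolChunked::get() does */
    void **item = (void **) freeListHead;
    freeListHead = *item;   /* next free item becomes the list head */
    *item = NULL;           /* hand out a clean first word */

    printf("got %p, next free is %p\n", (void *) item, freeListHead);
    free(arena);
}
#endif
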
#include "squid.h"
#if HAVE_ASSERT_H
#include <assert.h>
#endif

#include "MemPoolChunked.h"

#define MEM_MAX_MMAP_CHUNKS 2048

#if HAVE_STRING_H
#include <string.h>
#endif

/*
 * XXX This is a boundary violation between lib and src.. would be good
 * if it could be solved otherwise, but left for now.
 */
extern time_t squid_curtime;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

/* Compare chunks */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare object to chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunks arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the pool */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the pool */
    return 1;
}

MemChunk::MemChunk(MemPoolChunked *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks' first chunk
     * allocate a chunk from that pool
     * move the contents of one chunk into the other
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    objCache = xcalloc(1, pool->chunk_size);
    freeList = objCache;
    void **Free = (void **)freeList;

    for (int i = 1; i < pool->chunk_capacity; i++) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    memMeterAdd(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterAdd(pool->getMeter().idle, pool->chunk_capacity);
    pool->chunkCount++;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}

MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize)
{
    chunk_size = 0;
    chunk_capacity = 0;
    chunkCount = 0;
    freeCache = 0;
    nextFreeChunk = 0;
    Chunks = 0;
    next = 0;

    setChunkSize(MEM_CHUNK_SIZE);

#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

MemChunk::~MemChunk()
{
    memMeterDel(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterDel(pool->getMeter().idle, pool->chunk_capacity);
    pool->chunkCount--;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPoolChunked::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf
     * do not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZeroOnPush)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of chunks placed higher in RAM, making it
 * preferred in the future and resulting in slow compaction towards the
 * lowest-RAM area.
 */
void *
MemPoolChunked::get()
{
    void **Free;

    saved_calls++;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try perchunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with frees, so create new one */
        saved_calls--; // compensate for the ++ above
        createChunk();
    }
    /* now we have some in perchunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    chunk->inuse_count++;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free in this chunk, so remove us from perchunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPoolChunked::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {    /* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in chain, add as last */
    chunk->next = newChunk;
}

void
MemPoolChunked::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks)     /* unsafe to tamper */
        return;

    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;  /* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}

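/*
 * Worked example of the sizing above (illustrative only, using the default
 * constants quoted in the header comment: MEM_CHUNK_SIZE 16KB, MEM_PAGE_SIZE
 * 4KB) for a hypothetical obj_size of 40 bytes:
 *
 *   csize = round_up(16384, 4096)    = 16384
 *   cap   = 16384 / 40               = 409   (within the MIN/MAX limits)
 *   csize = round_up(409 * 40, 4096) = round_up(16360, 4096) = 16384
 *   cap   = 16384 / 40               = 409
 *
 * so such a pool would use 16KB chunks holding 409 objects each.
 */
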
/*
 * warning: we do not remove this pool from Pools, assuming destruction
 * is used only at the end of the program
 */
MemPoolChunked::~MemPoolChunked()
{
    MemChunk *chunk, *fchunk;

    flushMetersFull();
    clean(0);
    assert(meter.inuse.level == 0);

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO we should be doing something about the original Chunks pointer here. */

}

int
MemPoolChunked::getInUseCount()
{
    return meter.inuse.level;
}

void *
MemPoolChunked::allocate()
{
    void *p = get();
    assert(meter.idle.level > 0);
    memMeterDec(meter.idle);
    memMeterInc(meter.inuse);
    return p;
}

void
MemPoolChunked::deallocate(void *obj, bool aggressive)
{
    push(obj);
    assert(meter.inuse.level > 0);
    memMeterDec(meter.inuse);
    memMeterInc(meter.idle);
}

void
MemPoolChunked::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through all the global freeCache and find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        chunk->inuse_count--;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free; /* remove from global cache */
        *(void **)Free = chunk->freeList;   /* stuff into chunks freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from pool */
void
MemPoolChunked::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!this)
        return;
    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from 2nd chunk. If first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}

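/*
 * Worked example of the rebuilt nextFreeChunk order (illustrative,
 * hypothetical counts): with chunk_capacity 409 and chunks B, C and D
 * holding 400, 0 and 200 in-use items, the list behind the always-first
 * chunk A becomes A -> B(400) -> D(200) -> C(0); a completely full chunk
 * is left off the list, and A itself is dropped from the head only if it
 * is full.
 */
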
bool
MemPoolChunked::idleTrigger(int shift) const
{
    return meter.idle.level > (chunk_capacity << shift);
}

/*
 * Update MemPoolStats struct for single pool
 */
int
MemPoolChunked::getStats(MemPoolStats * stats, int accumulate)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (!accumulate)    /* need to skip memset for GlobalStats accumulation */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555); /* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &meter;
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            chunks_free++;
        else if (chunk->inuse_count < chunk_capacity)
            chunks_partial++;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += meter.alloc.level;
    stats->items_inuse += meter.inuse.level;
    stats->items_idle += meter.idle.level;

    stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return meter.inuse.level;
}
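/*
 * Worked example of the Frag estimate described in the header comment
 * (illustrative, hypothetical numbers; the percentage itself is computed
 * on the cachemgr reporting side from these stats): with items_inuse = 500
 * and chunk_capacity = 409, two chunks would suffice, so if chunks_inuse = 4
 * the pool is using twice the needed chunks and Frag reports 100%.
 */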