
/*
 *
 * DEBUG: section 63    Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 *
 * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from the
 * Internet community. Development is led by Duane Wessels of the
 * National Laboratory for Applied Network Research and funded by the
 * National Science Foundation. Squid is Copyrighted (C) 1998 by
 * the Regents of the University of California. Please see the
 * COPYRIGHT file for full details. Squid incorporates software
 * developed and/or copyrighted by other sources. Please see the
 * CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

/*
 * Old way:
 * xmalloc each item separately; upon free, stack it into an idle pool array.
 * Each item is individually malloc()ed from the system, imposing libmalloc
 * overhead, and we additionally add our own overhead of one pointer per item,
 * as we keep a list of pointers to free items.
 *
 * Chunking:
 * xmalloc a Chunk that fits at least MEM_MIN_FREE (32) items in an array, but
 * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to
 * MEM_PAGE_SIZE (4K), trying to keep chunks in multiples of the VM page size.
 * Minimum Chunk size is MEM_CHUNK_SIZE (16K).
 * The number of items that fit into a single chunk depends on the item size.
 * The maximum number of items per chunk is limited to MEM_MAX_FREE (65535).
 *
 * We thread a free list through the Chunk, with each node stored in the first
 * word of an item and pointing at the next free item. Chunk->FreeList points
 * at the first free node. Thus we keep the free-list housekeeping inside the
 * Chunk itself and avoid a per-item pointer overhead.
 *
 * Chunks are created on demand, and new chunks are inserted into the linked
 * list of chunks so that Chunks with smaller pointer values are placed closer
 * to the list head. The head is a hotspot servicing most requests, so slow
 * sorting occurs and Chunks in the highest memory tend to become idle
 * and freeable.
 *
 * An event is registered that runs every 15 secs and checks the reference
 * time of each idle chunk. If a chunk has not been referenced for 15 secs,
 * it is released.
 *
 * [If mem_idle_limit is exceeded with pools, every chunk that becomes
 * idle is immediately considered for release, unless it is the only
 * chunk with free items in it.] (not implemented)
 *
 * In cachemgr output, there are new columns for chunking. A special item,
 * Frag, is shown to give a rough estimate of the fragmentation of chunked
 * pools. Fragmentation is calculated by taking the number of items in use,
 * computing the number of chunks needed to hold them all, and comparing that
 * to the actual number of chunks in use. The Frag number, in percent, shows
 * by how much the number of chunks in use exceeds the need; 100% means that
 * twice the needed number of chunks are in use.
 * The "part" item shows the number of partially filled chunks, indicating how
 * badly fragmentation is spread across the chunks.
 *
 * Andres Kroonmaa.
 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
 */
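
/*
 * Rough usage sketch (illustrative only; "MyObject" and the pool label are
 * made-up names, while the calls are the ones defined in this file and in
 * MemPool.h):
 *
 *   MemAllocator *pool =
 *       MemPools::GetInstance().create("MyObject", sizeof(MyObject));
 *   void *obj = pool->alloc();    // served from a chunk or the free cache
 *   ...
 *   pool->free(obj);              // returned to the pool, not to libmalloc
 */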

#include "config.h"
#if HAVE_ASSERT_H
#include <assert.h>
#endif

#include "MemPool.h"

#define FLUSH_LIMIT 1000	/* Flush memPool counters to memMeters after flush limit calls */
#define MEM_MAX_MMAP_CHUNKS 2048

#if HAVE_STRING_H
#include <string.h>
#endif

/*
 * XXX This is a boundary violation between lib and src.. would be good
 * if it could be solved otherwise, but left for now.
 */
extern time_t squid_curtime;

/* local data */
static MemPoolMeter TheMeter;
static MemPoolIterator Iterator;

static int Pool_id_counter = 0;

/* local prototypes */
static int memCompChunks(MemChunk * const &, MemChunk * const &);
static int memCompObjChunks(void * const &, MemChunk * const &);

MemPools &
MemPools::GetInstance()
{
    /* Must use this idiom, as we can be double-initialised
     * if we are called during static initialisations.
     */
    if (!Instance)
        Instance = new MemPools;
    return *Instance;
}

MemPools * MemPools::Instance = NULL;

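/*
 * Simple iteration over all registered pools. Note that a single static
 * Iterator object backs memPoolIterate(), so only one walk over the pool
 * list can be in progress at a time.
 */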
MemPoolIterator *
memPoolIterate(void)
{
    Iterator.pool = MemPools::GetInstance().pools;
    return &Iterator;
}

void
memPoolIterateDone(MemPoolIterator ** iter)
{
    assert(iter != NULL);
    Iterator.pool = NULL;
    *iter = NULL;
}

MemImplementingAllocator *
memPoolIterateNext(MemPoolIterator * iter)
{
    MemImplementingAllocator *pool;
    assert(iter != NULL);

    pool = iter->pool;
    if (!pool)
        return NULL;

    iter->pool = pool->next;
    return pool;
}

void
MemPools::setIdleLimit(size_t new_idle_limit)
{
    mem_idle_limit = new_idle_limit;
}

size_t
MemPools::idleLimit() const
{
    return mem_idle_limit;
}

/* Compare chunks */
static int
memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB)
{
    if (chunkA->objCache > chunkB->objCache)
        return 1;
    else if (chunkA->objCache < chunkB->objCache)
        return -1;
    else
        return 0;
}

/* Compare object to chunk */
static int
memCompObjChunks(void *const &obj, MemChunk * const &chunk)
{
    /* object is lower in memory than the chunks arena */
    if (obj < chunk->objCache)
        return -1;
    /* object is within the pool */
    if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size))
        return 0;
    /* object is above the pool */
    return 1;
}

MemChunk::MemChunk(MemPool *aPool)
{
    /* should have a pool for this too -
     * note that this requires:
     * allocate one chunk for the pool of chunks' first chunk,
     * allocate a chunk from that pool,
     * move the contents of one chunk into the other,
     * free the first chunk.
     */
    inuse_count = 0;
    next = NULL;
    pool = aPool;

    objCache = xcalloc(1, pool->chunk_size);
    freeList = objCache;
    void **Free = (void **)freeList;

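    /*
     * Thread the free list through the freshly allocated arena: the first
     * word of each item points at the next item, so idle items need no
     * bookkeeping outside the chunk itself.
     */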
    for (int i = 1; i < pool->chunk_capacity; i++) {
        *Free = (void *) ((char *) Free + pool->obj_size);
        void **nextFree = (void **)*Free;
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size);
        Free = nextFree;
    }
    nextFreeChunk = pool->nextFreeChunk;
    pool->nextFreeChunk = this;

    memMeterAdd(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterAdd(pool->getMeter().idle, pool->chunk_capacity);
    pool->idle += pool->chunk_capacity;
    pool->chunkCount++;
    lastref = squid_curtime;
    pool->allChunks.insert(this, memCompChunks);
}

MemPool::MemPool(const char *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize)
{
    chunk_size = 0;
    chunk_capacity = 0;
    memPID = 0;
    chunkCount = 0;
    inuse = 0;
    idle = 0;
    freeCache = 0;
    nextFreeChunk = 0;
    Chunks = 0;
    next = 0;
    MemImplementingAllocator *last_pool;

    assert(aLabel != NULL && aSize);

    setChunkSize(MEM_CHUNK_SIZE);

    /* Append as Last */
    for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;)
        last_pool = last_pool->next;
    if (last_pool)
        last_pool->next = this;
    else
        MemPools::GetInstance().pools = this;

    memPID = ++Pool_id_counter;
}

MemChunk::~MemChunk()
{
    memMeterDel(pool->getMeter().alloc, pool->chunk_capacity);
    memMeterDel(pool->getMeter().idle, pool->chunk_capacity);
    pool->idle -= pool->chunk_capacity;
    pool->chunkCount--;
    pool->allChunks.remove(this, memCompChunks);
    xfree(objCache);
}

void
MemPool::push(void *obj)
{
    void **Free;
    /* XXX We should figure out a sane way of avoiding having to clear
     * all buffers. For example, data buffers such as those used by MemBuf
     * do not really need to be cleared. There was a condition based on
     * the object size here, but such a condition is not safe.
     */
    if (doZeroOnPush)
        memset(obj, 0, obj_size);
    Free = (void **)obj;
    *Free = freeCache;
    freeCache = obj;
    (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size);
}

/*
 * Find a chunk with a free item.
 * Create a new chunk on demand if no chunk with free items is found.
 * Insert the new chunk in front of the lowest-ram chunk, making it preferred
 * in the future and resulting in slow compaction towards the lowest ram area.
 */
void *
MemPool::get()
{
    void **Free;

    /* first, try cache */
    if (freeCache) {
        Free = (void **)freeCache;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
        freeCache = *Free;
        *Free = NULL;
        return Free;
    }
    /* then try perchunk freelist chain */
    if (nextFreeChunk == NULL) {
        /* no chunk with frees, so create new one */
        createChunk();
    }
    /* now we have some in perchunk freelist chain */
    MemChunk *chunk = nextFreeChunk;

    Free = (void **)chunk->freeList;
    chunk->freeList = *Free;
    *Free = NULL;
    chunk->inuse_count++;
    chunk->lastref = squid_curtime;

    if (chunk->freeList == NULL) {
        /* last free in this chunk, so remove us from perchunk freelist chain */
        nextFreeChunk = chunk->nextFreeChunk;
    }
    (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size);
    return Free;
}

/* just create a new chunk and place it into a good spot in the chunk chain */
void
MemPool::createChunk()
{
    MemChunk *chunk, *newChunk;

    newChunk = new MemChunk(this);

    chunk = Chunks;
    if (chunk == NULL) {	/* first chunk in pool */
        Chunks = newChunk;
        return;
    }
    if (newChunk->objCache < chunk->objCache) {
        /* we are lowest ram chunk, insert as first chunk */
        newChunk->next = chunk;
        Chunks = newChunk;
        return;
    }
    while (chunk->next) {
        if (newChunk->objCache < chunk->next->objCache) {
            /* new chunk is in lower ram, insert here */
            newChunk->next = chunk->next;
            chunk->next = newChunk;
            return;
        }
        chunk = chunk->next;
    }
    /* we are the worst chunk in chain, add as last */
    chunk->next = newChunk;
}

/* Change the default value of defaultIsChunked to override
 * all pools - including those used before main() starts, where
 * MemPools::GetInstance().setDefaultPoolChunking() can be called.
 */
MemPools::MemPools() : pools(NULL), mem_idle_limit(2 * MB),
        poolCount (0), defaultIsChunked (!DISABLE_POOLS && !RUNNING_ON_VALGRIND)
{
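    /*
     * The MEMPOOLS environment variable, when set, overrides the compiled-in
     * default for chunked pooling (0 disables chunking, non-zero enables it).
     */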
    char *cfg = getenv("MEMPOOLS");
    if (cfg)
        defaultIsChunked = atoi(cfg);
#if HAVE_MALLOPT && M_MMAP_MAX
    mallopt(M_MMAP_MAX, MEM_MAX_MMAP_CHUNKS);
#endif
}

void
MemPool::setChunkSize(size_t chunksize)
{
    int cap;
    size_t csize = chunksize;

    if (Chunks)		/* unsafe to tamper */
        return;

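    /*
     * Round the requested chunk size up to MEM_PAGE_SIZE, derive the item
     * capacity, clamp it to [MEM_MIN_FREE .. MEM_MAX_FREE] and to at most
     * MEM_CHUNK_MAX_SIZE worth of items, then round the resulting size up
     * to a page boundary again.
     */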
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;	/* round up to page size */
    cap = csize / obj_size;

    if (cap < MEM_MIN_FREE)
        cap = MEM_MIN_FREE;
    if (cap * obj_size > MEM_CHUNK_MAX_SIZE)
        cap = MEM_CHUNK_MAX_SIZE / obj_size;
    if (cap > MEM_MAX_FREE)
        cap = MEM_MAX_FREE;
    if (cap < 1)
        cap = 1;

    csize = cap * obj_size;
    csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE;	/* round up to page size */
    cap = csize / obj_size;

    chunk_capacity = cap;
    chunk_size = csize;
}

MemImplementingAllocator *
MemPools::create(const char *label, size_t obj_size)
{
    return create (label, obj_size, defaultIsChunked);
}

MemImplementingAllocator *
MemPools::create(const char *label, size_t obj_size, bool const chunked)
{
    ++poolCount;
    if (chunked)
        return new MemPool (label, obj_size);
    else
        return new MemMalloc (label, obj_size);
}

void
MemPools::setDefaultPoolChunking(bool const &aBool)
{
    defaultIsChunked = aBool;
}

/*
 * warning: we do not clean this entry from Pools, assuming destruction
 * is used only at the end of the program
 */
MemPool::~MemPool()
{
    MemChunk *chunk, *fchunk;
    MemImplementingAllocator *find_pool, *prev_pool;

    flushMetersFull();
    clean(0);
    assert(inuse == 0 && "While trying to destroy pool");

    chunk = Chunks;
    while ( (fchunk = chunk) != NULL) {
        chunk = chunk->next;
        delete fchunk;
    }
    /* TODO: we should be doing something about the original Chunks pointer here. */

    assert(MemPools::GetInstance().pools != NULL && "Called MemPool::~MemPool, but no pool exists!");

    /* Pool clean, remove it from List and free */
    for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next)
        prev_pool = find_pool;
    assert(find_pool != NULL && "pool to destroy not found");

    if (prev_pool)
        prev_pool->next = next;
    else
        MemPools::GetInstance().pools = next;
    --MemPools::GetInstance().poolCount;
}

char const *
MemAllocator::objectType() const
{
    return label;
}

int
MemAllocator::inUseCount()
{
    return getInUseCount();
}

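/*
 * alloc_calls and free_calls are cheap per-call counters; they are folded
 * into the MemMeters here, once alloc_calls reaches FLUSH_LIMIT or whenever
 * a full flush (flushMetersFull) is requested.
 */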
void
MemImplementingAllocator::flushMeters()
{
    size_t calls;

    calls = free_calls;
    if (calls) {
        getMeter().gb_freed.count += calls;
        memMeterDel(getMeter().inuse, calls);
        memMeterAdd(getMeter().idle, calls);
        free_calls = 0;
    }
    calls = alloc_calls;
    if (calls) {
        meter.gb_saved.count += calls;
        memMeterAdd(meter.inuse, calls);
        memMeterDel(meter.idle, calls);
        alloc_calls = 0;
    }
}

void
MemImplementingAllocator::flushMetersFull()
{
    flushMeters();
    getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
    getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
}

void
MemPoolMeter::flush()
{
    alloc.level = 0;
    inuse.level = 0;
    idle.level = 0;
    gb_saved.count = 0;
    gb_saved.bytes = 0;
    gb_freed.count = 0;
    gb_freed.bytes = 0;
}

/*
 * Updates all pool counters, and recreates TheMeter totals from all pools
 */
void
MemPools::flushMeters()
{
    MemImplementingAllocator *pool;
    MemPoolIterator *iter;

    TheMeter.flush();

    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        pool->flushMetersFull();
        memMeterAdd(TheMeter.alloc, pool->getMeter().alloc.level * pool->obj_size);
        memMeterAdd(TheMeter.inuse, pool->getMeter().inuse.level * pool->obj_size);
        memMeterAdd(TheMeter.idle, pool->getMeter().idle.level * pool->obj_size);
        TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
        TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
        TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
        TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
    }
    memPoolIterateDone(&iter);
}

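/*
 * MemMalloc is the non-chunked fallback allocator: every object is obtained
 * and released with a plain xcalloc()/xfree() pair, with only the in-use
 * counter maintained here.
 */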
void *
MemMalloc::allocate()
{
    inuse++;
    return xcalloc(1, obj_size);
}

void
MemMalloc::deallocate(void *obj)
{
    inuse--;
    xfree(obj);
}

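/*
 * Common entry points for both chunked and malloc-backed pools: count the
 * call, flush the counters to the meters every FLUSH_LIMIT allocations, and
 * delegate the real work to allocate()/deallocate().
 */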
void *
MemImplementingAllocator::alloc()
{
    if (++alloc_calls == FLUSH_LIMIT)
        flushMeters();

    return allocate();
}

void
MemImplementingAllocator::free(void *obj)
{
    assert(obj != NULL);
    (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
    deallocate(obj);
    ++free_calls;
}

int
MemPool::getInUseCount()
{
    return inuse;
}

void *
MemPool::allocate()
{
    void *p = get();
    assert(idle);
    --idle;
    ++inuse;
    return p;
}

void
MemPool::deallocate(void *obj)
{
    push(obj);
    assert(inuse);
    --inuse;
    ++idle;
}

void
MemPool::convertFreeCacheToChunkFreeCache()
{
    void *Free;
    /*
     * OK, so we have to go through all the global freeCache and find the Chunk
     * any given Free belongs to, and stuff it into that Chunk's freelist
     */

    while ((Free = freeCache) != NULL) {
        MemChunk *chunk = NULL;
        chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks));
        assert(splayLastResult == 0);
        assert(chunk->inuse_count > 0);
        chunk->inuse_count--;
        (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *));
        freeCache = *(void **)Free;	/* remove from global cache */
        *(void **)Free = chunk->freeList;	/* stuff into chunks freelist */
        (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *));
        chunk->freeList = Free;
        chunk->lastref = squid_curtime;
    }

}

/* removes empty Chunks from pool */
void
MemPool::clean(time_t maxage)
{
    MemChunk *chunk, *freechunk, *listTail;
    time_t age;

    if (!this)
        return;
    if (!Chunks)
        return;

    flushMetersFull();
    convertFreeCacheToChunkFreeCache();
    /* Now we have all chunks in this pool cleared up, all free items returned to their home */
    /* We start now checking all chunks to see if we can release any */
    /* We start from Chunks->next, so first chunk is not released */
    /* Recreate nextFreeChunk list from scratch */

    chunk = Chunks;
    while ((freechunk = chunk->next) != NULL) {
        age = squid_curtime - freechunk->lastref;
        freechunk->nextFreeChunk = NULL;
        if (freechunk->inuse_count == 0)
            if (age >= maxage) {
                chunk->next = freechunk->next;
                delete freechunk;
                freechunk = NULL;
            }
        if (chunk->next == NULL)
            break;
        chunk = chunk->next;
    }

    /* Recreate nextFreeChunk list from scratch */
    /* Populate nextFreeChunk list in order of "most filled chunk first" */
    /* in case of equal fill, put chunk in lower ram first */
    /* First (create time) chunk is always on top, no matter how full */

    chunk = Chunks;
    nextFreeChunk = chunk;
    chunk->nextFreeChunk = NULL;

    while (chunk->next) {
        chunk->next->nextFreeChunk = NULL;
        if (chunk->next->inuse_count < chunk_capacity) {
            listTail = nextFreeChunk;
            while (listTail->nextFreeChunk) {
                if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count)
                    break;
                if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) &&
                        (chunk->next->objCache < listTail->nextFreeChunk->objCache))
                    break;
                listTail = listTail->nextFreeChunk;
            }
            chunk->next->nextFreeChunk = listTail->nextFreeChunk;
            listTail->nextFreeChunk = chunk->next;
        }
        chunk = chunk->next;
    }
    /* We started from 2nd chunk. If first chunk is full, remove it */
    if (nextFreeChunk->inuse_count == chunk_capacity)
        nextFreeChunk = nextFreeChunk->nextFreeChunk;

    return;
}

/*
 * Returns all cached frees to their home chunks.
 * If a chunk's unreferenced age exceeds maxage, the idle chunk is destroyed.
 * Flushes meters for a pool.
 * If a pool is not specified, iterates through all pools.
 * When used for all pools, if new_idle_limit is above -1, the new
 * idle memory limit is set before the cleanup. This allows shrinking
 * memPool memory usage to a specified minimum.
 */
void
MemPools::clean(time_t maxage)
{
    MemImplementingAllocator *pool;
    MemPoolIterator *iter;

    int shift = 1;
    flushMeters();
    if (TheMeter.idle.level > mem_idle_limit)
        maxage = shift = 0;

    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter)))
        if (pool->idleTrigger(shift))
            pool->clean(maxage);
    memPoolIterateDone(&iter);
}

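/*
 * A chunked pool is worth cleaning only once its idle items would fill more
 * than one chunk (left-shifted by the caller-supplied factor).
 */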
bool
MemPool::idleTrigger(int shift) const
{
    return getMeter().idle.level > (chunk_capacity << shift);
}

/* Persistent Pool stats. for GlobalStats accumulation */
static MemPoolStats pp_stats;

/*
 * Update MemPoolStats struct for a single pool
 */
int
MemPool::getStats(MemPoolStats * stats)
{
    MemChunk *chunk;
    int chunks_free = 0;
    int chunks_partial = 0;

    if (stats != &pp_stats)	/* need to skip memset for GlobalStats accumulation */
        /* XXX Fixme */
        memset(stats, 0, sizeof(MemPoolStats));

    clean((time_t) 555555);	/* don't want to get chunks released before reporting */

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &getMeter();
    stats->obj_size = obj_size;
    stats->chunk_capacity = chunk_capacity;

    /* gather stats for each Chunk */
    chunk = Chunks;
    while (chunk) {
        if (chunk->inuse_count == 0)
            chunks_free++;
        else if (chunk->inuse_count < chunk_capacity)
            chunks_partial++;
        chunk = chunk->next;
    }

    stats->chunks_alloc += chunkCount;
    stats->chunks_inuse += chunkCount - chunks_free;
    stats->chunks_partial += chunks_partial;
    stats->chunks_free += chunks_free;

    stats->items_alloc += getMeter().alloc.level;
    stats->items_inuse += getMeter().inuse.level;
    stats->items_idle += getMeter().idle.level;

    stats->overhead += sizeof(MemPool) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1;

    return getMeter().inuse.level;
}

/* TODO: extract common logic to MemAllocate */
int
MemMalloc::getStats(MemPoolStats * stats)
{
    if (stats != &pp_stats)	/* need to skip memset for GlobalStats accumulation */
        /* XXX Fixme */
        memset(stats, 0, sizeof(MemPoolStats));

    stats->pool = this;
    stats->label = objectType();
    stats->meter = &getMeter();
    stats->obj_size = obj_size;
    stats->chunk_capacity = 0;

    stats->chunks_alloc += 0;
    stats->chunks_inuse += 0;
    stats->chunks_partial += 0;
    stats->chunks_free += 0;

    stats->items_alloc += getMeter().alloc.level;
    stats->items_inuse += getMeter().inuse.level;
    stats->items_idle += getMeter().idle.level;

    stats->overhead += sizeof(MemMalloc) + strlen(objectType()) + 1;

    return getMeter().inuse.level;
}

int
MemMalloc::getInUseCount()
{
    return inuse;
}

/*
 * Aggregate statistics over all pools are returned
 */
int
memPoolGetGlobalStats(MemPoolGlobalStats * stats)
{
    int pools_inuse = 0;
    MemAllocator *pool;
    MemPoolIterator *iter;

    memset(stats, 0, sizeof(MemPoolGlobalStats));
    memset(&pp_stats, 0, sizeof(MemPoolStats));

    MemPools::GetInstance().flushMeters();	/* recreate TheMeter */

    /* gather all stats for Totals */
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        if (pool->getStats(&pp_stats) > 0)
            pools_inuse++;
    }
    memPoolIterateDone(&iter);

    stats->TheMeter = &TheMeter;

    stats->tot_pools_alloc = MemPools::GetInstance().poolCount;
    stats->tot_pools_inuse = pools_inuse;
    stats->tot_pools_mempid = Pool_id_counter;

    stats->tot_chunks_alloc = pp_stats.chunks_alloc;
    stats->tot_chunks_inuse = pp_stats.chunks_inuse;
    stats->tot_chunks_partial = pp_stats.chunks_partial;
    stats->tot_chunks_free = pp_stats.chunks_free;
    stats->tot_items_alloc = pp_stats.items_alloc;
    stats->tot_items_inuse = pp_stats.items_inuse;
    stats->tot_items_idle = pp_stats.items_idle;

    stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemPool *);
    stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit;

    return pools_inuse;
}

MemAllocator::MemAllocator(char const *aLabel) : doZeroOnPush(true), label(aLabel)
{
}

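/*
 * Round object sizes up to a multiple of the pointer size so that the first
 * word of every idle item can safely hold a free-list pointer.
 */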
size_t MemAllocator::RoundedSize(size_t s)
{
    return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*);
}

MemMalloc::MemMalloc(char const *label, size_t aSize) : MemImplementingAllocator(label, aSize) { inuse = 0; }

bool
MemMalloc::idleTrigger(int shift) const
{
    return false;
}

void
MemMalloc::clean(time_t maxage)
{
}

int
memPoolInUseCount(MemAllocator * pool)
{
    return pool->inUseCount();
}

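/*
 * Total bytes currently allocated across all pools, as accumulated into
 * TheMeter by MemPools::flushMeters().
 */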
int
memPoolsTotalAllocated(void)
{
    MemPoolGlobalStats stats;
    memPoolGetGlobalStats(&stats);
    return stats.TheMeter->alloc.level;
}

void *
MemAllocatorProxy::alloc()
{
    return getAllocator()->alloc();
}

void
MemAllocatorProxy::free(void *address)
{
    getAllocator()->free(address);
    /* TODO: check for empty, and if so, if the default type has altered,
     * switch
     */
}

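/*
 * The proxy creates its underlying allocator lazily, on first use; until
 * then inUseCount() reports zero and no pool is registered.
 */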
MemAllocator *
MemAllocatorProxy::getAllocator() const
{
    if (!theAllocator)
        theAllocator = MemPools::GetInstance().create(objectType(), size);
    return theAllocator;
}

int
MemAllocatorProxy::inUseCount() const
{
    if (!theAllocator)
        return 0;
    else
        return memPoolInUseCount(theAllocator);
}

size_t
MemAllocatorProxy::objectSize() const
{
    return size;
}

char const *
MemAllocatorProxy::objectType() const
{
    return label;
}

MemPoolMeter const &
MemAllocatorProxy::getMeter() const
{
    return getAllocator()->getMeter();
}

int
MemAllocatorProxy::getStats(MemPoolStats * stats)
{
    return getAllocator()->getStats(stats);
}

MemImplementingAllocator::MemImplementingAllocator(char const *aLabel, size_t aSize) : MemAllocator(aLabel),
        next(NULL),
        alloc_calls(0),
        free_calls(0),
        obj_size(RoundedSize(aSize))
{
}

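/*
 * Callers whose objects are fully initialised on every allocation can turn
 * off zero-on-push to skip the memset() done in MemPool::push().
 */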
void
MemAllocator::zeroOnPush(bool doIt)
{
    doZeroOnPush = doIt;
}

MemPoolMeter const &
MemImplementingAllocator::getMeter() const
{
    return meter;
}

MemPoolMeter &
MemImplementingAllocator::getMeter()
{
    return meter;
}

size_t
MemImplementingAllocator::objectSize() const
{
    return obj_size;
}