/*
 * $Id$
 *
 * DEBUG: section 13    High Level Memory Pool Management
 * AUTHOR: Harvest Derived
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from
 *  the Internet community; see the CONTRIBUTORS file for full
 *  details.  Many organizations have provided support for Squid's
 *  development; see the SPONSORS file for full details.  Squid is
 *  Copyrighted (C) 2001 by the Regents of the University of
 *  California; see the COPYRIGHT file for full details.  Squid
 *  incorporates software developed and/or copyrighted by other
 *  sources; see the CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "event.h"
#include "CacheManager.h"
#include "ClientInfo.h"
#include "Mem.h"
#include "memMeter.h"
#include "Store.h"
#include "StoreEntryStream.h"
#include "MemBuf.h"
#include "SquidTime.h"

#if HAVE_IOMANIP
#include <iomanip>
#endif
#if HAVE_OSTREAM
#include <ostream>
#endif
/* module globals */

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
static MemAllocator *MemPools[MEM_MAX];
static double xm_time = 0;
static double xm_deltat = 0;

/* string pools */
#define mem_str_pool_count 3

static const struct {
    const char *name;
    size_t obj_size;
} StrPoolsAttrs[mem_str_pool_count] = {
    {
        "Short Strings", MemAllocator::RoundedSize(36),
    },                          /* to fit rfc1123 and similar */
    {
        "Medium Strings", MemAllocator::RoundedSize(128),
    },                          /* to fit most urls */
    {
        "Long Strings", MemAllocator::RoundedSize(512)
    }                           /* other */
};

static struct {
    MemAllocator *pool;
} StrPools[mem_str_pool_count];

static MemMeter StrCountMeter;
static MemMeter StrVolumeMeter;

static MemMeter HugeBufCountMeter;
static MemMeter HugeBufVolumeMeter;

/* local routines */

static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;
    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";

    /* table body */
    for (i = 0; i < mem_str_pool_count; i++) {
        const MemAllocator *pool = StrPools[i].pool;
        const int plevel = pool->getMeter().inuse.level;
        stream << std::setw(20) << std::left << pool->objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
        stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool->objectSize();
    }

    /* malloc strings */
    stream << std::setw(20) << std::left << "Other Strings";
    stream << std::right << "\t ";
    stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
    stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
}

static void
memBufStats(std::ostream & stream)
{
    stream << "Large buffers: " <<
           HugeBufCountMeter.level << " (" <<
           HugeBufVolumeMeter.level / 1024 << " KB)\n";
}

void
Mem::Stats(StoreEntry * sentry)
{
    StoreEntryStream stream(sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, 1, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, 1, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

/*
 * public routines
 */

/*
 * we have a limit on _total_ amount of idle memory so we ignore
 * max_pages for now
 */
void
memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused, bool zeroOnPush)
{
    assert(name && size);
    assert(MemPools[type] == NULL);
    MemPools[type] = memPoolCreate(name, size);
    MemPools[type]->zeroOnPush(zeroOnPush);
}

/* find appropriate pool and use it (pools always init buffer with 0s) */
void *
memAllocate(mem_type type)
{
    return MemPools[type]->alloc();
}

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    MemPools[type]->freeOne(p);
}

/* allocate a variable size buffer using best-fit pool */
void *
memAllocString(size_t net_size, size_t * gross_size)
{
    int i;
    MemAllocator *pool = NULL;
    assert(gross_size);

    for (i = 0; i < mem_str_pool_count; i++) {
        if (net_size <= StrPoolsAttrs[i].obj_size) {
            pool = StrPools[i].pool;
            break;
        }
    }

    *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
    assert(*gross_size >= net_size);
    memMeterInc(StrCountMeter);
    memMeterAdd(StrVolumeMeter, *gross_size);
    return pool ? pool->alloc() : xcalloc(1, net_size);
}

extern size_t memStringCount();
size_t
memStringCount()
{
    size_t result = 0;

    for (int counter = 0; counter < mem_str_pool_count; ++counter)
        result += memPoolInUseCount(StrPools[counter].pool);

    return result;
}

/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    int i;
    MemAllocator *pool = NULL;
    assert(size && buf);

    for (i = 0; i < mem_str_pool_count; i++) {
        if (size <= StrPoolsAttrs[i].obj_size) {
            assert(size == StrPoolsAttrs[i].obj_size);
            pool = StrPools[i].pool;
            break;
        }
    }

    memMeterDec(StrCountMeter);
    memMeterDel(StrVolumeMeter, size);
    pool ? pool->freeOne(buf) : xfree(buf);
}
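
/*
 * Illustrative sketch (not part of the build): a caller keeps the gross_size
 * reported by memAllocString() and hands the same value back to
 * memFreeString(), so the buffer returns to the pool it came from. Here
 * "len" and "src" are hypothetical:
 *
 *     size_t gross = 0;
 *     char *s = static_cast<char *>(memAllocString(len + 1, &gross));
 *     memcpy(s, src, len + 1);
 *     ...
 *     memFreeString(gross, s);
 */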

/* Find the best fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t * gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}
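
/*
 * Worked example: memFindBufSizeType(3000, &gross) returns MEM_4K_BUF and
 * sets gross to 4096; requests above 64 KB return MEM_NONE and keep their
 * exact size, signalling the callers below to fall back to the heap.
 */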

/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        memMeterInc(HugeBufCountMeter);
        memMeterAdd(HugeBufVolumeMeter, *gross_size);
        return xcalloc(1, net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, NULL);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
    }
}
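
/*
 * Illustrative sketch (not part of the build): the gross size travels with
 * the buffer so that later free/realloc calls can locate the owning pool:
 *
 *     size_t gross = 0;
 *     char *buf = static_cast<char *>(memAllocBuf(1200, &gross));   // 2K pool, gross == 2048
 *     buf = static_cast<char *>(memReallocBuf(buf, 5000, &gross));  // copied into the 8K pool
 *     memFreeBuf(gross, buf);
 */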

static double clean_interval = 15.0;    /* time to live of idle chunk before release */

void
Mem::CleanIdlePools(void *unused)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}

void
memConfigure(void)
{
    size_t new_pool_limit;

    /** Set to configured value first */
    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, 1, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

#if 0
    /** \par
     * DPW 2007-04-12
     * No debugging here please because this method is called before
     * the debug log is configured and we'll get the message on
     * stderr when doing things like 'squid -k reconfigure'
     */
    if (MemPools::GetInstance().idleLimit() > new_pool_limit)
        debugs(13, 1, "Shrinking idle mem pools to " << std::setprecision(3) << toMB(new_pool_limit) << " MB");
#endif

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}
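
/*
 * For reference: the limit computed above is driven by the memory_pools and
 * memory_pools_limit directives in squid.conf, for example:
 *
 *     memory_pools on
 *     memory_pools_limit 5 MB
 *
 * and "memory_pools_limit none" (new_pool_limit = -1) removes the idle cap.
 */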

/* XXX make these classes do their own memory management */
#include "HttpHdrContRange.h"

void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "acl_deny_info_list",
                sizeof(acl_deny_info_list), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(acl_name_list), 0);
#if USE_CACHE_DIGESTS

    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif

    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CC, "HttpHdrCc", sizeof(HttpHdrCc), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_RELIST, "relist", sizeof(relist), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; i++) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroOnPush(false);

        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, 1, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    /** \par
     * finally register with the cache manager */
    RegisterWithCacheManager();
}

void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}

void
Mem::RegisterWithCacheManager(void)
{
    CacheManager::GetInstance()->registerAction("mem", "Memory Utilization",
            Mem::Stats, 0, 1);
}

mem_type &operator++ (mem_type &aMem)
{
    int tmp = (int)aMem;
    aMem = (mem_type)(++tmp);
    return aMem;
}

/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t;

    for (t = MEM_NONE, ++t; t < MEM_MAX; ++t) {
        if (MEM_DONTFREE == t)
            continue;

        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         */
        assert(MemPools[t]);
    }
}

void
memClean(void)
{
    MemPoolGlobalStats stats;
    MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}

int
memInUse(mem_type type)
{
    return memPoolInUseCount(MemPools[type]);
}

/* ick */

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
        return xfree;
    }
}
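
/*
 * Illustrative sketch (not part of the build): when a pooled buffer is handed
 * to another owner, the matching free function can travel with it instead of
 * the gross size (note the default branch above adjusts the huge-buffer
 * meters at lookup time, so call it exactly once per buffer):
 *
 *     size_t gross = 0;
 *     void *buf = memAllocBuf(1500, &gross);
 *     FREE *freer = memFreeBufFunc(gross);
 *     ...
 *     freer(buf);    // for pooled sizes this is equivalent to memFreeBuf(gross, buf)
 */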

/* MemPoolMeter */

void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

#if HAVE_IOMANIP
    stream.setf(std::ios_base::fixed);
#endif
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            needed++;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.level / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
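    /*
     * Worked example: with chunk_capacity = 100, items_inuse = 250 and
     * chunks_inuse = 5, needed = ceil(250 / 100) = 3 and excess = 5 - 3 = 2,
     * so the %Frag column above reports xpercent(2, 3), i.e. roughly 66.7%.
     */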
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}

static int
MemPoolReportSorter(const void *a, const void *b)
{
    const MemPoolStats *A = (MemPoolStats *) a;
    const MemPoolStats *B = (MemPoolStats *) b;

    // use this to sort on %Total Allocated
    //
    double pa = (double) A->obj_size * A->meter->alloc.level;
    double pb = (double) B->obj_size * B->meter->alloc.level;

    if (pa > pb)
        return -1;

    if (pb > pa)
        return 1;

#if 0
    // use this to sort on In Use high(hrs)
    //
    if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
        return -1;

    if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
        return 1;

#endif

    return 0;
}

void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc, sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool)     /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0)   /* this pool has been used */
            sortme[npools++] = mp_stats;
        else
            not_used++;
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i < npools; i++) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}