]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
Remove memPoolInUseCount() wrapper
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "base/PackableStream.h"
15 #include "ClientInfo.h"
16 #include "dlink.h"
17 #include "event.h"
18 #include "fs_io.h"
19 #include "icmp/net_db.h"
20 #include "md5.h"
21 #include "mem/forward.h"
22 #include "mem/Meter.h"
23 #include "mem/Pool.h"
24 #include "MemBuf.h"
25 #include "mgr/Registration.h"
26 #include "SquidConfig.h"
27 #include "SquidList.h"
28 #include "SquidTime.h"
29 #include "Store.h"
30
31 #include <iomanip>
32 #include <vector>
33
/* forward declarations */
// per-size FREE-style deallocators; handed out by memFreeBufFunc()
static void memFree2K(void *);
static void memFree4K(void *);
static void memFree8K(void *);
static void memFree16K(void *);
static void memFree32K(void *);
static void memFree64K(void *);

/* local prototypes */
static void memStringStats(std::ostream &);
44
/* module locals */
// timestamp of the previous Mem::Report() run and the seconds elapsed
// since then; used to compute the per-second allocation-rate column
static double xm_time = 0;
static double xm_deltat = 0;

/* string pools */
// number of fixed-size string pools managed by GetStrPool()
#define mem_str_pool_count 6
51
/// display name and object size describing one fixed-size string pool
/// (see the PoolAttrs table in GetStrPool())
struct PoolMeta {
    const char *name;   // pool label shown in cache manager reports
    size_t obj_size;    // rounded per-object size in bytes
};
56
// accounting for strings handed out by memAllocString(): object count and
// total gross bytes (decremented by memFreeString())
static Mem::Meter StrCountMeter;
static Mem::Meter StrVolumeMeter;

// accounting for oversized buffers served straight from the heap by
// memAllocBuf() when no pooled size fits
static Mem::Meter HugeBufCountMeter;
static Mem::Meter HugeBufVolumeMeter;
62
63 /* local routines */
64 static MemAllocator *&
65 GetPool(size_t type)
66 {
67 static MemAllocator *pools[MEM_MAX];
68 static bool initialized = false;
69
70 if (!initialized) {
71 memset(pools, '\0', sizeof(pools));
72 initialized = true;
73 // Mem::Init() makes use of GetPool(type) to initialize
74 // the actual pools. So must come after the flag is true
75 Mem::Init();
76 }
77
78 return pools[type];
79 }
80
/// Returns a reference to the string pool pointer slot for 'type', an
/// index in [0, mem_str_pool_count). On first call, creates all six
/// fixed-size string pools with block zeroing disabled.
static MemAllocator *&
GetStrPool(size_t type)
{
    static MemAllocator *strPools[mem_str_pool_count];
    static bool initialized = false;

    // name and rounded object size of each string pool, smallest first;
    // memFindStringSizeType() relies on this ascending order
    static const PoolMeta PoolAttrs[mem_str_pool_count] = {
        {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
        {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
        {"Long Strings", MemAllocator::RoundedSize(512)},
        {"1KB Strings", MemAllocator::RoundedSize(1024)},
        {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
        {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
    };

    if (!initialized) {
        memset(strPools, '\0', sizeof(strPools));

        /** Lastly init the string pools. */
        for (int i = 0; i < mem_str_pool_count; ++i) {
            strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
            strPools[i]->zeroBlocks(false);

            // warn when the allocator could not honor the requested size exactly
            if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
                debugs(13, DBG_IMPORTANT, "NOTICE: " << PoolAttrs[i].name <<
                       " is " << strPools[i]->objectSize() <<
                       " bytes instead of requested " <<
                       PoolAttrs[i].obj_size << " bytes");
        }

        initialized = true;
    }

    return strPools[type];
}
116
117 /* Find the best fit string pool type */
118 static mem_type
119 memFindStringSizeType(size_t net_size, bool fuzzy)
120 {
121 mem_type type = MEM_NONE;
122 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
123 auto pool = GetStrPool(i);
124 if (!pool)
125 continue;
126 if (fuzzy && net_size < pool->objectSize()) {
127 type = static_cast<mem_type>(i);
128 break;
129 } else if (net_size == pool->objectSize()) {
130 type = static_cast<mem_type>(i);
131 break;
132 }
133 }
134
135 return type;
136 }
137
138 static void
139 memStringStats(std::ostream &stream)
140 {
141 int i;
142 int pooled_count = 0;
143 size_t pooled_volume = 0;
144 /* heading */
145 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
146 /* table body */
147
148 for (i = 0; i < mem_str_pool_count; ++i) {
149 const MemAllocator *pool = GetStrPool(i);
150 const auto plevel = pool->getMeter().inuse.currentLevel();
151 stream << std::setw(20) << std::left << pool->objectType();
152 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
153 stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.currentLevel()) << "\n";
154 pooled_count += plevel;
155 pooled_volume += plevel * pool->objectSize();
156 }
157
158 /* malloc strings */
159 stream << std::setw(20) << std::left << "Other Strings";
160 stream << std::right << "\t ";
161 stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
162 stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
163 }
164
165 static void
166 memBufStats(std::ostream & stream)
167 {
168 stream << "Large buffers: " <<
169 HugeBufCountMeter.currentLevel() << " (" <<
170 HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
171 }
172
/// cache manager "mem" action handler: writes the full memory utilization
/// report (pool table, string pools, huge buffers) into the store entry
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    // when running under valgrind, append its leak-check summary as well
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
197
198 /*
199 * public routines
200 */
201
202 /*
203 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
204 * Will ignore repeated calls for the same pool type.
205 *
206 * Relies on Mem::Init() having been called beforehand.
207 */
208 void
209 memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
210 {
211 assert(name && size);
212
213 if (GetPool(type) != NULL)
214 return;
215
216 GetPool(type) = memPoolCreate(name, size);
217 GetPool(type)->zeroBlocks(doZero);
218 }
219
220 /* find appropriate pool and use it (pools always init buffer with 0s) */
221 void *
222 memAllocate(mem_type type)
223 {
224 assert(GetPool(type));
225 return GetPool(type)->alloc();
226 }
227
228 /* give memory back to the pool */
229 void
230 memFree(void *p, int type)
231 {
232 assert(GetPool(type));
233 GetPool(type)->freeOne(p);
234 }
235
236 /* allocate a variable size buffer using best-fit string pool */
237 void *
238 memAllocString(size_t net_size, size_t * gross_size)
239 {
240 MemAllocator *pool = NULL;
241 assert(gross_size);
242
243 auto type = memFindStringSizeType(net_size, true);
244 if (type != MEM_NONE)
245 pool = GetStrPool(type);
246
247 *gross_size = pool ? pool->objectSize() : net_size;
248 assert(*gross_size >= net_size);
249 ++StrCountMeter;
250 StrVolumeMeter += *gross_size;
251 return pool ? pool->alloc() : xcalloc(1, net_size);
252 }
253
254 size_t
255 memStringCount()
256 {
257 size_t result = 0;
258
259 for (int counter = 0; counter < mem_str_pool_count; ++counter)
260 result += GetStrPool(counter)->inUseCount();
261
262 return result;
263 }
264
265 /* free buffer allocated with memAllocString() */
266 void
267 memFreeString(size_t size, void *buf)
268 {
269 MemAllocator *pool = NULL;
270 assert(buf);
271
272 auto type = memFindStringSizeType(size, false);
273 if (type != MEM_NONE)
274 pool = GetStrPool(type);
275
276 --StrCountMeter;
277 StrVolumeMeter -= size;
278 pool ? pool->freeOne(buf) : xfree(buf);
279 }
280
281 /* Find the best fit MEM_X_BUF type */
282 static mem_type
283 memFindBufSizeType(size_t net_size, size_t * gross_size)
284 {
285 mem_type type;
286 size_t size;
287
288 if (net_size <= 2 * 1024) {
289 type = MEM_2K_BUF;
290 size = 2 * 1024;
291 } else if (net_size <= 4 * 1024) {
292 type = MEM_4K_BUF;
293 size = 4 * 1024;
294 } else if (net_size <= 8 * 1024) {
295 type = MEM_8K_BUF;
296 size = 8 * 1024;
297 } else if (net_size <= 16 * 1024) {
298 type = MEM_16K_BUF;
299 size = 16 * 1024;
300 } else if (net_size <= 32 * 1024) {
301 type = MEM_32K_BUF;
302 size = 32 * 1024;
303 } else if (net_size <= 64 * 1024) {
304 type = MEM_64K_BUF;
305 size = 64 * 1024;
306 } else {
307 type = MEM_NONE;
308 size = net_size;
309 }
310
311 if (gross_size)
312 *gross_size = size;
313
314 return type;
315 }
316
317 /* allocate a variable size buffer using best-fit pool */
318 void *
319 memAllocBuf(size_t net_size, size_t * gross_size)
320 {
321 mem_type type = memFindBufSizeType(net_size, gross_size);
322
323 if (type != MEM_NONE)
324 return memAllocate(type);
325 else {
326 ++HugeBufCountMeter;
327 HugeBufVolumeMeter += *gross_size;
328 return xcalloc(1, net_size);
329 }
330 }
331
332 /* resize a variable sized buffer using best-fit pool */
333 void *
334 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
335 {
336 /* XXX This can be optimized on very large buffers to use realloc() */
337 /* TODO: if the existing gross size is >= new gross size, do nothing */
338 size_t new_gross_size;
339 void *newbuf = memAllocBuf(net_size, &new_gross_size);
340
341 if (oldbuf) {
342 size_t data_size = *gross_size;
343
344 if (data_size > net_size)
345 data_size = net_size;
346
347 memcpy(newbuf, oldbuf, data_size);
348
349 memFreeBuf(*gross_size, oldbuf);
350 }
351
352 *gross_size = new_gross_size;
353 return newbuf;
354 }
355
356 /* free buffer allocated with memAllocBuf() */
357 void
358 memFreeBuf(size_t size, void *buf)
359 {
360 mem_type type = memFindBufSizeType(size, NULL);
361
362 if (type != MEM_NONE)
363 memFree(buf, type);
364 else {
365 xfree(buf);
366 --HugeBufCountMeter;
367 HugeBufVolumeMeter -= size;
368 }
369 }
370
static double clean_interval = 15.0; /* time to live of idle chunk before release */

/// Periodic event handler: asks the pool manager to release idle chunks
/// (passing clean_interval as the age threshold) and reschedules itself
/// to run again in clean_interval seconds.
void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}
379
380 void
381 memConfigure(void)
382 {
383 int64_t new_pool_limit;
384
385 /** Set to configured value first */
386 if (!Config.onoff.mem_pools)
387 new_pool_limit = 0;
388 else if (Config.MemPools.limit > 0)
389 new_pool_limit = Config.MemPools.limit;
390 else {
391 if (Config.MemPools.limit == 0)
392 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
393 new_pool_limit = -1;
394 }
395
396 #if 0
397 /** \par
398 * DPW 2007-04-12
399 * No debugging here please because this method is called before
400 * the debug log is configured and we'll get the message on
401 * stderr when doing things like 'squid -k reconfigure'
402 */
403 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
404 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
405 #endif
406
407 MemPools::GetInstance().setIdleLimit(new_pool_limit);
408 }
409
410 /* XXX make these classes do their own memory management */
411 #include "HttpHdrContRange.h"
412
/// One-time creation of every fixed-size memory pool. Safe to call more
/// than once; also invoked lazily via GetPool() on first pool access.
void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    // NOTE(review): the 4-argument calls below rely on a default for the
    // doZero parameter declared elsewhere — confirm against the header
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
463
/// Logs (at debug level 3) whether pooling is enabled and the current
/// idle-memory limit in megabytes.
void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}
472
473 mem_type &operator++ (mem_type &aMem)
474 {
475 int tmp = (int)aMem;
476 aMem = (mem_type)(++tmp);
477 return aMem;
478 }
479
/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t = MEM_NONE;

    // walk every pooled type up to (not including) MEM_DONTFREE, which
    // per the comment below marks a boundary section of the enum
    while (++t < MEM_DONTFREE) {
        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         * Or placed the pool type in the wrong section of the enum list.
         */
        assert(GetPool(t));
    }
}
497
/// Shutdown-time cleanup: drops the idle-memory limit to zero (so clean()
/// may release everything idle) and logs any pool objects still in use.
void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    // anything still in use here is left "dirty" and worth reporting
    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}
512
513 int
514 memInUse(mem_type type)
515 {
516 return GetPool(type)->inUseCount();
517 }
518
/* ick */

// One wrapper per pooled buffer size, so that memFreeBufFunc() can hand
// out a plain FREE* deallocator matching each memAllocBuf() size class.

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}
556
/// wraps xfree() in a plain function so memFreeBufFunc() can return its
/// address as a FREE* (xfree itself may not have that exact signature)
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}
562
/// Returns the deallocator matching a gross buffer size obtained from
/// memAllocBuf()/memReallocBuf(). Note: for non-pooled ("huge") sizes the
/// huge-buffer meters are decremented here, at lookup time, rather than
/// when the returned function is eventually invoked.
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        // heap-served buffer: un-account it now and free via xfree later
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}
592
593 /* MemPoolMeter */
594
/// Writes one tab-separated row of the cache manager memory report.
/// @param mp_st    statistics snapshot for the pool being reported
/// @param AllMeter global totals, used for the row's %Total-style columns
/// @param stream   destination stream
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // chunks actually needed to hold the in-use items, rounded up
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        // chunks held beyond what the in-use items require (fragmentation)
        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        // non-chunked pool: emit empty columns to keep the table aligned
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocation rate since the previous report; xm_deltat is refreshed by
    // Mem::Report(std::ostream&) just before the rows are printed
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
664
665 static int
666 MemPoolReportSorter(const void *a, const void *b)
667 {
668 const MemPoolStats *A = (MemPoolStats *) a;
669 const MemPoolStats *B = (MemPoolStats *) b;
670
671 // use this to sort on %Total Allocated
672 //
673 double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
674 double pb = (double) B->obj_size * B->meter->alloc.currentLevel();
675
676 if (pa > pb)
677 return -1;
678
679 if (pb > pa)
680 return 1;
681
682 #if 0
683 // use this to sort on In Use high(hrs)
684 //
685 if (A->meter->inuse.peakTime() > B->meter->inuse.peakTime())
686 return -1;
687
688 if (B->meter->inuse.peakTime() > A->meter->inuse.peakTime())
689 return 1;
690
691 #endif
692
693 return 0;
694 }
695
/// Writes the full pool usage table: header, one row per pool that has
/// ever allocated (sorted by allocated volume), and a "Total" row plus
/// summary lines. Also refreshes xm_time/xm_deltat for the rate column.
void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;  // pools created but never allocated from
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    // time since the previous report, consumed by PoolReport()'s rate column
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    // order rows by total allocated volume, largest first
    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // synthesize a "Total" row from the global counters
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
793