]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
Sync with trunk-r14686
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "base/PackableStream.h"
15 #include "ClientInfo.h"
16 #include "dlink.h"
17 #include "event.h"
18 #include "fs_io.h"
19 #include "icmp/net_db.h"
20 #include "md5.h"
21 #include "mem/forward.h"
22 #include "mem/Meter.h"
23 #include "mem/Pool.h"
24 #include "MemBuf.h"
25 #include "mgr/Registration.h"
26 #include "SquidConfig.h"
27 #include "SquidList.h"
28 #include "SquidTime.h"
29 #include "Store.h"
30
31 #include <iomanip>
32
/* forward declarations */
static void memFree2K(void *);
static void memFree4K(void *);
static void memFree8K(void *);
static void memFree16K(void *);
static void memFree32K(void *);
static void memFree64K(void *);

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
// snapshot time and delta between reports; used to compute per-second
// allocation rates in Mem::Report()
static double xm_time = 0;
static double xm_deltat = 0;

/* string pools */
// number of fixed-size string pools managed by GetStrPool()
#define mem_str_pool_count 6

/// name and object size describing one string pool (see GetStrPool)
struct PoolMeta {
    const char *name;
    size_t obj_size;
};

// counters covering all strings from memAllocString(), pooled or malloc'ed
static Mem::Meter StrCountMeter;
static Mem::Meter StrVolumeMeter;

// counters for oversized buffers allocated directly by memAllocBuf()
static Mem::Meter HugeBufCountMeter;
static Mem::Meter HugeBufVolumeMeter;
61
62 /* local routines */
63
64 // XXX: refactor objects using these pools to use MEMPROXY classes instead
65 // then remove this function entirely
66 static MemAllocator *&
67 GetPool(size_t type)
68 {
69 static MemAllocator *pools[MEM_MAX];
70 static bool initialized = false;
71
72 if (!initialized) {
73 memset(pools, '\0', sizeof(pools));
74 initialized = true;
75 // Mem::Init() makes use of GetPool(type) to initialize
76 // the actual pools. So must come after the flag is true
77 Mem::Init();
78 }
79
80 return pools[type];
81 }
82
83 static MemAllocator &
84 GetStrPool(size_t type)
85 {
86 static MemAllocator *strPools[mem_str_pool_count];
87 static bool initialized = false;
88
89 static const PoolMeta PoolAttrs[mem_str_pool_count] = {
90 {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
91 {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
92 {"Long Strings", MemAllocator::RoundedSize(512)},
93 {"1KB Strings", MemAllocator::RoundedSize(1024)},
94 {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
95 {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
96 };
97
98 if (!initialized) {
99 memset(strPools, '\0', sizeof(strPools));
100
101 /** Lastly init the string pools. */
102 for (int i = 0; i < mem_str_pool_count; ++i) {
103 strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
104 strPools[i]->zeroBlocks(false);
105
106 if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
107 debugs(13, DBG_IMPORTANT, "NOTICE: " << PoolAttrs[i].name <<
108 " is " << strPools[i]->objectSize() <<
109 " bytes instead of requested " <<
110 PoolAttrs[i].obj_size << " bytes");
111 }
112
113 initialized = true;
114 }
115
116 return *strPools[type];
117 }
118
119 /* Find the best fit string pool type */
120 static mem_type
121 memFindStringSizeType(size_t net_size, bool fuzzy)
122 {
123 mem_type type = MEM_NONE;
124 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
125 auto &pool = GetStrPool(i);
126 if (fuzzy && net_size < pool.objectSize()) {
127 type = static_cast<mem_type>(i);
128 break;
129 } else if (net_size == pool.objectSize()) {
130 type = static_cast<mem_type>(i);
131 break;
132 }
133 }
134
135 return type;
136 }
137
138 static void
139 memStringStats(std::ostream &stream)
140 {
141 int i;
142 int pooled_count = 0;
143 size_t pooled_volume = 0;
144 /* heading */
145 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
146 /* table body */
147
148 for (i = 0; i < mem_str_pool_count; ++i) {
149 const auto &pool = GetStrPool(i);
150 const auto plevel = pool.getMeter().inuse.currentLevel();
151 stream << std::setw(20) << std::left << pool.objectType();
152 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
153 stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
154 pooled_count += plevel;
155 pooled_volume += plevel * pool.objectSize();
156 }
157
158 /* malloc strings */
159 stream << std::setw(20) << std::left << "Other Strings";
160 stream << std::right << "\t ";
161 stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
162 stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
163 }
164
165 static void
166 memBufStats(std::ostream & stream)
167 {
168 stream << "Large buffers: " <<
169 HugeBufCountMeter.currentLevel() << " (" <<
170 HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
171 }
172
/// Cache manager "mem" action handler: writes the full memory utilization
/// report (pool table, string pools, large buffers) into the store entry.
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    // when built with valgrind support and actually running under valgrind,
    // trigger a leak check and append its summary to the report
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
197
198 /*
199 * public routines
200 */
201
202 /*
203 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
204 * Will ignore repeated calls for the same pool type.
205 *
206 * Relies on Mem::Init() having been called beforehand.
207 */
208 void
209 memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
210 {
211 assert(name && size);
212
213 if (GetPool(type) != NULL)
214 return;
215
216 GetPool(type) = memPoolCreate(name, size);
217 GetPool(type)->zeroBlocks(doZero);
218 }
219
220 /* find appropriate pool and use it (pools always init buffer with 0s) */
221 void *
222 memAllocate(mem_type type)
223 {
224 assert(GetPool(type));
225 return GetPool(type)->alloc();
226 }
227
228 /* give memory back to the pool */
229 void
230 memFree(void *p, int type)
231 {
232 assert(GetPool(type));
233 GetPool(type)->freeOne(p);
234 }
235
236 /* allocate a variable size buffer using best-fit string pool */
237 void *
238 memAllocString(size_t net_size, size_t * gross_size)
239 {
240 assert(gross_size);
241
242 auto type = memFindStringSizeType(net_size, true);
243 if (type != MEM_NONE) {
244 auto &pool = GetStrPool(type);
245 *gross_size = pool.objectSize();
246 assert(*gross_size >= net_size);
247 ++StrCountMeter;
248 StrVolumeMeter += *gross_size;
249 return pool.alloc();
250 }
251
252 *gross_size = net_size;
253 ++StrCountMeter;
254 StrVolumeMeter += *gross_size;
255 return xcalloc(1, net_size);
256 }
257
258 size_t
259 memStringCount()
260 {
261 size_t result = 0;
262
263 for (int counter = 0; counter < mem_str_pool_count; ++counter)
264 result += GetStrPool(counter).inUseCount();
265
266 return result;
267 }
268
269 /* free buffer allocated with memAllocString() */
270 void
271 memFreeString(size_t size, void *buf)
272 {
273 assert(buf);
274
275 auto type = memFindStringSizeType(size, false);
276 if (type != MEM_NONE)
277 GetStrPool(type).freeOne(buf);
278 else
279 xfree(buf);
280
281 --StrCountMeter;
282 StrVolumeMeter -= size;
283 }
284
285 /* Find the best fit MEM_X_BUF type */
286 static mem_type
287 memFindBufSizeType(size_t net_size, size_t * gross_size)
288 {
289 mem_type type;
290 size_t size;
291
292 if (net_size <= 2 * 1024) {
293 type = MEM_2K_BUF;
294 size = 2 * 1024;
295 } else if (net_size <= 4 * 1024) {
296 type = MEM_4K_BUF;
297 size = 4 * 1024;
298 } else if (net_size <= 8 * 1024) {
299 type = MEM_8K_BUF;
300 size = 8 * 1024;
301 } else if (net_size <= 16 * 1024) {
302 type = MEM_16K_BUF;
303 size = 16 * 1024;
304 } else if (net_size <= 32 * 1024) {
305 type = MEM_32K_BUF;
306 size = 32 * 1024;
307 } else if (net_size <= 64 * 1024) {
308 type = MEM_64K_BUF;
309 size = 64 * 1024;
310 } else {
311 type = MEM_NONE;
312 size = net_size;
313 }
314
315 if (gross_size)
316 *gross_size = size;
317
318 return type;
319 }
320
321 /* allocate a variable size buffer using best-fit pool */
322 void *
323 memAllocBuf(size_t net_size, size_t * gross_size)
324 {
325 mem_type type = memFindBufSizeType(net_size, gross_size);
326
327 if (type != MEM_NONE)
328 return memAllocate(type);
329 else {
330 ++HugeBufCountMeter;
331 HugeBufVolumeMeter += *gross_size;
332 return xcalloc(1, net_size);
333 }
334 }
335
336 /* resize a variable sized buffer using best-fit pool */
337 void *
338 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
339 {
340 /* XXX This can be optimized on very large buffers to use realloc() */
341 /* TODO: if the existing gross size is >= new gross size, do nothing */
342 size_t new_gross_size;
343 void *newbuf = memAllocBuf(net_size, &new_gross_size);
344
345 if (oldbuf) {
346 size_t data_size = *gross_size;
347
348 if (data_size > net_size)
349 data_size = net_size;
350
351 memcpy(newbuf, oldbuf, data_size);
352
353 memFreeBuf(*gross_size, oldbuf);
354 }
355
356 *gross_size = new_gross_size;
357 return newbuf;
358 }
359
360 /* free buffer allocated with memAllocBuf() */
361 void
362 memFreeBuf(size_t size, void *buf)
363 {
364 mem_type type = memFindBufSizeType(size, NULL);
365
366 if (type != MEM_NONE)
367 memFree(buf, type);
368 else {
369 xfree(buf);
370 --HugeBufCountMeter;
371 HugeBufVolumeMeter -= size;
372 }
373 }
374
static double clean_interval = 15.0;    /* time to live of idle chunk before release */

/// Periodic event handler: releases pool chunks that have stayed idle for
/// longer than clean_interval seconds, then re-schedules itself.
void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}
383
384 void
385 memConfigure(void)
386 {
387 int64_t new_pool_limit;
388
389 /** Set to configured value first */
390 if (!Config.onoff.mem_pools)
391 new_pool_limit = 0;
392 else if (Config.MemPools.limit > 0)
393 new_pool_limit = Config.MemPools.limit;
394 else {
395 if (Config.MemPools.limit == 0)
396 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
397 new_pool_limit = -1;
398 }
399
400 #if 0
401 /** \par
402 * DPW 2007-04-12
403 * No debugging here please because this method is called before
404 * the debug log is configured and we'll get the message on
405 * stderr when doing things like 'squid -k reconfigure'
406 */
407 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
408 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
409 #endif
410
411 MemPools::GetInstance().setIdleLimit(new_pool_limit);
412 }
413
/// One-time module initialization: creates the generic buffer pools and the
/// per-object-type pools, then registers the "mem" cache manager action.
/// Safe to call repeatedly; only the first call does any work.
void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    // MD5 digests are numerous; use large chunks to reduce overhead
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
460
461 void
462 Mem::Report()
463 {
464 debugs(13, 3, "Memory pools are '" <<
465 (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
466 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
467 " MB");
468 }
469
470 mem_type &operator++ (mem_type &aMem)
471 {
472 int tmp = (int)aMem;
473 aMem = (mem_type)(++tmp);
474 return aMem;
475 }
476
477 /*
478 * Test that all entries are initialized
479 */
480 void
481 memCheckInit(void)
482 {
483 mem_type t = MEM_NONE;
484
485 while (++t < MEM_MAX) {
486 /*
487 * If you hit this assertion, then you forgot to add a
488 * memDataInit() line for type 't'.
489 */
490 assert(GetPool(t));
491 }
492 }
493
494 void
495 memClean(void)
496 {
497 MemPoolGlobalStats stats;
498 if (Config.MemPools.limit > 0) // do not reset if disabled or same
499 MemPools::GetInstance().setIdleLimit(0);
500 MemPools::GetInstance().clean(0);
501 memPoolGetGlobalStats(&stats);
502
503 if (stats.tot_items_inuse)
504 debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
505 " items in " << stats.tot_chunks_inuse << " chunks and " <<
506 stats.tot_pools_inuse << " pools are left dirty");
507 }
508
/// current number of in-use objects in the pool for the given type
int
memInUse(mem_type type)
{
    return GetPool(type)->inUseCount();
}
514
/* ick */

// FREE-compatible wrappers around memFree(), one per fixed buffer size;
// returned by memFreeBufFunc() so a caller holding only a buffer pointer
// and its size can free it correctly.

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}
552
/// FREE-compatible wrapper around xfree() for buffers that did not come
/// from a pool (xfree may be a macro, so it needs a real function here)
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}
558
/// Returns the function that frees a buffer of the given gross size,
/// matching the allocation strategy used by memAllocBuf().
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        // NOTE(review): the Huge meters are decremented here, when the free
        // function is *selected*, not when it is eventually invoked --
        // presumably every caller invokes the returned function exactly
        // once; verify against callers before changing this.
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}
588
/* MemPoolMeter */

/// Writes one tab-delimited report row for a single pool: object size,
/// chunk usage/fragmentation, allocated/in-use/idle volumes, and the
/// allocations-saved and allocation-rate figures, relative to AllMeter
/// (the totals across all pools). Also updates the pool's gb_oallocated
/// snapshot used for the next rate computation.
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    // chunk columns only apply to chunked pools (chunk_capacity != 0);
    // non-chunked pools emit empty columns to keep the table aligned
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // minimum number of chunks that could hold all in-use items
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocation rate since the last report (xm_deltat seconds ago)
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    // remember the current count so the next report can compute a fresh rate
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
660
661 static int
662 MemPoolReportSorter(const void *a, const void *b)
663 {
664 const MemPoolStats *A = (MemPoolStats *) a;
665 const MemPoolStats *B = (MemPoolStats *) b;
666
667 // use this to sort on %Total Allocated
668 //
669 double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
670 double pb = (double) B->obj_size * B->meter->alloc.currentLevel();
671
672 if (pa > pb)
673 return -1;
674
675 if (pb > pa)
676 return 1;
677
678 #if 0
679 // use this to sort on In Use high(hrs)
680 //
681 if (A->meter->inuse.peakTime() > B->meter->inuse.peakTime())
682 return -1;
683
684 if (B->meter->inuse.peakTime() > A->meter->inuse.peakTime())
685 return 1;
686
687 #endif
688
689 return 0;
690 }
691
/// Writes the main per-pool usage table: a header, one row per pool that
/// has ever allocated anything (sorted by total allocated volume), a
/// synthetic "Total" row, and summary lines (cumulative volume, overhead,
/// idle limit, pool counts).
void
Mem::Report(std::ostream &stream)
{
    // NOTE(review): these statics make this function non-reentrant;
    // presumably reports are only generated from the main thread -- verify.
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    // refresh the inter-report interval used by PoolReport() rate columns
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    // collect stats for every live pool that has ever been used;
    // never-used pools are only counted (not_used) for the summary
    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // build a synthetic "Total" row from the global stats and report it
    // through the same per-pool formatter (obj_size 1 => volumes in bytes)
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
789