]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "base/PackableStream.h"
13 #include "ClientInfo.h"
14 #include "dlink.h"
15 #include "event.h"
16 #include "fs_io.h"
17 #include "icmp/net_db.h"
18 #include "md5.h"
19 #include "mem/forward.h"
20 #include "mem/Meter.h"
21 #include "mem/Pool.h"
22 #include "MemBuf.h"
23 #include "mgr/Registration.h"
24 #include "SquidConfig.h"
25 #include "SquidTime.h"
26 #include "Store.h"
27
28 #include <iomanip>
29
30 /* forward declarations */
31 static void memFree2K(void *);
32 static void memFree4K(void *);
33 static void memFree8K(void *);
34 static void memFree16K(void *);
35 static void memFree32K(void *);
36 static void memFree64K(void *);
37
38 /* local prototypes */
39 static void memStringStats(std::ostream &);
40
41 /* module locals */
42 static double xm_time = 0;
43 static double xm_deltat = 0;
44
45 /* string pools */
46 #define mem_str_pool_count 6
47
/// Describes one fixed-size string pool: its report label and the
/// (allocator-rounded) object size it serves. Used only by GetStrPool().
struct PoolMeta {
    const char *name;   ///< human-readable pool label shown in reports
    size_t obj_size;    ///< fixed per-object allocation size in bytes
};
52
53 static Mem::Meter StrCountMeter;
54 static Mem::Meter StrVolumeMeter;
55
56 static Mem::Meter HugeBufCountMeter;
57 static Mem::Meter HugeBufVolumeMeter;
58
59 /* local routines */
60
61 // XXX: refactor objects using these pools to use MEMPROXY classes instead
62 // then remove this function entirely
/// Returns a reference to the pool-pointer slot for the given mem_type.
/// On first use it zeroes the whole slot table and then runs Mem::Init(),
/// which calls back into GetPool() to fill the individual slots, so the
/// 'initialized' flag must be set BEFORE Mem::Init() is invoked.
static MemAllocator *&
GetPool(size_t type)
{
    static MemAllocator *pools[MEM_MAX];
    static bool initialized = false;

    if (!initialized) {
        memset(pools, '\0', sizeof(pools));
        initialized = true;
        // Mem::Init() makes use of GetPool(type) to initialize
        // the actual pools. So must come after the flag is true
        Mem::Init();
    }

    return pools[type];
}
79
/// Returns the string pool with the given index, lazily creating the
/// whole set of string pools on first use. Requested sizes are rounded
/// by the allocator; a NOTICE is logged if the resulting object size
/// differs from the requested one.
static MemAllocator &
GetStrPool(size_t type)
{
    static MemAllocator *strPools[mem_str_pool_count];
    static bool initialized = false;

    // requested label and size for each string pool, smallest first
    static const PoolMeta PoolAttrs[mem_str_pool_count] = {
        {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
        {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
        {"Long Strings", MemAllocator::RoundedSize(512)},
        {"1KB Strings", MemAllocator::RoundedSize(1024)},
        {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
        {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
    };

    if (!initialized) {
        memset(strPools, '\0', sizeof(strPools));

        /** Lastly init the string pools. */
        for (int i = 0; i < mem_str_pool_count; ++i) {
            strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
            strPools[i]->zeroBlocks(false); // do not zero-initialize blocks in these pools

            // warn when the allocator could not honor the requested size
            if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
                debugs(13, DBG_IMPORTANT, "NOTICE: " << PoolAttrs[i].name <<
                       " is " << strPools[i]->objectSize() <<
                       " bytes instead of requested " <<
                       PoolAttrs[i].obj_size << " bytes");
        }

        initialized = true;
    }

    return *strPools[type];
}
115
116 /* Find the best fit string pool type */
117 static mem_type
118 memFindStringSizeType(size_t net_size, bool fuzzy)
119 {
120 mem_type type = MEM_NONE;
121 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
122 auto &pool = GetStrPool(i);
123 if (fuzzy && net_size < pool.objectSize()) {
124 type = static_cast<mem_type>(i);
125 break;
126 } else if (net_size == pool.objectSize()) {
127 type = static_cast<mem_type>(i);
128 break;
129 }
130 }
131
132 return type;
133 }
134
135 static void
136 memStringStats(std::ostream &stream)
137 {
138 int i;
139 int pooled_count = 0;
140 size_t pooled_volume = 0;
141 /* heading */
142 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
143 /* table body */
144
145 for (i = 0; i < mem_str_pool_count; ++i) {
146 const auto &pool = GetStrPool(i);
147 const auto plevel = pool.getMeter().inuse.currentLevel();
148 stream << std::setw(20) << std::left << pool.objectType();
149 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
150 stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
151 pooled_count += plevel;
152 pooled_volume += plevel * pool.objectSize();
153 }
154
155 /* malloc strings */
156 stream << std::setw(20) << std::left << "Other Strings";
157 stream << std::right << "\t ";
158 stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
159 stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
160 }
161
162 static void
163 memBufStats(std::ostream & stream)
164 {
165 stream << "Large buffers: " <<
166 HugeBufCountMeter.currentLevel() << " (" <<
167 HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
168 }
169
/// Cache manager "mem" action handler: writes the full memory
/// utilization report (pool table, string pool stats, huge buffer
/// stats and, when running under valgrind, a leak summary) into the
/// given store entry.
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    // only meaningful when the process actually runs under valgrind
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
194
195 /*
196 * public routines
197 */
198
199 /*
200 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
201 * Will ignore repeated calls for the same pool type.
202 *
203 * Relies on Mem::Init() having been called beforehand.
204 */
205 void
206 memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
207 {
208 assert(name && size);
209
210 if (GetPool(type) != NULL)
211 return;
212
213 GetPool(type) = memPoolCreate(name, size);
214 GetPool(type)->zeroBlocks(doZero);
215 }
216
217 /* find appropriate pool and use it (pools always init buffer with 0s) */
218 void *
219 memAllocate(mem_type type)
220 {
221 assert(GetPool(type));
222 return GetPool(type)->alloc();
223 }
224
225 /* give memory back to the pool */
226 void
227 memFree(void *p, int type)
228 {
229 assert(GetPool(type));
230 GetPool(type)->freeOne(p);
231 }
232
233 /* allocate a variable size buffer using best-fit string pool */
234 void *
235 memAllocString(size_t net_size, size_t * gross_size)
236 {
237 assert(gross_size);
238
239 auto type = memFindStringSizeType(net_size, true);
240 if (type != MEM_NONE) {
241 auto &pool = GetStrPool(type);
242 *gross_size = pool.objectSize();
243 assert(*gross_size >= net_size);
244 ++StrCountMeter;
245 StrVolumeMeter += *gross_size;
246 return pool.alloc();
247 }
248
249 *gross_size = net_size;
250 ++StrCountMeter;
251 StrVolumeMeter += *gross_size;
252 return xcalloc(1, net_size);
253 }
254
255 size_t
256 memStringCount()
257 {
258 size_t result = 0;
259
260 for (int counter = 0; counter < mem_str_pool_count; ++counter)
261 result += GetStrPool(counter).inUseCount();
262
263 return result;
264 }
265
266 /* free buffer allocated with memAllocString() */
267 void
268 memFreeString(size_t size, void *buf)
269 {
270 assert(buf);
271
272 auto type = memFindStringSizeType(size, false);
273 if (type != MEM_NONE)
274 GetStrPool(type).freeOne(buf);
275 else
276 xfree(buf);
277
278 --StrCountMeter;
279 StrVolumeMeter -= size;
280 }
281
282 /* Find the best fit MEM_X_BUF type */
283 static mem_type
284 memFindBufSizeType(size_t net_size, size_t * gross_size)
285 {
286 mem_type type;
287 size_t size;
288
289 if (net_size <= 2 * 1024) {
290 type = MEM_2K_BUF;
291 size = 2 * 1024;
292 } else if (net_size <= 4 * 1024) {
293 type = MEM_4K_BUF;
294 size = 4 * 1024;
295 } else if (net_size <= 8 * 1024) {
296 type = MEM_8K_BUF;
297 size = 8 * 1024;
298 } else if (net_size <= 16 * 1024) {
299 type = MEM_16K_BUF;
300 size = 16 * 1024;
301 } else if (net_size <= 32 * 1024) {
302 type = MEM_32K_BUF;
303 size = 32 * 1024;
304 } else if (net_size <= 64 * 1024) {
305 type = MEM_64K_BUF;
306 size = 64 * 1024;
307 } else {
308 type = MEM_NONE;
309 size = net_size;
310 }
311
312 if (gross_size)
313 *gross_size = size;
314
315 return type;
316 }
317
318 /* allocate a variable size buffer using best-fit pool */
319 void *
320 memAllocBuf(size_t net_size, size_t * gross_size)
321 {
322 mem_type type = memFindBufSizeType(net_size, gross_size);
323
324 if (type != MEM_NONE)
325 return memAllocate(type);
326 else {
327 ++HugeBufCountMeter;
328 HugeBufVolumeMeter += *gross_size;
329 return xcalloc(1, net_size);
330 }
331 }
332
333 /* resize a variable sized buffer using best-fit pool */
334 void *
335 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
336 {
337 /* XXX This can be optimized on very large buffers to use realloc() */
338 /* TODO: if the existing gross size is >= new gross size, do nothing */
339 size_t new_gross_size;
340 void *newbuf = memAllocBuf(net_size, &new_gross_size);
341
342 if (oldbuf) {
343 size_t data_size = *gross_size;
344
345 if (data_size > net_size)
346 data_size = net_size;
347
348 memcpy(newbuf, oldbuf, data_size);
349
350 memFreeBuf(*gross_size, oldbuf);
351 }
352
353 *gross_size = new_gross_size;
354 return newbuf;
355 }
356
357 /* free buffer allocated with memAllocBuf() */
358 void
359 memFreeBuf(size_t size, void *buf)
360 {
361 mem_type type = memFindBufSizeType(size, NULL);
362
363 if (type != MEM_NONE)
364 memFree(buf, type);
365 else {
366 xfree(buf);
367 --HugeBufCountMeter;
368 HugeBufVolumeMeter -= size;
369 }
370 }
371
372 static double clean_interval = 15.0; /* time to live of idle chunk before release */
373
/// Periodic housekeeping event: releases pool chunks that have stayed
/// idle longer than clean_interval, then re-schedules itself to run
/// again after the same interval.
void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}
380
381 void
382 memConfigure(void)
383 {
384 int64_t new_pool_limit;
385
386 /** Set to configured value first */
387 if (!Config.onoff.mem_pools)
388 new_pool_limit = 0;
389 else if (Config.MemPools.limit > 0)
390 new_pool_limit = Config.MemPools.limit;
391 else {
392 if (Config.MemPools.limit == 0)
393 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
394 new_pool_limit = -1;
395 }
396
397 #if 0
398 /** \par
399 * DPW 2007-04-12
400 * No debugging here please because this method is called before
401 * the debug log is configured and we'll get the message on
402 * stderr when doing things like 'squid -k reconfigure'
403 */
404 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
405 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
406 #endif
407
408 MemPools::GetInstance().setIdleLimit(new_pool_limit);
409 }
410
/// One-time initialization of all legacy memory pools; safe to call
/// repeatedly (subsequent calls are no-ops). Also registers the "mem"
/// cache manager action once the pools are ready.
void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    // MD5 digests are numerous and tiny: use large chunks to cut overhead
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
452
453 void
454 Mem::Report()
455 {
456 debugs(13, 3, "Memory pools are '" <<
457 (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
458 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
459 " MB");
460 }
461
462 mem_type &operator++ (mem_type &aMem)
463 {
464 int tmp = (int)aMem;
465 aMem = (mem_type)(++tmp);
466 return aMem;
467 }
468
469 /*
470 * Test that all entries are initialized
471 */
472 void
473 memCheckInit(void)
474 {
475 mem_type t = MEM_NONE;
476
477 while (++t < MEM_MAX) {
478 /*
479 * If you hit this assertion, then you forgot to add a
480 * memDataInit() line for type 't'.
481 */
482 assert(GetPool(t));
483 }
484 }
485
/// Shutdown cleanup: drops the idle-memory limit (when pools were
/// limited), releases all idle chunks, and reports any pools still
/// holding in-use ("dirty") objects at that point.
void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    // anything left in use here was never returned to its pool
    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}
500
501 int
502 memInUse(mem_type type)
503 {
504 return GetPool(type)->inUseCount();
505 }
506
507 /* ick */
508
/// releases a 2KB buffer back to the MEM_2K_BUF pool (FREE-compatible)
void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

/// releases a 4KB buffer back to the MEM_4K_BUF pool (FREE-compatible)
void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

/// releases an 8KB buffer back to the MEM_8K_BUF pool (FREE-compatible)
void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

/// releases a 16KB buffer back to the MEM_16K_BUF pool (FREE-compatible)
void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

/// releases a 32KB buffer back to the MEM_32K_BUF pool (FREE-compatible)
void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

/// releases a 64KB buffer back to the MEM_64K_BUF pool (FREE-compatible)
void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}
544
/// FREE-compatible wrapper around xfree(), usable as a function pointer
/// (see memFreeBufFunc's fallback for huge buffers)
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}
550
551 FREE *
552 memFreeBufFunc(size_t size)
553 {
554 switch (size) {
555
556 case 2 * 1024:
557 return memFree2K;
558
559 case 4 * 1024:
560 return memFree4K;
561
562 case 8 * 1024:
563 return memFree8K;
564
565 case 16 * 1024:
566 return memFree16K;
567
568 case 32 * 1024:
569 return memFree32K;
570
571 case 64 * 1024:
572 return memFree64K;
573
574 default:
575 --HugeBufCountMeter;
576 HugeBufVolumeMeter -= size;
577 return cxx_xfree;
578 }
579 }
580
581 /* MemPoolMeter */
582
/// Writes one row of the pool utilization table for the given pool
/// stats, with columns matching the heading printed by Mem::Report():
/// chunk usage (blank for non-chunked pools), allocated / in-use / idle
/// object counts and volumes, and allocation-savings figures relative
/// to the grand totals in AllMeter. Also advances the pool's
/// "old allocated" counter used for the per-interval rate column.
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // chunks needed to hold the in-use items, rounding up
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        // chunks held beyond the minimum needed = fragmentation
        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        // non-chunked pool: emit empty cells to keep columns aligned
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocation rate since the last report; xm_deltat is set by Mem::Report()
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
652
653 static int
654 MemPoolReportSorter(const void *a, const void *b)
655 {
656 const MemPoolStats *A = (MemPoolStats *) a;
657 const MemPoolStats *B = (MemPoolStats *) b;
658
659 // use this to sort on %Total Allocated
660 //
661 double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
662 double pb = (double) B->obj_size * B->meter->alloc.currentLevel();
663
664 if (pa > pb)
665 return -1;
666
667 if (pb > pa)
668 return 1;
669
670 #if 0
671 // use this to sort on In Use high(hrs)
672 //
673 if (A->meter->inuse.peakTime() > B->meter->inuse.peakTime())
674 return -1;
675
676 if (B->meter->inuse.peakTime() > A->meter->inuse.peakTime())
677 return 1;
678
679 #endif
680
681 return 0;
682 }
683
/// Writes the full pool utilization table to the stream: a heading,
/// one PoolReport() row per pool that has ever allocated anything
/// (sorted by allocated volume), a synthetic "Total" row built from the
/// global stats, and summary lines (cumulative volume, overhead, idle
/// limit, pool counts). Also updates the module-level xm_time/xm_deltat
/// timestamps that PoolReport() uses for its rate column.
void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    // column layout here must stay in sync with Mem::PoolReport()
    stream << "Pool\t Obj Size\t"
    "Chunks\t\t\t\t\t\t\t"
    "Allocated\t\t\t\t\t"
    "In Use\t\t\t\t\t"
    "Idle\t\t\t"
    "Allocations Saved\t\t\t"
    "Rate\t"
    "\n"
    " \t (bytes)\t"
    "KB/ch\t obj/ch\t"
    "(#)\t used\t free\t part\t %Frag\t "
    "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
    "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
    "(#)\t (KB)\t high (KB)\t"
    "(#)\t %cnt\t %vol\t"
    "(#)/sec\t"
    "\n";
    // interval since the previous report, used for per-pool rates
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    // biggest allocated volume first
    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // build a synthetic "Total" row from the global statistics
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
    std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
781