]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
Cleanup: convert late initialized objects to MEMPROXY_CLASS
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "base/PackableStream.h"
15 #include "ClientInfo.h"
16 #include "dlink.h"
17 #include "event.h"
18 #include "fs_io.h"
19 #include "icmp/net_db.h"
20 #include "md5.h"
21 #include "mem/forward.h"
22 #include "mem/Meter.h"
23 #include "mem/Pool.h"
24 #include "MemBuf.h"
25 #include "mgr/Registration.h"
26 #include "SquidConfig.h"
27 #include "SquidList.h"
28 #include "SquidTime.h"
29 #include "Store.h"
30
31 #include <iomanip>
32
33 /* forward declarations */
34 static void memFree2K(void *);
35 static void memFree4K(void *);
36 static void memFree8K(void *);
37 static void memFree16K(void *);
38 static void memFree32K(void *);
39 static void memFree64K(void *);
40
41 /* local prototypes */
42 static void memStringStats(std::ostream &);
43
44 /* module locals */
45 static double xm_time = 0;
46 static double xm_deltat = 0;
47
48 /* string pools */
49 #define mem_str_pool_count 6
50
/// Descriptor for one string pool: human-readable label plus the
/// (rounded) object size it serves. Used only by GetStrPool().
struct PoolMeta {
    const char *name;     // pool label shown in cache manager reports
    size_t obj_size;      // allocation size in bytes for this pool
};
55
56 static Mem::Meter StrCountMeter;
57 static Mem::Meter StrVolumeMeter;
58
59 static Mem::Meter HugeBufCountMeter;
60 static Mem::Meter HugeBufVolumeMeter;
61
62 /* local routines */
63
64 // XXX: refactor objects using these pools to use MEMPROXY classes instead
65 // then remove this function entirely
// XXX: refactor objects using these pools to use MEMPROXY classes instead
// then remove this function entirely
/// Accessor for the lazily-built table of generic object pools, indexed
/// by mem_type. Returns a reference to the table slot so callers such as
/// memDataInit() can assign the pool pointer in place.
static MemAllocator *&
GetPool(size_t type)
{
    static MemAllocator *pools[MEM_MAX];
    static bool initialized = false;

    if (!initialized) {
        memset(pools, '\0', sizeof(pools));
        // The flag must be set before calling Mem::Init() because Init()
        // itself calls GetPool(); setting it first prevents re-entry.
        initialized = true;
        // Mem::Init() makes use of GetPool(type) to initialize
        // the actual pools. So must come after the flag is true
        Mem::Init();
    }

    return pools[type];
}
82
/// Accessor for the lazily-built table of best-fit string pools.
/// \param type index in [0, mem_str_pool_count); caller must keep it in range
/// \return the string pool serving that size class
static MemAllocator &
GetStrPool(size_t type)
{
    static MemAllocator *strPools[mem_str_pool_count];
    static bool initialized = false;

    // name and rounded object size for each string size class
    static const PoolMeta PoolAttrs[mem_str_pool_count] = {
        {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
        {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
        {"Long Strings", MemAllocator::RoundedSize(512)},
        {"1KB Strings", MemAllocator::RoundedSize(1024)},
        {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
        {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
    };

    if (!initialized) {
        memset(strPools, '\0', sizeof(strPools));

        /** Lastly init the string pools. */
        for (int i = 0; i < mem_str_pool_count; ++i) {
            strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
            // string buffers are always fully overwritten by the caller,
            // so zero-filling on allocation is skipped
            strPools[i]->zeroBlocks(false);

            // warn when the pool implementation rounds the size further
            if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
                debugs(13, DBG_IMPORTANT, "NOTICE: " << PoolAttrs[i].name <<
                       " is " << strPools[i]->objectSize() <<
                       " bytes instead of requested " <<
                       PoolAttrs[i].obj_size << " bytes");
        }

        initialized = true;
    }

    return *strPools[type];
}
118
119 /* Find the best fit string pool type */
120 static mem_type
121 memFindStringSizeType(size_t net_size, bool fuzzy)
122 {
123 mem_type type = MEM_NONE;
124 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
125 auto &pool = GetStrPool(i);
126 if (fuzzy && net_size < pool.objectSize()) {
127 type = static_cast<mem_type>(i);
128 break;
129 } else if (net_size == pool.objectSize()) {
130 type = static_cast<mem_type>(i);
131 break;
132 }
133 }
134
135 return type;
136 }
137
138 static void
139 memStringStats(std::ostream &stream)
140 {
141 int i;
142 int pooled_count = 0;
143 size_t pooled_volume = 0;
144 /* heading */
145 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
146 /* table body */
147
148 for (i = 0; i < mem_str_pool_count; ++i) {
149 const auto &pool = GetStrPool(i);
150 const auto plevel = pool.getMeter().inuse.currentLevel();
151 stream << std::setw(20) << std::left << pool.objectType();
152 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
153 stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
154 pooled_count += plevel;
155 pooled_volume += plevel * pool.objectSize();
156 }
157
158 /* malloc strings */
159 stream << std::setw(20) << std::left << "Other Strings";
160 stream << std::right << "\t ";
161 stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
162 stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
163 }
164
165 static void
166 memBufStats(std::ostream & stream)
167 {
168 stream << "Large buffers: " <<
169 HugeBufCountMeter.currentLevel() << " (" <<
170 HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
171 }
172
/// Cache manager "mem" action: write the full memory utilization report
/// (pool report, string pool stats, huge buffer stats, and — when built
/// with valgrind support and running under it — a leak summary).
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
197
198 /*
199 * public routines
200 */
201
202 /*
203 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
204 * Will ignore repeated calls for the same pool type.
205 *
206 * Relies on Mem::Init() having been called beforehand.
207 */
208 void
209 memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
210 {
211 assert(name && size);
212
213 if (GetPool(type) != NULL)
214 return;
215
216 GetPool(type) = memPoolCreate(name, size);
217 GetPool(type)->zeroBlocks(doZero);
218 }
219
220 /* find appropriate pool and use it (pools always init buffer with 0s) */
221 void *
222 memAllocate(mem_type type)
223 {
224 assert(GetPool(type));
225 return GetPool(type)->alloc();
226 }
227
228 /* give memory back to the pool */
229 void
230 memFree(void *p, int type)
231 {
232 assert(GetPool(type));
233 GetPool(type)->freeOne(p);
234 }
235
236 /* allocate a variable size buffer using best-fit string pool */
237 void *
238 memAllocString(size_t net_size, size_t * gross_size)
239 {
240 assert(gross_size);
241
242 auto type = memFindStringSizeType(net_size, true);
243 if (type != MEM_NONE) {
244 auto &pool = GetStrPool(type);
245 *gross_size = pool.objectSize();
246 assert(*gross_size >= net_size);
247 ++StrCountMeter;
248 StrVolumeMeter += *gross_size;
249 return pool.alloc();
250 }
251
252 *gross_size = net_size;
253 ++StrCountMeter;
254 StrVolumeMeter += *gross_size;
255 return xcalloc(1, net_size);
256 }
257
258 size_t
259 memStringCount()
260 {
261 size_t result = 0;
262
263 for (int counter = 0; counter < mem_str_pool_count; ++counter)
264 result += GetStrPool(counter).inUseCount();
265
266 return result;
267 }
268
269 /* free buffer allocated with memAllocString() */
270 void
271 memFreeString(size_t size, void *buf)
272 {
273 assert(buf);
274
275 auto type = memFindStringSizeType(size, false);
276 if (type != MEM_NONE)
277 GetStrPool(type).freeOne(buf);
278 else
279 xfree(buf);
280
281 --StrCountMeter;
282 StrVolumeMeter -= size;
283 }
284
285 /* Find the best fit MEM_X_BUF type */
286 static mem_type
287 memFindBufSizeType(size_t net_size, size_t * gross_size)
288 {
289 mem_type type;
290 size_t size;
291
292 if (net_size <= 2 * 1024) {
293 type = MEM_2K_BUF;
294 size = 2 * 1024;
295 } else if (net_size <= 4 * 1024) {
296 type = MEM_4K_BUF;
297 size = 4 * 1024;
298 } else if (net_size <= 8 * 1024) {
299 type = MEM_8K_BUF;
300 size = 8 * 1024;
301 } else if (net_size <= 16 * 1024) {
302 type = MEM_16K_BUF;
303 size = 16 * 1024;
304 } else if (net_size <= 32 * 1024) {
305 type = MEM_32K_BUF;
306 size = 32 * 1024;
307 } else if (net_size <= 64 * 1024) {
308 type = MEM_64K_BUF;
309 size = 64 * 1024;
310 } else {
311 type = MEM_NONE;
312 size = net_size;
313 }
314
315 if (gross_size)
316 *gross_size = size;
317
318 return type;
319 }
320
321 /* allocate a variable size buffer using best-fit pool */
322 void *
323 memAllocBuf(size_t net_size, size_t * gross_size)
324 {
325 mem_type type = memFindBufSizeType(net_size, gross_size);
326
327 if (type != MEM_NONE)
328 return memAllocate(type);
329 else {
330 ++HugeBufCountMeter;
331 HugeBufVolumeMeter += *gross_size;
332 return xcalloc(1, net_size);
333 }
334 }
335
336 /* resize a variable sized buffer using best-fit pool */
337 void *
338 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
339 {
340 /* XXX This can be optimized on very large buffers to use realloc() */
341 /* TODO: if the existing gross size is >= new gross size, do nothing */
342 size_t new_gross_size;
343 void *newbuf = memAllocBuf(net_size, &new_gross_size);
344
345 if (oldbuf) {
346 size_t data_size = *gross_size;
347
348 if (data_size > net_size)
349 data_size = net_size;
350
351 memcpy(newbuf, oldbuf, data_size);
352
353 memFreeBuf(*gross_size, oldbuf);
354 }
355
356 *gross_size = new_gross_size;
357 return newbuf;
358 }
359
360 /* free buffer allocated with memAllocBuf() */
361 void
362 memFreeBuf(size_t size, void *buf)
363 {
364 mem_type type = memFindBufSizeType(size, NULL);
365
366 if (type != MEM_NONE)
367 memFree(buf, type);
368 else {
369 xfree(buf);
370 --HugeBufCountMeter;
371 HugeBufVolumeMeter -= size;
372 }
373 }
374
375 static double clean_interval = 15.0; /* time to live of idle chunk before release */
376
377 void
378 Mem::CleanIdlePools(void *)
379 {
380 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
381 eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
382 }
383
384 void
385 memConfigure(void)
386 {
387 int64_t new_pool_limit;
388
389 /** Set to configured value first */
390 if (!Config.onoff.mem_pools)
391 new_pool_limit = 0;
392 else if (Config.MemPools.limit > 0)
393 new_pool_limit = Config.MemPools.limit;
394 else {
395 if (Config.MemPools.limit == 0)
396 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
397 new_pool_limit = -1;
398 }
399
400 #if 0
401 /** \par
402 * DPW 2007-04-12
403 * No debugging here please because this method is called before
404 * the debug log is configured and we'll get the message on
405 * stderr when doing things like 'squid -k reconfigure'
406 */
407 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
408 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
409 #endif
410
411 MemPools::GetInstance().setIdleLimit(new_pool_limit);
412 }
413
/// One-time initialization of all legacy mem_type pools.
/// Safe to call repeatedly; only the first call does work.
void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    // MD5 digests are allocated in bulk; use larger chunks for this pool
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
461
462 void
463 Mem::Report()
464 {
465 debugs(13, 3, "Memory pools are '" <<
466 (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
467 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
468 " MB");
469 }
470
471 mem_type &operator++ (mem_type &aMem)
472 {
473 int tmp = (int)aMem;
474 aMem = (mem_type)(++tmp);
475 return aMem;
476 }
477
478 /*
479 * Test that all entries are initialized
480 */
481 void
482 memCheckInit(void)
483 {
484 mem_type t = MEM_NONE;
485
486 while (++t < MEM_MAX) {
487 /*
488 * If you hit this assertion, then you forgot to add a
489 * memDataInit() line for type 't'.
490 */
491 assert(GetPool(t));
492 }
493 }
494
/// Shutdown-time cleanup: force idle pool memory to be released, then
/// report any objects still in use (potential leaks at exit).
void
memClean(void)
{
    MemPoolGlobalStats stats;
    // drop the idle limit to 0 first so clean() releases everything idle
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}
509
/// Number of objects currently allocated from the pool for 'type'.
/// NOTE(review): unlike memAllocate(), this does not assert the pool
/// exists — callers must not use it before memDataInit() for that type.
int
memInUse(mem_type type)
{
    return GetPool(type)->inUseCount();
}
515
516 /* ick */
517
/// FREE-signature wrapper: return a 2KB buffer to its pool.
void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

/// FREE-signature wrapper: return a 4KB buffer to its pool.
void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

/// FREE-signature wrapper: return an 8KB buffer to its pool.
void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

/// FREE-signature wrapper: return a 16KB buffer to its pool.
void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

/// FREE-signature wrapper: return a 32KB buffer to its pool.
void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

/// FREE-signature wrapper: return a 64KB buffer to its pool.
void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}
553
/// Plain-function adapter around xfree() so its address can be returned
/// as a FREE* by memFreeBufFunc().
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}
559
/// Return the FREE function matching a buffer allocated by memAllocBuf()
/// with the given gross size.
/// NOTE(review): for non-pool ("huge") sizes this lookup also decrements
/// the huge-buffer meters as a side effect — it assumes the caller will
/// invoke the returned function exactly once.
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        // not a pooled size: adjust accounting now, free later via xfree
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}
589
590 /* MemPoolMeter */
591
/// Write one row of the cache manager pool table for 'mp_st'.
/// \param mp_st    stats for the pool being reported
/// \param AllMeter grand-total meter used for the percentage columns
/// \param stream   destination for the tab-separated row
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // minimum chunks that could hold all in-use items (rounded up)
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        // chunks held beyond that minimum = fragmentation
        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        // non-chunked pool: emit empty columns to keep the table aligned
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocation rate since the previous report (uses module-global xm_deltat)
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    // remember the current count so the next report's rate is incremental
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
661
662 static int
663 MemPoolReportSorter(const void *a, const void *b)
664 {
665 const MemPoolStats *A = (MemPoolStats *) a;
666 const MemPoolStats *B = (MemPoolStats *) b;
667
668 // use this to sort on %Total Allocated
669 //
670 double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
671 double pb = (double) B->obj_size * B->meter->alloc.currentLevel();
672
673 if (pa > pb)
674 return -1;
675
676 if (pb > pa)
677 return 1;
678
679 #if 0
680 // use this to sort on In Use high(hrs)
681 //
682 if (A->meter->inuse.peakTime() > B->meter->inuse.peakTime())
683 return -1;
684
685 if (B->meter->inuse.peakTime() > A->meter->inuse.peakTime())
686 return 1;
687
688 #endif
689
690 return 0;
691 }
692
/// Write the full pool report table: header, one row per pool that has
/// ever allocated anything (sorted by total allocated bytes, descending),
/// a "Total" row, and summary lines.
void
Mem::Report(std::ostream &stream)
{
    // statics are reused across reports; buf only backs double_to_str()
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    // time since the previous report; PoolReport() uses xm_deltat to
    // compute per-pool allocation rates
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // build a synthetic "Total" row from the global stats
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
790