]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
Merge cleanups branch: split most of typedefs.h
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "base/PackableStream.h"
15 #include "CacheDigest.h"
16 #include "ClientInfo.h"
17 #include "disk.h"
18 #include "dlink.h"
19 #include "event.h"
20 #include "icmp/net_db.h"
21 #include "md5.h"
22 #include "mem/forward.h"
23 #include "mem/Meter.h"
24 #include "mem/Pool.h"
25 #include "MemBuf.h"
26 #include "mgr/Registration.h"
27 #include "SquidConfig.h"
28 #include "SquidList.h"
29 #include "SquidTime.h"
30 #include "Store.h"
31
32 #include <iomanip>
33
34 /* forward declarations */
35 static void memFree2K(void *);
36 static void memFree4K(void *);
37 static void memFree8K(void *);
38 static void memFree16K(void *);
39 static void memFree32K(void *);
40 static void memFree64K(void *);
41
/* module globals */
// System VM page size, sampled once at static-initialization time.
const size_t squidSystemPageSize=getpagesize();

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
// One allocator per mem_type; slots stay NULL until memDataInit() runs.
static MemAllocator *MemPools[MEM_MAX];
// Timestamp bookkeeping for the allocation-rate column in Mem::Report().
static double xm_time = 0;
static double xm_deltat = 0;

/* all pools are ready to be used */
// Set to true at the end of Mem::Init(); before that, string [de]allocations
// bypass the pools (see memAllocString()/memFreeString()).
static bool MemIsInitialized = false;
55
/* string pools */
#define mem_str_pool_count 6

// 4 bytes bigger than the biggest string pool size
// which is in turn calculated from SmallestStringBeforeMemIsInitialized
static const size_t SmallestStringBeforeMemIsInitialized = 1024*16+4;

// Size classes for the best-fit string pools, ordered by ascending
// obj_size; memAllocString() relies on this ordering for its search.
static const struct {
    const char *name;
    size_t obj_size;
}

StrPoolsAttrs[mem_str_pool_count] = {

    {
        "Short Strings", MemAllocator::RoundedSize(36),
    }, /* to fit rfc1123 and similar */
    {
        "Medium Strings", MemAllocator::RoundedSize(128),
    }, /* to fit most urls */
    {
        "Long Strings", MemAllocator::RoundedSize(512),
    },
    {
        "1KB Strings", MemAllocator::RoundedSize(1024),
    },
    {
        "4KB Strings", MemAllocator::RoundedSize(4*1024),
    },
    {
        "16KB Strings",
        MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized-4)
    }
};

// The live allocators matching StrPoolsAttrs, created in Mem::Init().
static struct {
    MemAllocator *pool;
}

StrPools[mem_str_pool_count];
// Running totals across all string allocations (pooled and malloc()ed).
static Mem::Meter StrCountMeter;
static Mem::Meter StrVolumeMeter;

// Counters for "huge" buffers that bypassed the fixed-size buffer pools.
static Mem::Meter HugeBufCountMeter;
static Mem::Meter HugeBufVolumeMeter;
101
102 /* local routines */
103
104 static void
105 memStringStats(std::ostream &stream)
106 {
107 int i;
108 int pooled_count = 0;
109 size_t pooled_volume = 0;
110 /* heading */
111 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
112 /* table body */
113
114 for (i = 0; i < mem_str_pool_count; ++i) {
115 const MemAllocator *pool = StrPools[i].pool;
116 const auto plevel = pool->getMeter().inuse.currentLevel();
117 stream << std::setw(20) << std::left << pool->objectType();
118 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
119 stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.currentLevel()) << "\n";
120 pooled_count += plevel;
121 pooled_volume += plevel * pool->objectSize();
122 }
123
124 /* malloc strings */
125 stream << std::setw(20) << std::left << "Other Strings";
126 stream << std::right << "\t ";
127 stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
128 stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
129 }
130
131 static void
132 memBufStats(std::ostream & stream)
133 {
134 stream << "Large buffers: " <<
135 HugeBufCountMeter.currentLevel() << " (" <<
136 HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
137 }
138
/// Cache manager "mem" action handler: writes the full memory report
/// (pool table, string pools, huge buffers) into the given store entry.
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    // When running under valgrind, trigger a leak check and append the
    // leak summary to the report.
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
163
164 /*
165 * public routines
166 */
167
168 /*
169 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
170 * Will ignore repeated calls for the same pool type.
171 *
172 * Relies on Mem::Init() having been called beforehand.
173 */
174 void
175 memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
176 {
177 assert(name && size);
178
179 if (MemPools[type] != NULL)
180 return;
181
182 MemPools[type] = memPoolCreate(name, size);
183 MemPools[type]->zeroBlocks(doZero);
184 }
185
186 /* find appropriate pool and use it (pools always init buffer with 0s) */
187 void *
188 memAllocate(mem_type type)
189 {
190 assert(MemPools[type]);
191 return MemPools[type]->alloc();
192 }
193
194 /* give memory back to the pool */
195 void
196 memFree(void *p, int type)
197 {
198 assert(MemPools[type]);
199 MemPools[type]->freeOne(p);
200 }
201
202 /* allocate a variable size buffer using best-fit string pool */
203 void *
204 memAllocString(size_t net_size, size_t * gross_size)
205 {
206 MemAllocator *pool = NULL;
207 assert(gross_size);
208
209 // if pools are not yet ready, make sure that
210 // the requested size is not poolable so that the right deallocator
211 // will be used
212 if (!MemIsInitialized && net_size < SmallestStringBeforeMemIsInitialized)
213 net_size = SmallestStringBeforeMemIsInitialized;
214
215 unsigned int i;
216 for (i = 0; i < mem_str_pool_count; ++i) {
217 if (net_size <= StrPoolsAttrs[i].obj_size) {
218 pool = StrPools[i].pool;
219 break;
220 }
221 }
222
223 *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
224 assert(*gross_size >= net_size);
225 // may forget [de]allocations until MemIsInitialized
226 ++StrCountMeter;
227 StrVolumeMeter += *gross_size;
228 return pool ? pool->alloc() : xcalloc(1, net_size);
229 }
230
231 size_t
232 memStringCount()
233 {
234 size_t result = 0;
235
236 for (int counter = 0; counter < mem_str_pool_count; ++counter)
237 result += memPoolInUseCount(StrPools[counter].pool);
238
239 return result;
240 }
241
242 /* free buffer allocated with memAllocString() */
243 void
244 memFreeString(size_t size, void *buf)
245 {
246 MemAllocator *pool = NULL;
247 assert(buf);
248
249 if (MemIsInitialized) {
250 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
251 if (size <= StrPoolsAttrs[i].obj_size) {
252 assert(size == StrPoolsAttrs[i].obj_size);
253 pool = StrPools[i].pool;
254 break;
255 }
256 }
257 }
258
259 // may forget [de]allocations until MemIsInitialized
260 --StrCountMeter;
261 StrVolumeMeter -= size;
262 pool ? pool->freeOne(buf) : xfree(buf);
263 }
264
265 /* Find the best fit MEM_X_BUF type */
266 static mem_type
267 memFindBufSizeType(size_t net_size, size_t * gross_size)
268 {
269 mem_type type;
270 size_t size;
271
272 if (net_size <= 2 * 1024) {
273 type = MEM_2K_BUF;
274 size = 2 * 1024;
275 } else if (net_size <= 4 * 1024) {
276 type = MEM_4K_BUF;
277 size = 4 * 1024;
278 } else if (net_size <= 8 * 1024) {
279 type = MEM_8K_BUF;
280 size = 8 * 1024;
281 } else if (net_size <= 16 * 1024) {
282 type = MEM_16K_BUF;
283 size = 16 * 1024;
284 } else if (net_size <= 32 * 1024) {
285 type = MEM_32K_BUF;
286 size = 32 * 1024;
287 } else if (net_size <= 64 * 1024) {
288 type = MEM_64K_BUF;
289 size = 64 * 1024;
290 } else {
291 type = MEM_NONE;
292 size = net_size;
293 }
294
295 if (gross_size)
296 *gross_size = size;
297
298 return type;
299 }
300
301 /* allocate a variable size buffer using best-fit pool */
302 void *
303 memAllocBuf(size_t net_size, size_t * gross_size)
304 {
305 mem_type type = memFindBufSizeType(net_size, gross_size);
306
307 if (type != MEM_NONE)
308 return memAllocate(type);
309 else {
310 ++HugeBufCountMeter;
311 HugeBufVolumeMeter += *gross_size;
312 return xcalloc(1, net_size);
313 }
314 }
315
316 /* resize a variable sized buffer using best-fit pool */
317 void *
318 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
319 {
320 /* XXX This can be optimized on very large buffers to use realloc() */
321 /* TODO: if the existing gross size is >= new gross size, do nothing */
322 size_t new_gross_size;
323 void *newbuf = memAllocBuf(net_size, &new_gross_size);
324
325 if (oldbuf) {
326 size_t data_size = *gross_size;
327
328 if (data_size > net_size)
329 data_size = net_size;
330
331 memcpy(newbuf, oldbuf, data_size);
332
333 memFreeBuf(*gross_size, oldbuf);
334 }
335
336 *gross_size = new_gross_size;
337 return newbuf;
338 }
339
340 /* free buffer allocated with memAllocBuf() */
341 void
342 memFreeBuf(size_t size, void *buf)
343 {
344 mem_type type = memFindBufSizeType(size, NULL);
345
346 if (type != MEM_NONE)
347 memFree(buf, type);
348 else {
349 xfree(buf);
350 --HugeBufCountMeter;
351 HugeBufVolumeMeter -= size;
352 }
353 }
354
355 static double clean_interval = 15.0; /* time to live of idle chunk before release */
356
357 void
358 Mem::CleanIdlePools(void *)
359 {
360 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
361 eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
362 }
363
364 void
365 memConfigure(void)
366 {
367 int64_t new_pool_limit;
368
369 /** Set to configured value first */
370 if (!Config.onoff.mem_pools)
371 new_pool_limit = 0;
372 else if (Config.MemPools.limit > 0)
373 new_pool_limit = Config.MemPools.limit;
374 else {
375 if (Config.MemPools.limit == 0)
376 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
377 new_pool_limit = -1;
378 }
379
380 #if 0
381 /** \par
382 * DPW 2007-04-12
383 * No debugging here please because this method is called before
384 * the debug log is configured and we'll get the message on
385 * stderr when doing things like 'squid -k reconfigure'
386 */
387 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
388 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
389 #endif
390
391 MemPools::GetInstance().setIdleLimit(new_pool_limit);
392 }
393
394 /* XXX make these classes do their own memory management */
395 #include "HttpHdrContRange.h"
396
/// One-time module initialization: creates every fixed-type pool and the
/// best-fit string pools, then registers the cache manager "mem" action.
/// Must run before memAllocate()/memDataInit() users touch the pools.
void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    // NOTE(review): the 4-argument calls below rely on a default value for
    // the doZero parameter declared elsewhere (mem/forward.h) — confirm.
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
#if USE_CACHE_DIGESTS

    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif

    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    // MD5 digests are allocated in bulk, so use a larger chunk size
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; ++i) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroBlocks(false);

        // warn if the allocator rounded the requested object size
        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, DBG_IMPORTANT, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    // from now on memAllocString()/memFreeString() may use the pools
    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
461
462 void
463 Mem::Report()
464 {
465 debugs(13, 3, "Memory pools are '" <<
466 (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
467 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
468 " MB");
469 }
470
471 mem_type &operator++ (mem_type &aMem)
472 {
473 int tmp = (int)aMem;
474 aMem = (mem_type)(++tmp);
475 return aMem;
476 }
477
478 /*
479 * Test that all entries are initialized
480 */
481 void
482 memCheckInit(void)
483 {
484 mem_type t = MEM_NONE;
485
486 while (++t < MEM_DONTFREE) {
487 /*
488 * If you hit this assertion, then you forgot to add a
489 * memDataInit() line for type 't'.
490 * Or placed the pool type in the wrong section of the enum list.
491 */
492 assert(MemPools[t]);
493 }
494 }
495
496 void
497 memClean(void)
498 {
499 MemPoolGlobalStats stats;
500 if (Config.MemPools.limit > 0) // do not reset if disabled or same
501 MemPools::GetInstance().setIdleLimit(0);
502 MemPools::GetInstance().clean(0);
503 memPoolGetGlobalStats(&stats);
504
505 if (stats.tot_items_inuse)
506 debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
507 " items in " << stats.tot_chunks_inuse << " chunks and " <<
508 stats.tot_pools_inuse << " pools are left dirty");
509 }
510
511 int
512 memInUse(mem_type type)
513 {
514 return memPoolInUseCount(MemPools[type]);
515 }
516
517 /* ick */
518
519 void
520 memFree2K(void *p)
521 {
522 memFree(p, MEM_2K_BUF);
523 }
524
525 void
526 memFree4K(void *p)
527 {
528 memFree(p, MEM_4K_BUF);
529 }
530
531 void
532 memFree8K(void *p)
533 {
534 memFree(p, MEM_8K_BUF);
535 }
536
537 void
538 memFree16K(void *p)
539 {
540 memFree(p, MEM_16K_BUF);
541 }
542
543 void
544 memFree32K(void *p)
545 {
546 memFree(p, MEM_32K_BUF);
547 }
548
549 void
550 memFree64K(void *p)
551 {
552 memFree(p, MEM_64K_BUF);
553 }
554
555 static void
556 cxx_xfree(void * ptr)
557 {
558 xfree(ptr);
559 }
560
/// Return the FREE-compatible deallocator matching a buffer of the given
/// gross size, as produced by memAllocBuf()/memReallocBuf().
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        // Not a pooled size: the buffer was malloc()ed as a "huge" buffer.
        // NOTE(review): the huge-buffer meters are decremented here, at
        // lookup time, not when cxx_xfree actually runs — callers are
        // presumably expected to invoke the returned function exactly
        // once; verify against call sites.
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}
590
/* MemPoolMeter */

/// Write one row of the cache manager pool table for the pool described
/// by mp_st; AllMeter supplies grand totals for the percentage columns.
/// Column order must match the heading printed by Mem::Report(ostream&).
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    // chunk columns only apply to chunked pools (chunk_capacity > 0);
    // non-chunked pools emit empty placeholder columns instead
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // minimum chunks that could hold all in-use items (rounded up)
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        // chunks held beyond that minimum == fragmentation overhead
        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    // peak ages reported in hours
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocation rate since the previous report (xm_deltat is set by
    // Mem::Report(ostream&) before the per-pool rows are printed)
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    // remember the current count so the next report shows a fresh delta
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
662
663 static int
664 MemPoolReportSorter(const void *a, const void *b)
665 {
666 const MemPoolStats *A = (MemPoolStats *) a;
667 const MemPoolStats *B = (MemPoolStats *) b;
668
669 // use this to sort on %Total Allocated
670 //
671 double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
672 double pb = (double) B->obj_size * B->meter->alloc.currentLevel();
673
674 if (pa > pb)
675 return -1;
676
677 if (pb > pa)
678 return 1;
679
680 #if 0
681 // use this to sort on In Use high(hrs)
682 //
683 if (A->meter->inuse.peakTime() > B->meter->inuse.peakTime())
684 return -1;
685
686 if (B->meter->inuse.peakTime() > A->meter->inuse.peakTime())
687 return 1;
688
689 #endif
690
691 return 0;
692 }
693
/// Write the full per-pool memory usage table plus grand totals and
/// cumulative/overhead/limit summary lines to the given stream.
void
Mem::Report(std::ostream &stream)
{
    // NOTE(review): static scratch buffers imply this report is only ever
    // generated from a single thread at a time — confirm with callers.
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    // column order here must match Mem::PoolReport()
    stream << "Pool\t Obj Size\t"
    "Chunks\t\t\t\t\t\t\t"
    "Allocated\t\t\t\t\t"
    "In Use\t\t\t\t\t"
    "Idle\t\t\t"
    "Allocations Saved\t\t\t"
    "Rate\t"
    "\n"
    " \t (bytes)\t"
    "KB/ch\t obj/ch\t"
    "(#)\t used\t free\t part\t %Frag\t "
    "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
    "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
    "(#)\t (KB)\t high (KB)\t"
    "(#)\t %cnt\t %vol\t"
    "(#)/sec\t"
    "\n";
    // elapsed time since the previous report, used for the Rate column
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    // scratch array sized for every pool ever created; only used pools
    // are copied in for sorting
    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    // rows sorted by total allocated bytes, descending
    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // synthesize a "Total" row from the global stats (obj_size 1 so the
    // KB columns show raw byte totals)
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
    std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    // a negative mem_idle_limit means "no limit", so only print when set
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
791