]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/old_api.cc
e4b6ca22004b9ac6116435267612681a53d85f2f
[thirdparty/squid.git] / src / mem / old_api.cc
1 /*
2 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 13 High Level Memory Pool Management */
10
11 #include "squid.h"
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "CacheDigest.h"
15 #include "ClientInfo.h"
16 #include "disk.h"
17 #include "dlink.h"
18 #include "event.h"
19 #include "icmp/net_db.h"
20 #include "md5.h"
21 #include "mem/forward.h"
22 #include "mem/Pool.h"
23 #include "MemBuf.h"
24 #include "memMeter.h"
25 #include "mgr/Registration.h"
26 #include "RegexList.h"
27 #include "SquidConfig.h"
28 #include "SquidList.h"
29 #include "SquidTime.h"
30 #include "Store.h"
31 #include "StoreEntryStream.h"
32
33 #include <iomanip>
34 #include <ostream>
35
36 /* forward declarations */
37 static void memFree2K(void *);
38 static void memFree4K(void *);
39 static void memFree8K(void *);
40 static void memFree16K(void *);
41 static void memFree32K(void *);
42 static void memFree64K(void *);
43
/* module globals */

/// system memory page size, cached once at startup (cannot change at runtime)
const size_t squidSystemPageSize=getpagesize();

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */

/// one allocator per mem_type; slots stay NULL until memDataInit() fills them
static MemAllocator *MemPools[MEM_MAX];

/// timestamp of the previous Mem::Report() run (for allocation-rate columns)
static double xm_time = 0;

/// seconds elapsed between the last two Mem::Report() runs
static double xm_deltat = 0;

/* all pools are ready to be used */
static bool MemIsInitialized = false;

/* string pools */
#define mem_str_pool_count 6

// 4 bytes bigger than the biggest string pool size
// which is in turn calculated from SmallestStringBeforeMemIsInitialized
static const size_t SmallestStringBeforeMemIsInitialized = 1024*16+4;

/// label and rounded object size for each fixed-size string pool,
/// ordered smallest first so best-fit searches can stop at the first match
static const struct {
    const char *name;
    size_t obj_size;
}

StrPoolsAttrs[mem_str_pool_count] = {

    {
        "Short Strings", MemAllocator::RoundedSize(36),
    }, /* to fit rfc1123 and similar */
    {
        "Medium Strings", MemAllocator::RoundedSize(128),
    }, /* to fit most urls */
    {
        "Long Strings", MemAllocator::RoundedSize(512),
    },
    {
        "1KB Strings", MemAllocator::RoundedSize(1024),
    },
    {
        "4KB Strings", MemAllocator::RoundedSize(4*1024),
    },
    {
        "16KB Strings",
        MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized-4)
    }
};

/// the string pools themselves; created by Mem::Init() (NULL before that)
static struct {
    MemAllocator *pool;
}

StrPools[mem_str_pool_count];

/// count and byte volume of all string allocations, pooled or not
// NOTE(review): allocations made before Mem::Init() may be under-counted --
// see the "may forget" comments in memAllocString()/memFreeString()
static MemMeter StrCountMeter;
static MemMeter StrVolumeMeter;

/// count and byte volume of oversized buffers that bypass the buffer pools
/// (see memAllocBuf()/memFreeBuf())
static MemMeter HugeBufCountMeter;
static MemMeter HugeBufVolumeMeter;
103
/* local routines */

/// Writes a per-pool string usage table to the stream: each pooled string
/// size's share of all string objects and of all string bytes, followed by
/// an "Other Strings" row covering malloc()ed (non-pooled) strings.
static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;
    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
    /* table body */

    for (i = 0; i < mem_str_pool_count; ++i) {
        const MemAllocator *pool = StrPools[i].pool;
        const int plevel = pool->getMeter().inuse.level;
        stream << std::setw(20) << std::left << pool->objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
        stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool->objectSize();
    }

    /* malloc strings */
    // whatever the global string meters counted beyond the pools was xcalloc()ed
    stream << std::setw(20) << std::left << "Other Strings";

    stream << std::right << "\t ";

    stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";

    stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
}
135
136 static void
137 memBufStats(std::ostream & stream)
138 {
139 stream << "Large buffers: " <<
140 HugeBufCountMeter.level << " (" <<
141 HugeBufVolumeMeter.level / 1024 << " KB)\n";
142 }
143
/// cache manager "mem" action handler: writes the full memory utilization
/// report (pool table, string stats, large-buffer stats and -- when running
/// under valgrind -- a leak summary) into the given store entry
void
Mem::Stats(StoreEntry * sentry)
{
    StoreEntryStream stream(sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    // only meaningful when the binary actually runs under valgrind
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}
168
169 /*
170 * public routines
171 */
172
/*
 * Create the pool for the given mem_type unless it already exists.
 *
 * We have a limit on _total_ amount of idle memory so we ignore max_pages
 * (the unnamed int parameter) for now. Will ignore repeated calls for the
 * same pool type.
 *
 * Relies on Mem::Init() having been called beforehand.
 */
void
memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
{
    assert(name && size);

    // repeated call for an already-created pool: keep the first one
    if (MemPools[type] != NULL)
        return;

    MemPools[type] = memPoolCreate(name, size);
    MemPools[type]->zeroBlocks(doZero);
}
190
/* find appropriate pool and use it (pools always init buffer with 0s) */
void *
memAllocate(mem_type type)
{
    // pool must have been created via memDataInit() (normally in Mem::Init())
    assert(MemPools[type]);
    return MemPools[type]->alloc();
}
198
/* give memory back to the pool */
void
memFree(void *p, int type)
{
    // the buffer must have come from the same pool it is returned to
    assert(MemPools[type]);
    MemPools[type]->freeOne(p);
}
206
/* allocate a variable size buffer using best-fit string pool */
void *
memAllocString(size_t net_size, size_t * gross_size)
{
    MemAllocator *pool = NULL;
    assert(gross_size);

    // if pools are not yet ready, make sure that
    // the requested size is not poolable so that the right deallocator
    // will be used (the inflated size exceeds every pool's obj_size,
    // forcing the xcalloc/xfree path below)
    if (!MemIsInitialized && net_size < SmallestStringBeforeMemIsInitialized)
        net_size = SmallestStringBeforeMemIsInitialized;

    // best fit: StrPoolsAttrs is ordered smallest first, so the first pool
    // that can hold net_size is the tightest one
    unsigned int i;
    for (i = 0; i < mem_str_pool_count; ++i) {
        if (net_size <= StrPoolsAttrs[i].obj_size) {
            pool = StrPools[i].pool;
            break;
        }
    }

    // report the actual capacity handed out; memFreeString() relies on this
    // value to find the owning pool again
    *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
    assert(*gross_size >= net_size);
    // may forget [de]allocations until MemIsInitialized
    memMeterInc(StrCountMeter);
    memMeterAdd(StrVolumeMeter, *gross_size);
    return pool ? pool->alloc() : xcalloc(1, net_size);
}
235
236 size_t
237 memStringCount()
238 {
239 size_t result = 0;
240
241 for (int counter = 0; counter < mem_str_pool_count; ++counter)
242 result += memPoolInUseCount(StrPools[counter].pool);
243
244 return result;
245 }
246
/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    MemAllocator *pool = NULL;
    assert(buf);

    // before Mem::Init(), memAllocString() only hands out xcalloc()ed
    // buffers, so there is no pool to search
    if (MemIsInitialized) {
        for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
            if (size <= StrPoolsAttrs[i].obj_size) {
                // callers must pass the gross size memAllocString() reported,
                // which for pooled buffers matches a pool size exactly
                assert(size == StrPoolsAttrs[i].obj_size);
                pool = StrPools[i].pool;
                break;
            }
        }
    }

    // may forget [de]allocations until MemIsInitialized
    memMeterDec(StrCountMeter);
    memMeterDel(StrVolumeMeter, size);
    pool ? pool->freeOne(buf) : xfree(buf);
}
269
270 /* Find the best fit MEM_X_BUF type */
271 static mem_type
272 memFindBufSizeType(size_t net_size, size_t * gross_size)
273 {
274 mem_type type;
275 size_t size;
276
277 if (net_size <= 2 * 1024) {
278 type = MEM_2K_BUF;
279 size = 2 * 1024;
280 } else if (net_size <= 4 * 1024) {
281 type = MEM_4K_BUF;
282 size = 4 * 1024;
283 } else if (net_size <= 8 * 1024) {
284 type = MEM_8K_BUF;
285 size = 8 * 1024;
286 } else if (net_size <= 16 * 1024) {
287 type = MEM_16K_BUF;
288 size = 16 * 1024;
289 } else if (net_size <= 32 * 1024) {
290 type = MEM_32K_BUF;
291 size = 32 * 1024;
292 } else if (net_size <= 64 * 1024) {
293 type = MEM_64K_BUF;
294 size = 64 * 1024;
295 } else {
296 type = MEM_NONE;
297 size = net_size;
298 }
299
300 if (gross_size)
301 *gross_size = size;
302
303 return type;
304 }
305
/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        // request too big for any pool: fall back to plain zeroed malloc and
        // track it in the huge-buffer meters instead of the pool meters
        memMeterInc(HugeBufCountMeter);
        memMeterAdd(HugeBufVolumeMeter, *gross_size);
        return xcalloc(1, net_size);
    }
}
320
/* free buffer allocated with memAllocBuf() */
/* resize a variable sized buffer using best-fit pool */
/// On entry *gross_size must hold the old buffer's capacity (as reported by
/// memAllocBuf()); on return it holds the new capacity. oldbuf may be NULL,
/// making this equivalent to memAllocBuf().
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        // copy no more than fits in the new buffer
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        // return the old buffer to its pool (or free it, if it was huge)
        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}
344
/* free buffer allocated with memAllocBuf() */
/// size must be the gross size memAllocBuf() reported for this buffer, so
/// the same best-fit mapping selects the owning pool (or the huge path).
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, NULL);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        // huge buffer: undo the meter bookkeeping done in memAllocBuf()
        xfree(buf);
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
    }
}
359
static double clean_interval = 15.0; /* time to live of idle chunk before release */

/// periodic event handler: releases pool chunks that have stayed idle
/// longer than clean_interval seconds, then re-schedules itself
void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}
368
369 void
370 memConfigure(void)
371 {
372 int64_t new_pool_limit;
373
374 /** Set to configured value first */
375 if (!Config.onoff.mem_pools)
376 new_pool_limit = 0;
377 else if (Config.MemPools.limit > 0)
378 new_pool_limit = Config.MemPools.limit;
379 else {
380 if (Config.MemPools.limit == 0)
381 debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
382 new_pool_limit = -1;
383 }
384
385 #if 0
386 /** \par
387 * DPW 2007-04-12
388 * No debugging here please because this method is called before
389 * the debug log is configured and we'll get the message on
390 * stderr when doing things like 'squid -k reconfigure'
391 */
392 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
393 debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
394 #endif
395
396 MemPools::GetInstance().setIdleLimit(new_pool_limit);
397 }
398
/* XXX make these classes do their own memory management */
#include "HttpHdrContRange.h"

/// One-time module initialization: creates the generic buffer pools and all
/// object-type pools, then the string pools, and finally registers the
/// "mem" cache manager action. Must run before memAllocate()/memAllocBuf().
void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
#if USE_CACHE_DIGESTS

    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif

    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_RELIST, "RegexList", sizeof(RegexList), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    // MD5 digests are small and numerous: use big chunks to cut overhead
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; ++i) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroBlocks(false);

        // the allocator may round object sizes up; warn when it does
        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, DBG_IMPORTANT, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    // from now on memAllocString()/memFreeString() may use the pools
    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
467
/// logs the current pooling mode and idle-memory limit at debug level 3
void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}
476
477 mem_type &operator++ (mem_type &aMem)
478 {
479 int tmp = (int)aMem;
480 aMem = (mem_type)(++tmp);
481 return aMem;
482 }
483
/*
 * Test that all entries are initialized
 */
/// Asserts that every mem_type below MEM_DONTFREE has a pool; run after
/// Mem::Init() to catch enum entries that were never given a memDataInit().
void
memCheckInit(void)
{
    mem_type t = MEM_NONE;

    while (++t < MEM_DONTFREE) {
        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         * Or placed the pool type in the wrong section of the enum list.
         */
        assert(MemPools[t]);
    }
}
501
/// shutdown cleanup: drops the idle-memory limit to zero (when one was
/// configured), releases every idle chunk, and reports any objects that
/// are still checked out ("left dirty")
void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    // snapshot taken after cleaning, so only genuinely-in-use items remain
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}
516
/// number of objects of the given type currently checked out of its pool
int
memInUse(mem_type type)
{
    return memPoolInUseCount(MemPools[type]);
}
522
/* ick */

// Fixed-size, FREE-signature wrappers around memFree() so that
// memFreeBufFunc() can hand out a plain function pointer per buffer size.

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}
560
/// FREE-signature wrapper around xfree(), used to release buffers that are
/// not owned by any pool (see memFreeBufFunc()'s default case)
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}
566
/// Returns the deallocation function matching a buffer of the given gross
/// size, for callers that must free asynchronously through a FREE pointer.
/// NOTE(review): for non-pooled (huge) sizes, this lookup itself decrements
/// the huge-buffer meters -- the size is unknown by the time the returned
/// cxx_xfree runs -- so the caller must invoke the result exactly once.
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        // huge buffer: account for the upcoming free here (see NOTE above)
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
        return cxx_xfree;
    }
}
596
/* MemPoolMeter */

/// Writes one row of the cache manager pool table: chunk statistics (when
/// the pool is chunked), allocated / in-use / idle object counts and
/// volumes, and allocation-savings figures relative to the AllMeter totals.
/// Also updates pm->gb_oallocated so the next call can compute a rate.
void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        // chunks needed to hold the in-use objects, rounding up
        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        // non-chunked pool: emit empty columns to keep the table aligned
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     *    needed = inuse.level / chunk_capacity
     *    excess = used - needed
     *    fragmentation = excess / needed * 100%
     *
     *    Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    // allocations per second since the previous report (xm_deltat seconds)
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}
668
669 static int
670 MemPoolReportSorter(const void *a, const void *b)
671 {
672 const MemPoolStats *A = (MemPoolStats *) a;
673 const MemPoolStats *B = (MemPoolStats *) b;
674
675 // use this to sort on %Total Allocated
676 //
677 double pa = (double) A->obj_size * A->meter->alloc.level;
678 double pb = (double) B->obj_size * B->meter->alloc.level;
679
680 if (pa > pb)
681 return -1;
682
683 if (pb > pa)
684 return 1;
685
686 #if 0
687 // use this to sort on In Use high(hrs)
688 //
689 if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
690 return -1;
691
692 if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
693 return 1;
694
695 #endif
696
697 return 0;
698 }
699
/// Builds the full "Current memory usage" report: a sorted per-pool table
/// (pools that were never used are skipped and only counted), a "Total"
/// summary row, and cumulative/overhead/limit footer lines.
void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    // remember elapsed time since the previous report for the Rate column
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    // biggest allocated volume first (see MemPoolReportSorter)
    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i< npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    // synthesize a "Total" row from the global stats (obj_size 1 so the
    // volume columns report raw bytes)
    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used:     " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use:    " << mp_total.tot_pools_inuse << "\n";
}
797