/*
 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 13    High Level Memory Pool Management */

#include "squid.h"
#include "acl/AclDenyInfoList.h"
#include "acl/AclNameList.h"
#include "CacheDigest.h"
#include "ClientInfo.h"
#include "disk.h"
#include "dlink.h"
#include "event.h"
#include "icmp/net_db.h"
#include "md5.h"
#include "mem/forward.h"
#include "mem/Pool.h"
#include "MemBuf.h"
#include "memMeter.h"
#include "mgr/Registration.h"
#include "SquidConfig.h"
#include "SquidList.h"
#include "SquidTime.h"
#include "Store.h"
#include "StoreEntryStream.h"

#include <iomanip>
#include <ostream>

/* forward declarations */
static void memFree2K(void *);
static void memFree4K(void *);
static void memFree8K(void *);
static void memFree16K(void *);
static void memFree32K(void *);
static void memFree64K(void *);

/* module globals */
const size_t squidSystemPageSize = getpagesize();

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
static MemAllocator *MemPools[MEM_MAX];
static double xm_time = 0;
static double xm_deltat = 0;

/* all pools are ready to be used */
static bool MemIsInitialized = false;

/* string pools */
#define mem_str_pool_count 6

// 4 bytes bigger than the biggest string pool size; the biggest pool
// is in turn sized as SmallestStringBeforeMemIsInitialized-4 below
static const size_t SmallestStringBeforeMemIsInitialized = 1024*16+4;

static const struct {
    const char *name;
    size_t obj_size;
} StrPoolsAttrs[mem_str_pool_count] = {
    {
        "Short Strings", MemAllocator::RoundedSize(36),
    },                          /* to fit rfc1123 and similar */
    {
        "Medium Strings", MemAllocator::RoundedSize(128),
    },                          /* to fit most urls */
    {
        "Long Strings", MemAllocator::RoundedSize(512),
    },
    {
        "1KB Strings", MemAllocator::RoundedSize(1024),
    },
    {
        "4KB Strings", MemAllocator::RoundedSize(4*1024),
    },
    {
        "16KB Strings",
        MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized-4)
    }
};

static struct {
    MemAllocator *pool;
} StrPools[mem_str_pool_count];

static MemMeter StrCountMeter;
static MemMeter StrVolumeMeter;

static MemMeter HugeBufCountMeter;
static MemMeter HugeBufVolumeMeter;
/* local routines */

static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;
    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
    /* table body */

    for (i = 0; i < mem_str_pool_count; ++i) {
        const MemAllocator *pool = StrPools[i].pool;
        const int plevel = pool->getMeter().inuse.level;
        stream << std::setw(20) << std::left << pool->objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
        stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool->objectSize();
    }

    /* malloc strings */
    stream << std::setw(20) << std::left << "Other Strings";
    stream << std::right << "\t ";
    stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
    stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
}

static void
memBufStats(std::ostream &stream)
{
    stream << "Large buffers: " <<
           HugeBufCountMeter.level << " (" <<
           HugeBufVolumeMeter.level / 1024 << " KB)\n";
}

void
Mem::Stats(StoreEntry * sentry)
{
    StoreEntryStream stream(sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

/*
 * public routines
 */

/*
 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
 * Will ignore repeated calls for the same pool type.
 *
 * Relies on Mem::Init() having been called beforehand.
 */
void
memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
{
    assert(name && size);

    if (MemPools[type] != NULL)
        return;

    MemPools[type] = memPoolCreate(name, size);
    MemPools[type]->zeroBlocks(doZero);
}

/* find the appropriate pool and use it (pools zero the buffer only when
 * configured to via zeroBlocks()) */
void *
memAllocate(mem_type type)
{
    assert(MemPools[type]);
    return MemPools[type]->alloc();
}

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    assert(MemPools[type]);
    MemPools[type]->freeOne(p);
}

/* allocate a variable size buffer using best-fit string pool */
void *
memAllocString(size_t net_size, size_t * gross_size)
{
    MemAllocator *pool = NULL;
    assert(gross_size);

    // if pools are not yet ready, make sure that
    // the requested size is not poolable so that the right deallocator
    // will be used
    if (!MemIsInitialized && net_size < SmallestStringBeforeMemIsInitialized)
        net_size = SmallestStringBeforeMemIsInitialized;

    unsigned int i;
    for (i = 0; i < mem_str_pool_count; ++i) {
        if (net_size <= StrPoolsAttrs[i].obj_size) {
            pool = StrPools[i].pool;
            break;
        }
    }

    *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
    assert(*gross_size >= net_size);
    // may forget [de]allocations until MemIsInitialized
    memMeterInc(StrCountMeter);
    memMeterAdd(StrVolumeMeter, *gross_size);
    return pool ? pool->alloc() : xcalloc(1, net_size);
}

size_t
memStringCount()
{
    size_t result = 0;

    for (int counter = 0; counter < mem_str_pool_count; ++counter)
        result += memPoolInUseCount(StrPools[counter].pool);

    return result;
}

/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    MemAllocator *pool = NULL;
    assert(buf);

    if (MemIsInitialized) {
        for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
            if (size <= StrPoolsAttrs[i].obj_size) {
                assert(size == StrPoolsAttrs[i].obj_size);
                pool = StrPools[i].pool;
                break;
            }
        }
    }

    // may forget [de]allocations until MemIsInitialized
    memMeterDec(StrCountMeter);
    memMeterDel(StrVolumeMeter, size);
    pool ? pool->freeOne(buf) : xfree(buf);
}
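
/*
 * Usage sketch for the string pool API above (hypothetical caller; the
 * names "len", "src" and "s" are illustrative only). The gross size that
 * memAllocString() reports must be remembered and passed back to
 * memFreeString() so that the same pool (or xfree) is chosen on both
 * sides:
 *
 *   size_t gross = 0;
 *   char *s = static_cast<char *>(memAllocString(len + 1, &gross));
 *   memcpy(s, src, len);
 *   s[len] = '\0';
 *   ...
 *   memFreeString(gross, s);
 */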

/* Find the best fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t * gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}

/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        memMeterInc(HugeBufCountMeter);
        memMeterAdd(HugeBufVolumeMeter, *gross_size);
        return xcalloc(1, net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, NULL);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
    }
}
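
/*
 * Usage sketch for the buffer API above (hypothetical caller; "buf" and
 * the sizes are illustrative). Note that *gross_size is an in/out
 * parameter for memReallocBuf(): it must hold the current gross size on
 * entry and is updated to the new gross size on return:
 *
 *   size_t gross = 0;
 *   void *buf = memAllocBuf(3000, &gross);    // best fit: the 4KB pool
 *   buf = memReallocBuf(buf, 10000, &gross);  // moves to the 16KB pool
 *   memFreeBuf(gross, buf);
 */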

static double clean_interval = 15.0;    /* time to live of idle chunk before release */

void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}

void
memConfigure(void)
{
    int64_t new_pool_limit;

    /** Set to configured value first */
    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

#if 0
    /** \par
     * DPW 2007-04-12
     * No debugging here please because this method is called before
     * the debug log is configured and we'll get the message on
     * stderr when doing things like 'squid -k reconfigure'
     */
    if (MemPools::GetInstance().idleLimit() > new_pool_limit)
        debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
#endif

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}
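
/*
 * Illustrative squid.conf directives that feed the logic above (the
 * values are examples, not recommendations): Config.onoff.mem_pools and
 * Config.MemPools.limit are set from
 *
 *   memory_pools on
 *   memory_pools_limit 50 MB    # or "none" for an unlimited idle pool
 */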

/* XXX make these classes do their own memory management */
#include "HttpHdrContRange.h"

void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "AclDenyInfoList",
                sizeof(AclDenyInfoList), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(AclNameList), 0);
#if USE_CACHE_DIGESTS
    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif
    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; ++i) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroBlocks(false);

        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, DBG_IMPORTANT, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}
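
/*
 * Startup-order sketch, inferred from the comments in this file rather
 * than from the actual call graph (which lives elsewhere): Mem::Init()
 * runs before squid.conf is parsed, memConfigure() applies the parsed
 * pool limits afterwards, and memCheckInit() below can then assert that
 * every poolable mem_type received a pool:
 *
 *   Mem::Init();       // create all pools (pre-config)
 *   // ... parse squid.conf ...
 *   memConfigure();    // apply memory_pools / memory_pools_limit
 *   memCheckInit();    // assert all MEM_* types are initialized
 */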

void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}

mem_type &operator++ (mem_type &aMem)
{
    int tmp = (int)aMem;
    aMem = (mem_type)(++tmp);
    return aMem;
}

/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t = MEM_NONE;

    while (++t < MEM_DONTFREE) {
        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         * Or placed the pool type in the wrong section of the enum list.
         */
        assert(MemPools[t]);
    }
}

void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}

int
memInUse(mem_type type)
{
    return memPoolInUseCount(MemPools[type]);
}

/* ick */

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

static void
cxx_xfree(void *ptr)
{
    xfree(ptr);
}

FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
        return cxx_xfree;
    }
}
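
/*
 * Usage sketch for memFreeBufFunc() (hypothetical caller): it maps a
 * gross buffer size to the matching deallocator, which is useful when
 * the free must happen later through a generic FREE * callback. Note the
 * side effect visible above: for non-pooled ("huge") sizes the meters
 * are decremented here at lookup time, not when the returned function is
 * eventually called:
 *
 *   size_t gross = 0;
 *   void *buf = memAllocBuf(2048, &gross);
 *   FREE *freer = memFreeBufFunc(gross);
 *   ...
 *   freer(buf);
 */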

/* MemPoolMeter */

void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.level / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
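    /*
     * Worked example with illustrative numbers: 100 items in use at a
     * chunk_capacity of 32 gives needed = ceil(100/32) = 4 chunks; if 6
     * chunks are actually in use, excess = 6 - 4 = 2, so the %Frag
     * column shows 2 / 4 = 50%.
     */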
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}

static int
MemPoolReportSorter(const void *a, const void *b)
{
    const MemPoolStats *A = (MemPoolStats *) a;
    const MemPoolStats *B = (MemPoolStats *) b;

    // use this to sort on %Total Allocated
    //
    double pa = (double) A->obj_size * A->meter->alloc.level;
    double pb = (double) B->obj_size * B->meter->alloc.level;

    if (pa > pb)
        return -1;

    if (pb > pa)
        return 1;

#if 0
    // use this to sort on In Use high(hrs)
    //
    if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
        return -1;

    if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
        return 1;

#endif

    return 0;
}

void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc, sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i < npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* pool counts */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}
795