/*
 * $Id$
 *
 * DEBUG: section 13    High Level Memory Pool Management
 * AUTHOR: Harvest Derived
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "event.h"
#include "ClientInfo.h"
#include "Mem.h"
#include "md5.h"
#include "MemBuf.h"
#include "memMeter.h"
#include "mgr/Registration.h"
#include "protos.h"
#include "SquidTime.h"
#include "Store.h"
#include "StoreEntryStream.h"

#if HAVE_IOMANIP
#include <iomanip>
#endif
#if HAVE_OSTREAM
#include <ostream>
#endif

/* module globals */
const size_t squidSystemPageSize = getpagesize();

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
static MemAllocator *MemPools[MEM_MAX];
static double xm_time = 0;
static double xm_deltat = 0;

/* all pools are ready to be used */
static bool MemIsInitialized = false;

/* string pools */
#define mem_str_pool_count 6

// 4 bytes bigger than the biggest string pool object size, which is in
// turn calculated from SmallestStringBeforeMemIsInitialized (see the
// "16KB Strings" entry below)
static const size_t SmallestStringBeforeMemIsInitialized = 1024*16+4;

static const struct {
    const char *name;
    size_t obj_size;
} StrPoolsAttrs[mem_str_pool_count] = {
    { "Short Strings", MemAllocator::RoundedSize(36) },   /* to fit rfc1123 and similar */
    { "Medium Strings", MemAllocator::RoundedSize(128) }, /* to fit most urls */
    { "Long Strings", MemAllocator::RoundedSize(512) },
    { "1KB Strings", MemAllocator::RoundedSize(1024) },
    { "4KB Strings", MemAllocator::RoundedSize(4*1024) },
    { "16KB Strings", MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized-4) }
};
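
/*
 * Illustrative note (not part of the original sources): memAllocString()
 * below walks this table in order and picks the first pool whose obj_size
 * fits, so e.g. a 100-byte request is served from the "Medium Strings"
 * (128-byte) pool.
 */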

static struct {
    MemAllocator *pool;
} StrPools[mem_str_pool_count];

static MemMeter StrCountMeter;
static MemMeter StrVolumeMeter;

static MemMeter HugeBufCountMeter;
static MemMeter HugeBufVolumeMeter;

/* local routines */

static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;

    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";

    /* table body */
    for (i = 0; i < mem_str_pool_count; ++i) {
        const MemAllocator *pool = StrPools[i].pool;
        const int plevel = pool->getMeter().inuse.level;
        stream << std::setw(20) << std::left << pool->objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
        stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool->objectSize();
    }

    /* malloc strings */
    stream << std::setw(20) << std::left << "Other Strings";
    stream << std::right << "\t ";
    stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
    stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
}

static void
memBufStats(std::ostream &stream)
{
    stream << "Large buffers: " <<
           HugeBufCountMeter.level << " (" <<
           HugeBufVolumeMeter.level / 1024 << " KB)\n";
}

void
Mem::Stats(StoreEntry *sentry)
{
    StoreEntryStream stream(sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

/*
 * public routines
 */

/*
 * We have a limit on the _total_ amount of idle memory, so max_pages is
 * ignored for now. Repeated calls for the same pool type are ignored.
 *
 * Relies on Mem::Init() having been called beforehand.
 */
void
memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused, bool zeroOnPush)
{
    assert(name && size);

    if (MemPools[type] != NULL)
        return;

    MemPools[type] = memPoolCreate(name, size);
    MemPools[type]->zeroOnPush(zeroOnPush);
}

/* find the appropriate pool and use it (pools zero the buffer on push
 * unless zeroOnPush(false) was set at init time) */
void *
memAllocate(mem_type type)
{
    assert(MemPools[type]);
    return MemPools[type]->alloc();
}

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    assert(MemPools[type]);
    MemPools[type]->freeOne(p);
}
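
/*
 * Illustrative usage (a sketch, not part of the original sources): fixed-size
 * objects are allocated and released against the same mem_type, e.g.:
 *
 *     void *digest = memAllocate(MEM_MD5_DIGEST);
 *     // ... use the SQUID_MD5_DIGEST_LENGTH-byte buffer ...
 *     memFree(digest, MEM_MD5_DIGEST);
 */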

/* allocate a variable size buffer using best-fit string pool */
void *
memAllocString(size_t net_size, size_t *gross_size)
{
    MemAllocator *pool = NULL;
    assert(gross_size);

    // if pools are not yet ready, make sure that
    // the requested size is not poolable so that the right deallocator
    // will be used
    if (!MemIsInitialized && net_size < SmallestStringBeforeMemIsInitialized)
        net_size = SmallestStringBeforeMemIsInitialized;

    unsigned int i;
    for (i = 0; i < mem_str_pool_count; ++i) {
        if (net_size <= StrPoolsAttrs[i].obj_size) {
            pool = StrPools[i].pool;
            break;
        }
    }

    *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
    assert(*gross_size >= net_size);
    // may forget [de]allocations until MemIsInitialized
    memMeterInc(StrCountMeter);
    memMeterAdd(StrVolumeMeter, *gross_size);
    return pool ? pool->alloc() : xcalloc(1, net_size);
}

extern size_t memStringCount();
size_t
memStringCount()
{
    size_t result = 0;

    for (int counter = 0; counter < mem_str_pool_count; ++counter)
        result += memPoolInUseCount(StrPools[counter].pool);

    return result;
}

/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    MemAllocator *pool = NULL;
    assert(buf);

    if (MemIsInitialized) {
        for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
            if (size <= StrPoolsAttrs[i].obj_size) {
                assert(size == StrPoolsAttrs[i].obj_size);
                pool = StrPools[i].pool;
                break;
            }
        }
    }

    // may forget [de]allocations until MemIsInitialized
    memMeterDec(StrCountMeter);
    memMeterDel(StrVolumeMeter, size);
    pool ? pool->freeOne(buf) : xfree(buf);
}
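
/*
 * Illustrative usage (a sketch, not part of the original sources): callers
 * keep the returned gross_size and pass it back when freeing, so the same
 * pool (or xfree) is selected on both sides:
 *
 *     size_t sz = 0;
 *     char *s = static_cast<char *>(memAllocString(len + 1, &sz));
 *     // ... fill at most len + 1 bytes ...
 *     memFreeString(sz, s);
 */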

/* find the best-fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t *gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}

/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t *gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        memMeterInc(HugeBufCountMeter);
        memMeterAdd(HugeBufVolumeMeter, *gross_size);
        return xcalloc(1, net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t *gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, NULL);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
    }
}
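
/*
 * Illustrative usage (a sketch, not part of the original sources): gross_size
 * tracks the pool-rounded capacity across the buffer's lifetime:
 *
 *     size_t cap = 0;
 *     char *buf = static_cast<char *>(memAllocBuf(3000, &cap));   // 4KB pool, cap == 4096
 *     buf = static_cast<char *>(memReallocBuf(buf, 10000, &cap)); // moves to the 16KB pool
 *     memFreeBuf(cap, buf);
 */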

static double clean_interval = 15.0;    /* time to live of idle chunk before release */

void
Mem::CleanIdlePools(void *unused)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}

void
memConfigure(void)
{
    int64_t new_pool_limit;

    /** Set to configured value first */
    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

#if 0
    /** \par
     * DPW 2007-04-12
     * No debugging here please because this method is called before
     * the debug log is configured and we'll get the message on
     * stderr when doing things like 'squid -k reconfigure'
     */
    if (MemPools::GetInstance().idleLimit() > new_pool_limit)
        debugs(13, DBG_IMPORTANT, "Shrinking idle mem pools to " << std::setprecision(3) << toMB(new_pool_limit) << " MB");
#endif

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}
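
/*
 * Illustrative mapping (a sketch, not from the original sources) of squid.conf
 * settings to the idle-pool limit computed above:
 *
 *     memory_pools off         -> new_pool_limit = 0   (keep no idle memory)
 *     memory_pools_limit 5 MB  -> new_pool_limit = 5242880 (bytes)
 *     memory_pools_limit none  -> new_pool_limit = -1  (unbounded idle memory)
 */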

/* XXX make these classes do their own memory management */
#include "HttpHdrContRange.h"

void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "acl_deny_info_list",
                sizeof(acl_deny_info_list), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(acl_name_list), 0);
#if USE_CACHE_DIGESTS
    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif
    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_RELIST, "relist", sizeof(relist), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; ++i) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroOnPush(false);

        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, DBG_IMPORTANT, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    MemIsInitialized = true;

    /** \par
     * Finally register with the cache manager */
    RegisterWithCacheManager();
}

void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}

void
Mem::RegisterWithCacheManager(void)
{
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}

mem_type &operator++(mem_type &aMem)
{
    int tmp = (int)aMem;
    aMem = (mem_type)(++tmp);
    return aMem;
}
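
/*
 * Illustrative usage (a sketch, not part of the original sources): the prefix
 * increment above lets callers walk the mem_type enum, as memCheckInit()
 * does below:
 *
 *     for (mem_type t = MEM_NONE; ++t < MEM_DONTFREE;)
 *         assert(MemPools[t]);
 */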

/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t = MEM_NONE;

    while (++t < MEM_DONTFREE) {
        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't', or placed the pool type
         * in the wrong section of the enum list.
         */
        assert(MemPools[t]);
    }
}

void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}

int
memInUse(mem_type type)
{
    return memPoolInUseCount(MemPools[type]);
}

/* ick */

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

static void
cxx_xfree(void *ptr)
{
    xfree(ptr);
}

FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
        return cxx_xfree;
    }
}
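
/*
 * Illustrative usage (a sketch, not part of the original sources): code that
 * must hand a deallocator to an API expecting a FREE* resolves it from the
 * buffer's gross size (note the huge-buffer meters are adjusted immediately
 * for non-pooled sizes, so call this only when the buffer is really freed):
 *
 *     FREE *freeFunc = memFreeBufFunc(cap);
 *     freeFunc(buf);
 */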

/* MemPoolMeter */

void
Mem::PoolReport(const MemPoolStats *mp_st, const MemPoolMeter *AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

#if HAVE_IOMANIP
    stream.setf(std::ios_base::fixed);
#endif
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }

    /*
     * Fragmentation calculation:
     *    needed = inuse.level / chunk_capacity
     *    excess = used - needed
     *    fragmentation = excess / needed * 100%
     *
     *    Fragm = (alloced - (inuse / obj_ch)) / alloced
     */

    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;

    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;

    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;

    /* saved */
    stream << (int) pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}

static int
MemPoolReportSorter(const void *a, const void *b)
{
    const MemPoolStats *A = (MemPoolStats *) a;
    const MemPoolStats *B = (MemPoolStats *) b;

    // sort on % Total Allocated
    double pa = (double) A->obj_size * A->meter->alloc.level;
    double pb = (double) B->obj_size * B->meter->alloc.level;

    if (pa > pb)
        return -1;

    if (pb > pa)
        return 1;

#if 0
    // use this to sort on In Use high(hrs) instead
    if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
        return -1;

    if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
        return 1;

#endif

    return 0;
}

void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";

    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* get stats for the Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc, sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i < npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";

    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";

    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";

    /* totals */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used:     " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use:    " << mp_total.tot_pools_inuse << "\n";
}