/* src/mem.cc (thirdparty/squid); history note: squid.h was renamed to squid-old.h and config.h to squid.h */
1 /*
2 * $Id$
3 *
4 * DEBUG: section 13 High Level Memory Pool Management
5 * AUTHOR: Harvest Derived
6 *
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
9 *
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 *
33 */
34
35 #include "squid-old.h"
36 #include "event.h"
37 #include "mgr/Registration.h"
38 #include "ClientInfo.h"
39 #include "Mem.h"
40 #include "memMeter.h"
41 #include "Store.h"
42 #include "StoreEntryStream.h"
43 #include "MemBuf.h"
44 #include "SquidTime.h"
45
46 #if HAVE_IOMANIP
47 #include <iomanip>
48 #endif
49 #if HAVE_OSTREAM
50 #include <ostream>
51 #endif
52
53 /* module globals */
54 const size_t squidSystemPageSize=getpagesize();
55
56 /* local prototypes */
57 static void memStringStats(std::ostream &);
58
59 /* module locals */
60 static MemAllocator *MemPools[MEM_MAX];
61 static double xm_time = 0;
62 static double xm_deltat = 0;
63
64 /* all pools are ready to be used */
65 static bool MemIsInitialized = false;
66
67 /* string pools */
68 #define mem_str_pool_count 6
69
70 // 4 bytes bigger than the biggest string pool size
71 // which is in turn calculated from SmallestStringBeforeMemIsInitialized
72 static const size_t SmallestStringBeforeMemIsInitialized = 1024*16+4;
73
74 static const struct {
75 const char *name;
76 size_t obj_size;
77 }
78
79 StrPoolsAttrs[mem_str_pool_count] = {
80
81 {
82 "Short Strings", MemAllocator::RoundedSize(36),
83 }, /* to fit rfc1123 and similar */
84 {
85 "Medium Strings", MemAllocator::RoundedSize(128),
86 }, /* to fit most urls */
87 {
88 "Long Strings", MemAllocator::RoundedSize(512),
89 },
90 {
91 "1KB Strings", MemAllocator::RoundedSize(1024),
92 },
93 {
94 "4KB Strings", MemAllocator::RoundedSize(4*1024),
95 },
96 {
97 "16KB Strings",
98 MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized-4)
99 }
100 };
101
102 static struct {
103 MemAllocator *pool;
104 }
105
106 StrPools[mem_str_pool_count];
107 static MemMeter StrCountMeter;
108 static MemMeter StrVolumeMeter;
109
110 static MemMeter HugeBufCountMeter;
111 static MemMeter HugeBufVolumeMeter;
112
113 /* local routines */
114
115 static void
116 memStringStats(std::ostream &stream)
117 {
118 int i;
119 int pooled_count = 0;
120 size_t pooled_volume = 0;
121 /* heading */
122 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
123 /* table body */
124
125 for (i = 0; i < mem_str_pool_count; ++i) {
126 const MemAllocator *pool = StrPools[i].pool;
127 const int plevel = pool->getMeter().inuse.level;
128 stream << std::setw(20) << std::left << pool->objectType();
129 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
130 stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
131 pooled_count += plevel;
132 pooled_volume += plevel * pool->objectSize();
133 }
134
135 /* malloc strings */
136 stream << std::setw(20) << std::left << "Other Strings";
137
138 stream << std::right << "\t ";
139
140 stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
141
142 stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
143 }
144
145 static void
146 memBufStats(std::ostream & stream)
147 {
148 stream << "Large buffers: " <<
149 HugeBufCountMeter.level << " (" <<
150 HugeBufVolumeMeter.level / 1024 << " KB)\n";
151 }
152
153 void
154 Mem::Stats(StoreEntry * sentry)
155 {
156 StoreEntryStream stream(sentry);
157 Report(stream);
158 memStringStats(stream);
159 memBufStats(stream);
160 #if WITH_VALGRIND
161 if (RUNNING_ON_VALGRIND) {
162 long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
163 stream << "Valgrind Report:\n";
164 stream << "Type\tAmount\n";
165 debugs(13, 1, "Asking valgrind for memleaks");
166 VALGRIND_DO_LEAK_CHECK;
167 debugs(13, 1, "Getting valgrind statistics");
168 VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
169 stream << "Leaked\t" << leaked << "\n";
170 stream << "Dubious\t" << dubious << "\n";
171 stream << "Reachable\t" << reachable << "\n";
172 stream << "Suppressed\t" << suppressed << "\n";
173 }
174 #endif
175 stream.flush();
176 }
177
178 /*
179 * public routines
180 */
181
182 /*
183 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
184 * Will ignore repeated calls for the same pool type.
185 *
186 * Relies on Mem::Init() having been called beforehand.
187 */
188 void
189 memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused, bool zeroOnPush)
190 {
191 assert(name && size);
192
193 if (MemPools[type] != NULL)
194 return;
195
196 MemPools[type] = memPoolCreate(name, size);
197 MemPools[type]->zeroOnPush(zeroOnPush);
198 }
199
200
201 /* find appropriate pool and use it (pools always init buffer with 0s) */
202 void *
203 memAllocate(mem_type type)
204 {
205 assert(MemPools[type]);
206 return MemPools[type]->alloc();
207 }
208
209 /* give memory back to the pool */
210 void
211 memFree(void *p, int type)
212 {
213 assert(MemPools[type]);
214 MemPools[type]->freeOne(p);
215 }
216
217 /* allocate a variable size buffer using best-fit string pool */
218 void *
219 memAllocString(size_t net_size, size_t * gross_size)
220 {
221 MemAllocator *pool = NULL;
222 assert(gross_size);
223
224 // if pools are not yet ready, make sure that
225 // the requested size is not poolable so that the right deallocator
226 // will be used
227 if (!MemIsInitialized && net_size < SmallestStringBeforeMemIsInitialized)
228 net_size = SmallestStringBeforeMemIsInitialized;
229
230 unsigned int i;
231 for (i = 0; i < mem_str_pool_count; ++i) {
232 if (net_size <= StrPoolsAttrs[i].obj_size) {
233 pool = StrPools[i].pool;
234 break;
235 }
236 }
237
238 *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
239 assert(*gross_size >= net_size);
240 // may forget [de]allocations until MemIsInitialized
241 memMeterInc(StrCountMeter);
242 memMeterAdd(StrVolumeMeter, *gross_size);
243 return pool ? pool->alloc() : xcalloc(1, net_size);
244 }
245
246 extern size_t memStringCount();
247 size_t
248 memStringCount()
249 {
250 size_t result = 0;
251
252 for (int counter = 0; counter < mem_str_pool_count; ++counter)
253 result += memPoolInUseCount(StrPools[counter].pool);
254
255 return result;
256 }
257
258 /* free buffer allocated with memAllocString() */
259 void
260 memFreeString(size_t size, void *buf)
261 {
262 MemAllocator *pool = NULL;
263 assert(buf);
264
265 if (MemIsInitialized) {
266 for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
267 if (size <= StrPoolsAttrs[i].obj_size) {
268 assert(size == StrPoolsAttrs[i].obj_size);
269 pool = StrPools[i].pool;
270 break;
271 }
272 }
273 }
274
275 // may forget [de]allocations until MemIsInitialized
276 memMeterDec(StrCountMeter);
277 memMeterDel(StrVolumeMeter, size);
278 pool ? pool->freeOne(buf) : xfree(buf);
279 }
280
281 /* Find the best fit MEM_X_BUF type */
282 static mem_type
283 memFindBufSizeType(size_t net_size, size_t * gross_size)
284 {
285 mem_type type;
286 size_t size;
287
288 if (net_size <= 2 * 1024) {
289 type = MEM_2K_BUF;
290 size = 2 * 1024;
291 } else if (net_size <= 4 * 1024) {
292 type = MEM_4K_BUF;
293 size = 4 * 1024;
294 } else if (net_size <= 8 * 1024) {
295 type = MEM_8K_BUF;
296 size = 8 * 1024;
297 } else if (net_size <= 16 * 1024) {
298 type = MEM_16K_BUF;
299 size = 16 * 1024;
300 } else if (net_size <= 32 * 1024) {
301 type = MEM_32K_BUF;
302 size = 32 * 1024;
303 } else if (net_size <= 64 * 1024) {
304 type = MEM_64K_BUF;
305 size = 64 * 1024;
306 } else {
307 type = MEM_NONE;
308 size = net_size;
309 }
310
311 if (gross_size)
312 *gross_size = size;
313
314 return type;
315 }
316
317 /* allocate a variable size buffer using best-fit pool */
318 void *
319 memAllocBuf(size_t net_size, size_t * gross_size)
320 {
321 mem_type type = memFindBufSizeType(net_size, gross_size);
322
323 if (type != MEM_NONE)
324 return memAllocate(type);
325 else {
326 memMeterInc(HugeBufCountMeter);
327 memMeterAdd(HugeBufVolumeMeter, *gross_size);
328 return xcalloc(1, net_size);
329 }
330 }
331
332 /* resize a variable sized buffer using best-fit pool */
333 void *
334 memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
335 {
336 /* XXX This can be optimized on very large buffers to use realloc() */
337 /* TODO: if the existing gross size is >= new gross size, do nothing */
338 size_t new_gross_size;
339 void *newbuf = memAllocBuf(net_size, &new_gross_size);
340
341 if (oldbuf) {
342 size_t data_size = *gross_size;
343
344 if (data_size > net_size)
345 data_size = net_size;
346
347 memcpy(newbuf, oldbuf, data_size);
348
349 memFreeBuf(*gross_size, oldbuf);
350 }
351
352 *gross_size = new_gross_size;
353 return newbuf;
354 }
355
356 /* free buffer allocated with memAllocBuf() */
357 void
358 memFreeBuf(size_t size, void *buf)
359 {
360 mem_type type = memFindBufSizeType(size, NULL);
361
362 if (type != MEM_NONE)
363 memFree(buf, type);
364 else {
365 xfree(buf);
366 memMeterDec(HugeBufCountMeter);
367 memMeterDel(HugeBufVolumeMeter, size);
368 }
369 }
370
371 static double clean_interval = 15.0; /* time to live of idle chunk before release */
372
373 void
374 Mem::CleanIdlePools(void *unused)
375 {
376 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
377 eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
378 }
379
380 void
381 memConfigure(void)
382 {
383 int64_t new_pool_limit;
384
385 /** Set to configured value first */
386 if (!Config.onoff.mem_pools)
387 new_pool_limit = 0;
388 else if (Config.MemPools.limit > 0)
389 new_pool_limit = Config.MemPools.limit;
390 else {
391 if (Config.MemPools.limit == 0)
392 debugs(13, 1, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
393 new_pool_limit = -1;
394 }
395
396 #if 0
397 /** \par
398 * DPW 2007-04-12
399 * No debugging here please because this method is called before
400 * the debug log is configured and we'll get the message on
401 * stderr when doing things like 'squid -k reconfigure'
402 */
403 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
404 debugs(13, 1, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
405 #endif
406
407 MemPools::GetInstance().setIdleLimit(new_pool_limit);
408 }
409
410 /* XXX make these classes do their own memory management */
411 #include "HttpHdrContRange.h"
412
413 void
414 Mem::Init(void)
415 {
416 int i;
417
418 /** \par
419 * NOTE: Mem::Init() is called before the config file is parsed
420 * and before the debugging module has been initialized. Any
421 * debug messages here at level 0 or 1 will always be printed
422 * on stderr.
423 */
424
425 /** \par
426 * Set all pointers to null. */
427 memset(MemPools, '\0', sizeof(MemPools));
428 /**
429 * Then initialize all pools.
430 * \par
431 * Starting with generic 2kB - 64kB buffr pools, then specific object types.
432 * \par
433 * It does not hurt much to have a lot of pools since sizeof(MemPool) is
434 * small; someday we will figure out what to do with all the entries here
435 * that are never used or used only once; perhaps we should simply use
436 * malloc() for those? @?@
437 */
438 memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
439 memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
440 memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
441 memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
442 memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
443 memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
444 memDataInit(MEM_ACL_DENY_INFO_LIST, "acl_deny_info_list",
445 sizeof(acl_deny_info_list), 0);
446 memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(acl_name_list), 0);
447 #if USE_CACHE_DIGESTS
448
449 memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
450 #endif
451
452 memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
453 memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
454 memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
455 memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
456 memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
457 memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
458 memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
459 memDataInit(MEM_RELIST, "relist", sizeof(relist), 0);
460 memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
461 memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
462 MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);
463
464 /** Lastly init the string pools. */
465 for (i = 0; i < mem_str_pool_count; ++i) {
466 StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
467 StrPools[i].pool->zeroOnPush(false);
468
469 if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
470 debugs(13, 1, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
471 }
472
473 MemIsInitialized = true;
474 /** \par
475 * finally register with the cache manager */
476 RegisterWithCacheManager();
477 }
478
479 void
480 Mem::Report()
481 {
482 debugs(13, 3, "Memory pools are '" <<
483 (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
484 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
485 " MB");
486 }
487
488 void
489 Mem::RegisterWithCacheManager(void)
490 {
491 Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
492 }
493
494 mem_type &operator++ (mem_type &aMem)
495 {
496 int tmp = (int)aMem;
497 aMem = (mem_type)(++tmp);
498 return aMem;
499 }
500
501 /*
502 * Test that all entries are initialized
503 */
504 void
505 memCheckInit(void)
506 {
507 mem_type t = MEM_NONE;
508
509 while (++t < MEM_DONTFREE) {
510 /*
511 * If you hit this assertion, then you forgot to add a
512 * memDataInit() line for type 't'.
513 * Or placed the pool type in the wrong section of the enum list.
514 */
515 assert(MemPools[t]);
516 }
517 }
518
519 void
520 memClean(void)
521 {
522 MemPoolGlobalStats stats;
523 if (Config.MemPools.limit > 0) // do not reset if disabled or same
524 MemPools::GetInstance().setIdleLimit(0);
525 MemPools::GetInstance().clean(0);
526 memPoolGetGlobalStats(&stats);
527
528 if (stats.tot_items_inuse)
529 debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
530 " items in " << stats.tot_chunks_inuse << " chunks and " <<
531 stats.tot_pools_inuse << " pools are left dirty");
532 }
533
534 int
535 memInUse(mem_type type)
536 {
537 return memPoolInUseCount(MemPools[type]);
538 }
539
540 /* ick */
541
542 void
543 memFree2K(void *p)
544 {
545 memFree(p, MEM_2K_BUF);
546 }
547
548 void
549 memFree4K(void *p)
550 {
551 memFree(p, MEM_4K_BUF);
552 }
553
554 void
555 memFree8K(void *p)
556 {
557 memFree(p, MEM_8K_BUF);
558 }
559
560 void
561 memFree16K(void *p)
562 {
563 memFree(p, MEM_16K_BUF);
564 }
565
566 void
567 memFree32K(void *p)
568 {
569 memFree(p, MEM_32K_BUF);
570 }
571
572 void
573 memFree64K(void *p)
574 {
575 memFree(p, MEM_64K_BUF);
576 }
577
578 static void
579 cxx_xfree(void * ptr)
580 {
581 xfree(ptr);
582 }
583
584 FREE *
585 memFreeBufFunc(size_t size)
586 {
587 switch (size) {
588
589 case 2 * 1024:
590 return memFree2K;
591
592 case 4 * 1024:
593 return memFree4K;
594
595 case 8 * 1024:
596 return memFree8K;
597
598 case 16 * 1024:
599 return memFree16K;
600
601 case 32 * 1024:
602 return memFree32K;
603
604 case 64 * 1024:
605 return memFree64K;
606
607 default:
608 memMeterDec(HugeBufCountMeter);
609 memMeterDel(HugeBufVolumeMeter, size);
610 return cxx_xfree;
611 }
612 }
613
614 /* MemPoolMeter */
615
616 void
617 Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
618 {
619 int excess = 0;
620 int needed = 0;
621 MemPoolMeter *pm = mp_st->meter;
622 const char *delim = "\t ";
623
624 #if HAVE_IOMANIP
625 stream.setf(std::ios_base::fixed);
626 #endif
627 stream << std::setw(20) << std::left << mp_st->label << delim;
628 stream << std::setw(4) << std::right << mp_st->obj_size << delim;
629
630 /* Chunks */
631 if (mp_st->chunk_capacity) {
632 stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
633 stream << std::setw(4) << mp_st->chunk_capacity << delim;
634
635 needed = mp_st->items_inuse / mp_st->chunk_capacity;
636
637 if (mp_st->items_inuse % mp_st->chunk_capacity)
638 ++needed;
639
640 excess = mp_st->chunks_inuse - needed;
641
642 stream << std::setw(4) << mp_st->chunks_alloc << delim;
643 stream << std::setw(4) << mp_st->chunks_inuse << delim;
644 stream << std::setw(4) << mp_st->chunks_free << delim;
645 stream << std::setw(4) << mp_st->chunks_partial << delim;
646 stream << std::setprecision(3) << xpercent(excess, needed) << delim;
647 } else {
648 stream << delim;
649 stream << delim;
650 stream << delim;
651 stream << delim;
652 stream << delim;
653 stream << delim;
654 stream << delim;
655 }
656 /*
657 * Fragmentation calculation:
658 * needed = inuse.level / chunk_capacity
659 * excess = used - needed
660 * fragmentation = excess / needed * 100%
661 *
662 * Fragm = (alloced - (inuse / obj_ch) ) / alloced
663 */
664 /* allocated */
665 stream << mp_st->items_alloc << delim;
666 stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
667 stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
668 stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
669 stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
670 /* in use */
671 stream << mp_st->items_inuse << delim;
672 stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
673 stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
674 stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
675 stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
676 /* idle */
677 stream << mp_st->items_idle << delim;
678 stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
679 stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
680 /* saved */
681 stream << (int)pm->gb_saved.count << delim;
682 stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
683 stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
684 stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
685 pm->gb_oallocated.count = pm->gb_allocated.count;
686 }
687
688 static int
689 MemPoolReportSorter(const void *a, const void *b)
690 {
691 const MemPoolStats *A = (MemPoolStats *) a;
692 const MemPoolStats *B = (MemPoolStats *) b;
693
694 // use this to sort on %Total Allocated
695 //
696 double pa = (double) A->obj_size * A->meter->alloc.level;
697 double pb = (double) B->obj_size * B->meter->alloc.level;
698
699 if (pa > pb)
700 return -1;
701
702 if (pb > pa)
703 return 1;
704
705 #if 0
706 // use this to sort on In Use high(hrs)
707 //
708 if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
709 return -1;
710
711 if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
712 return 1;
713
714 #endif
715
716 return 0;
717 }
718
719 void
720 Mem::Report(std::ostream &stream)
721 {
722 static char buf[64];
723 static MemPoolStats mp_stats;
724 static MemPoolGlobalStats mp_total;
725 int not_used = 0;
726 MemPoolIterator *iter;
727 MemAllocator *pool;
728
729 /* caption */
730 stream << "Current memory usage:\n";
731 /* heading */
732 stream << "Pool\t Obj Size\t"
733 "Chunks\t\t\t\t\t\t\t"
734 "Allocated\t\t\t\t\t"
735 "In Use\t\t\t\t\t"
736 "Idle\t\t\t"
737 "Allocations Saved\t\t\t"
738 "Rate\t"
739 "\n"
740 " \t (bytes)\t"
741 "KB/ch\t obj/ch\t"
742 "(#)\t used\t free\t part\t %Frag\t "
743 "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
744 "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
745 "(#)\t (KB)\t high (KB)\t"
746 "(#)\t %cnt\t %vol\t"
747 "(#)/sec\t"
748 "\n";
749 xm_deltat = current_dtime - xm_time;
750 xm_time = current_dtime;
751
752 /* Get stats for Totals report line */
753 memPoolGetGlobalStats(&mp_total);
754
755 MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
756 int npools = 0;
757
758 /* main table */
759 iter = memPoolIterate();
760
761 while ((pool = memPoolIterateNext(iter))) {
762 pool->getStats(&mp_stats);
763
764 if (!mp_stats.pool) /* pool destroyed */
765 continue;
766
767 if (mp_stats.pool->getMeter().gb_allocated.count > 0) /* this pool has been used */
768 sortme[npools++] = mp_stats;
769 else
770 ++not_used;
771 }
772
773 memPoolIterateDone(&iter);
774
775 qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);
776
777 for (int i = 0; i< npools; ++i) {
778 PoolReport(&sortme[i], mp_total.TheMeter, stream);
779 }
780
781 xfree(sortme);
782
783 mp_stats.pool = NULL;
784 mp_stats.label = "Total";
785 mp_stats.meter = mp_total.TheMeter;
786 mp_stats.obj_size = 1;
787 mp_stats.chunk_capacity = 0;
788 mp_stats.chunk_size = 0;
789 mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
790 mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
791 mp_stats.chunks_partial = mp_total.tot_chunks_partial;
792 mp_stats.chunks_free = mp_total.tot_chunks_free;
793 mp_stats.items_alloc = mp_total.tot_items_alloc;
794 mp_stats.items_inuse = mp_total.tot_items_inuse;
795 mp_stats.items_idle = mp_total.tot_items_idle;
796 mp_stats.overhead = mp_total.tot_overhead;
797
798 PoolReport(&mp_stats, mp_total.TheMeter, stream);
799
800 /* Cumulative */
801 stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
802 /* overhead */
803 stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
804 std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
805 /* limits */
806 if (mp_total.mem_idle_limit >= 0)
807 stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
808 /* limits */
809 stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
810 stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
811 stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
812 }