/*
 * NOTE(review): the lines below were a git-blame web-page header
 * (git.ipfire.org, thirdparty/squid.git, src/mem.cc, commit "Fix-fix for MD5.")
 * left over from extraction; preserved here as provenance only.
 */
2/*
cb6d4984 3 * $Id: mem.cc,v 1.105 2007/05/22 16:40:06 rousskov Exp $
acf5589a 4 *
7021844c 5 * DEBUG: section 13 High Level Memory Pool Management
acf5589a 6 * AUTHOR: Harvest Derived
7 *
2b6662ba 8 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 9 * ----------------------------------------------------------
acf5589a 10 *
2b6662ba 11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
acf5589a 19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
cbdec147 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 33 *
acf5589a 34 */
35
36#include "squid.h"
c21ad0f5 37
38#include <iomanip>
39#include <ostream>
40
a553a5a3 41#include "event.h"
62ee09ca 42#include "CacheManager.h"
528b2c61 43#include "Mem.h"
d96ceb8e 44#include "memMeter.h"
e6ccf245 45#include "Store.h"
c21ad0f5 46#include "StoreEntryStream.h"
0eb49b6d 47#include "MemBuf.h"
985c86bc 48#include "SquidTime.h"
acf5589a 49
7021844c 50/* module globals */
acf5589a 51
d96ceb8e 52/* local prototypes */
c21ad0f5 53static void memStringStats(std::ostream &);
d96ceb8e 54
55/* module locals */
04eb0689 56static MemAllocator *MemPools[MEM_MAX];
d96ceb8e 57static double xm_time = 0;
58static double xm_deltat = 0;
acf5589a 59
9fe7e747 60/* string pools */
61#define mem_str_pool_count 3
62e76326 62
63static const struct
64{
ec878047 65 const char *name;
9fe7e747 66 size_t obj_size;
62e76326 67}
68
69StrPoolsAttrs[mem_str_pool_count] = {
70
71 {
cb6d4984 72 "Short Strings", MemAllocator::RoundedSize(36),
62e76326 73 }, /* to fit rfc1123 and similar */
74 {
cb6d4984 75 "Medium Strings", MemAllocator::RoundedSize(128),
62e76326 76 }, /* to fit most urls */
77 {
cb6d4984 78 "Long Strings", MemAllocator::RoundedSize(512)
62e76326 79 } /* other */
80 };
81
82static struct
83{
b001e822 84 MemAllocator *pool;
62e76326 85}
86
87StrPools[mem_str_pool_count];
9fe7e747 88static MemMeter StrCountMeter;
89static MemMeter StrVolumeMeter;
90
1eb41ae8 91static MemMeter HugeBufCountMeter;
92static MemMeter HugeBufVolumeMeter;
9fe7e747 93
94/* local routines */
95
9fe7e747 96static void
c21ad0f5 97memStringStats(std::ostream &stream)
9fe7e747 98{
9fe7e747 99 int i;
100 int pooled_count = 0;
101 size_t pooled_volume = 0;
102 /* heading */
35268c70 103 stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
9fe7e747 104 /* table body */
62e76326 105
9fe7e747 106 for (i = 0; i < mem_str_pool_count; i++) {
b001e822 107 const MemAllocator *pool = StrPools[i].pool;
108 const int plevel = pool->getMeter().inuse.level;
c21ad0f5 109 stream << std::setw(20) << std::left << pool->objectType();
110 stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
111 stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
62e76326 112 pooled_count += plevel;
b001e822 113 pooled_volume += plevel * pool->objectSize();
9fe7e747 114 }
62e76326 115
9fe7e747 116 /* malloc strings */
c21ad0f5 117 stream << std::setw(20) << std::left << "Other Strings";
118
119 stream << std::right << "\t ";
62e76326 120
c21ad0f5 121 stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
122
123 stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
1eb41ae8 124}
125
126static void
c21ad0f5 127memBufStats(std::ostream & stream)
1eb41ae8 128{
c21ad0f5 129 stream << "Large buffers: " <<
130 HugeBufCountMeter.level << " (" <<
131 HugeBufVolumeMeter.level / 1024 << " KB)\n";
9fe7e747 132}
133
528b2c61 134void
135Mem::Stats(StoreEntry * sentry)
acf5589a 136{
c21ad0f5 137 StoreEntryStream stream(sentry);
138 Report(stream);
139 memStringStats(stream);
140 memBufStats(stream);
b4bab919 141#if WITH_VALGRIND
142 if (RUNNING_ON_VALGRIND) {
143 long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
144 stream << "Valgrind Report:\n";
145 stream << "Type\tAmount\n";
bf8fe701 146 debugs(13, 1, "Asking valgrind for memleaks");
b4bab919 147 VALGRIND_DO_LEAK_CHECK;
bf8fe701 148 debugs(13, 1, "Getting valgrind statistics");
b4bab919 149 VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
150 stream << "Leaked\t" << leaked << "\n";
151 stream << "Dubious\t" << dubious << "\n";
152 stream << "Reachable\t" << reachable << "\n";
153 stream << "Suppressed\t" << suppressed << "\n";
154 }
155#endif
c21ad0f5 156 stream.flush();
acf5589a 157}
158
159/*
9fe7e747 160 * public routines
acf5589a 161 */
162
58a39dc9 163/*
164 * we have a limit on _total_ amount of idle memory so we ignore
165 * max_pages for now
166 */
167void
168memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused)
169{
170 assert(name && size);
d96ceb8e 171 assert(MemPools[type] == NULL);
04eb0689 172 MemPools[type] = memPoolCreate(name, size);
58a39dc9 173}
174
9fe7e747 175
7021844c 176/* find appropriate pool and use it (pools always init buffer with 0s) */
acf5589a 177void *
7021844c 178memAllocate(mem_type type)
acf5589a 179{
b001e822 180 return MemPools[type]->alloc();
acf5589a 181}
182
db1cd23c 183/* give memory back to the pool */
acf5589a 184void
db1cd23c 185memFree(void *p, int type)
acf5589a 186{
b001e822 187 MemPools[type]->free(p);
acf5589a 188}
189
9fe7e747 190/* allocate a variable size buffer using best-fit pool */
191void *
4be8fe59 192memAllocString(size_t net_size, size_t * gross_size)
9fe7e747 193{
194 int i;
b001e822 195 MemAllocator *pool = NULL;
9fe7e747 196 assert(gross_size);
62e76326 197
9fe7e747 198 for (i = 0; i < mem_str_pool_count; i++) {
62e76326 199 if (net_size <= StrPoolsAttrs[i].obj_size) {
200 pool = StrPools[i].pool;
201 break;
202 }
9fe7e747 203 }
62e76326 204
e231a8ce 205 *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
9fe7e747 206 assert(*gross_size >= net_size);
207 memMeterInc(StrCountMeter);
208 memMeterAdd(StrVolumeMeter, *gross_size);
b001e822 209 return pool ? pool->alloc() : xcalloc(1, net_size);
9fe7e747 210}
211
0353e724 212extern size_t memStringCount();
213size_t
214memStringCount()
215{
216 size_t result = 0;
217
218 for (int counter = 0; counter < mem_str_pool_count; ++counter)
219 result += memPoolInUseCount(StrPools[counter].pool);
220
221 return result;
222}
223
4be8fe59 224/* free buffer allocated with memAllocString() */
9fe7e747 225void
4be8fe59 226memFreeString(size_t size, void *buf)
9fe7e747 227{
228 int i;
b001e822 229 MemAllocator *pool = NULL;
9fe7e747 230 assert(size && buf);
62e76326 231
9fe7e747 232 for (i = 0; i < mem_str_pool_count; i++) {
62e76326 233 if (size <= StrPoolsAttrs[i].obj_size) {
234 assert(size == StrPoolsAttrs[i].obj_size);
235 pool = StrPools[i].pool;
236 break;
237 }
9fe7e747 238 }
62e76326 239
9fe7e747 240 memMeterDec(StrCountMeter);
241 memMeterDel(StrVolumeMeter, size);
b001e822 242 pool ? pool->free(buf) : xfree(buf);
9fe7e747 243}
244
1eb41ae8 245/* Find the best fit MEM_X_BUF type */
246static mem_type
247memFindBufSizeType(size_t net_size, size_t * gross_size)
248{
249 mem_type type;
250 size_t size;
62e76326 251
fa80a8ef 252 if (net_size <= 2 * 1024) {
62e76326 253 type = MEM_2K_BUF;
254 size = 2 * 1024;
fa80a8ef 255 } else if (net_size <= 4 * 1024) {
62e76326 256 type = MEM_4K_BUF;
257 size = 4 * 1024;
fa80a8ef 258 } else if (net_size <= 8 * 1024) {
62e76326 259 type = MEM_8K_BUF;
260 size = 8 * 1024;
fa80a8ef 261 } else if (net_size <= 16 * 1024) {
62e76326 262 type = MEM_16K_BUF;
263 size = 16 * 1024;
fa80a8ef 264 } else if (net_size <= 32 * 1024) {
62e76326 265 type = MEM_32K_BUF;
266 size = 32 * 1024;
fa80a8ef 267 } else if (net_size <= 64 * 1024) {
62e76326 268 type = MEM_64K_BUF;
269 size = 64 * 1024;
1eb41ae8 270 } else {
62e76326 271 type = MEM_NONE;
272 size = net_size;
1eb41ae8 273 }
62e76326 274
1eb41ae8 275 if (gross_size)
62e76326 276 *gross_size = size;
277
1eb41ae8 278 return type;
279}
280
281/* allocate a variable size buffer using best-fit pool */
282void *
283memAllocBuf(size_t net_size, size_t * gross_size)
284{
285 mem_type type = memFindBufSizeType(net_size, gross_size);
62e76326 286
1eb41ae8 287 if (type != MEM_NONE)
62e76326 288 return memAllocate(type);
1eb41ae8 289 else {
62e76326 290 memMeterInc(HugeBufCountMeter);
291 memMeterAdd(HugeBufVolumeMeter, *gross_size);
292 return xcalloc(1, net_size);
1eb41ae8 293 }
294}
295
296/* resize a variable sized buffer using best-fit pool */
297void *
298memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
299{
300 /* XXX This can be optimized on very large buffers to use realloc() */
edce4d98 301 /* TODO: if the existing gross size is >= new gross size, do nothing */
e6ccf245 302 size_t new_gross_size;
1eb41ae8 303 void *newbuf = memAllocBuf(net_size, &new_gross_size);
62e76326 304
1eb41ae8 305 if (oldbuf) {
62e76326 306 size_t data_size = *gross_size;
307
308 if (data_size > net_size)
309 data_size = net_size;
310
311 memcpy(newbuf, oldbuf, data_size);
312
313 memFreeBuf(*gross_size, oldbuf);
1eb41ae8 314 }
62e76326 315
1eb41ae8 316 *gross_size = new_gross_size;
317 return newbuf;
318}
319
320/* free buffer allocated with memAllocBuf() */
321void
322memFreeBuf(size_t size, void *buf)
323{
324 mem_type type = memFindBufSizeType(size, NULL);
62e76326 325
1eb41ae8 326 if (type != MEM_NONE)
62e76326 327 memFree(buf, type);
1eb41ae8 328 else {
62e76326 329 xfree(buf);
330 memMeterDec(HugeBufCountMeter);
331 memMeterDel(HugeBufVolumeMeter, size);
1eb41ae8 332 }
333}
334
d96ceb8e 335static double clean_interval = 15.0; /* time to live of idle chunk before release */
336
337void
528b2c61 338Mem::CleanIdlePools(void *unused)
d96ceb8e 339{
b001e822 340 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
528b2c61 341 eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
d96ceb8e 342}
343
d96ceb8e 344void
345memConfigure(void)
346{
b001e822 347 size_t new_pool_limit;
d96ceb8e 348 /* set to configured value first */
62e76326 349
d96ceb8e 350 if (!Config.onoff.mem_pools)
62e76326 351 new_pool_limit = 0;
d96ceb8e 352 else if (Config.MemPools.limit > 0)
62e76326 353 new_pool_limit = Config.MemPools.limit;
d96ceb8e 354 else
62e76326 355 new_pool_limit = mem_unlimited_size;
d96ceb8e 356
af00d03d 357#if 0
358 /*
359 * DPW 2007-04-12
360 * No debugging here please because this method is called before
361 * the debug log is configured and we'll get the message on
362 * stderr when doing things like 'squid -k reconfigure'
363 */
b001e822 364 if (MemPools::GetInstance().idleLimit() > new_pool_limit)
bf8fe701 365 debugs(13, 1, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
af00d03d 366#endif
62e76326 367
b001e822 368 MemPools::GetInstance().setIdleLimit(new_pool_limit);
d96ceb8e 369}
1eb41ae8 370
528b2c61 371/* XXX make these classes do their own memory management */
372#include "HttpHdrContRange.h"
373
acf5589a 374void
528b2c61 375Mem::Init(void)
acf5589a 376{
9fe7e747 377 int i;
d96ceb8e 378
33135cfb 379 /*
380 * NOTE: Mem::Init() is called before the config file is parsed
381 * and before the debugging module has been initialized. Any
382 * debug messages here at level 0 or 1 will always be printed
383 * on stderr.
384 */
bf8fe701 385 debugs(13, 3, "Memory pools are '" << ((Config.onoff.mem_pools ? "on" : "off")) << "'; limit: "<<
386 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) << " MB");
d96ceb8e 387
7021844c 388 /* set all pointers to null */
389 memset(MemPools, '\0', sizeof(MemPools));
390 /*
391 * it does not hurt much to have a lot of pools since sizeof(MemPool) is
392 * small; someday we will figure out what to do with all the entries here
393 * that are never used or used only once; perhaps we should simply use
394 * malloc() for those? @?@
395 */
137ee196 396 memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10);
acf5589a 397 memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10);
398 memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10);
58cd5bbd 399 memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10);
400 memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10);
401 memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10);
acf5589a 402 memDataInit(MEM_ACL_DENY_INFO_LIST, "acl_deny_info_list",
62e76326 403 sizeof(acl_deny_info_list), 0);
acf5589a 404 memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(acl_name_list), 0);
c68e9c6b 405#if USE_CACHE_DIGESTS
62e76326 406
26c2ce6f 407 memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
c68e9c6b 408#endif
62e76326 409
58cd5bbd 410 memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
acf5589a 411 memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
acf5589a 412 memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
413 memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
7faf2bdb 414 memDataInit(MEM_HTTP_HDR_CC, "HttpHdrCc", sizeof(HttpHdrCc), 0);
d76fcfa7 415 memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
acf5589a 416 memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
417 memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
acf5589a 418 memDataInit(MEM_RELIST, "relist", sizeof(relist), 0);
59c4d35b 419 memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
e55650e3 420 memDataInit(MEM_MD5_DIGEST, "MD5 digest", MD5_DIGEST_CHARS, 0);
b001e822 421 MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);
58cd5bbd 422
58a39dc9 423 /* init string pools */
62e76326 424
58a39dc9 425 for (i = 0; i < mem_str_pool_count; i++) {
04eb0689 426 StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
e231a8ce 427
b001e822 428 if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
83ba0bf7 429 debugs(13, 1, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
58a39dc9 430 }
62ee09ca 431}
62e76326 432
62ee09ca 433void
434Mem::RegisterWithCacheManager(CacheManager & manager)
435{
436 manager.registerAction("mem",
437 "Memory Utilization",
438 Mem::Stats, 0, 1);
58a39dc9 439}
440
e6ccf245 441mem_type &operator++ (mem_type &aMem)
442{
1f1ae50a 443 int tmp = (int)aMem;
444 aMem = (mem_type)(++tmp);
e6ccf245 445 return aMem;
446}
447
58a39dc9 448/*
449 * Test that all entries are initialized
450 */
451void
452memCheckInit(void)
453{
454 mem_type t;
62e76326 455
e6ccf245 456 for (t = MEM_NONE, ++t; t < MEM_MAX; ++t) {
62e76326 457 if (MEM_DONTFREE == t)
458 continue;
459
460 /*
461 * If you hit this assertion, then you forgot to add a
462 * memDataInit() line for type 't'.
463 */
464 assert(MemPools[t]);
acf5589a 465 }
466}
467
468void
58a39dc9 469memClean(void)
acf5589a 470{
d96ceb8e 471 MemPoolGlobalStats stats;
b001e822 472 MemPools::GetInstance().setIdleLimit(0);
473 MemPools::GetInstance().clean(0);
d96ceb8e 474 memPoolGetGlobalStats(&stats);
62e76326 475
d96ceb8e 476 if (stats.tot_items_inuse)
bf8fe701 477 debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
478 " items in " << stats.tot_chunks_inuse << " chunks and " <<
479 stats.tot_pools_inuse << " pools are left dirty");
acf5589a 480}
481
acf5589a 482int
483memInUse(mem_type type)
484{
b4832aa9 485 return memPoolInUseCount(MemPools[type]);
acf5589a 486}
487
488/* ick */
489
1eb41ae8 490static void
137ee196 491memFree2K(void *p)
492{
db1cd23c 493 memFree(p, MEM_2K_BUF);
137ee196 494}
495
acf5589a 496void
497memFree4K(void *p)
498{
db1cd23c 499 memFree(p, MEM_4K_BUF);
acf5589a 500}
501
502void
503memFree8K(void *p)
504{
db1cd23c 505 memFree(p, MEM_8K_BUF);
acf5589a 506}
58cd5bbd 507
1eb41ae8 508static void
58cd5bbd 509memFree16K(void *p)
510{
511 memFree(p, MEM_16K_BUF);
512}
513
1eb41ae8 514static void
58cd5bbd 515memFree32K(void *p)
516{
517 memFree(p, MEM_32K_BUF);
518}
519
1eb41ae8 520static void
58cd5bbd 521memFree64K(void *p)
522{
523 memFree(p, MEM_64K_BUF);
524}
1eb41ae8 525
526FREE *
527memFreeBufFunc(size_t size)
528{
fa80a8ef 529 switch (size) {
62e76326 530
fa80a8ef 531 case 2 * 1024:
62e76326 532 return memFree2K;
533
fa80a8ef 534 case 4 * 1024:
62e76326 535 return memFree4K;
536
fa80a8ef 537 case 8 * 1024:
62e76326 538 return memFree8K;
539
fa80a8ef 540 case 16 * 1024:
62e76326 541 return memFree16K;
542
fa80a8ef 543 case 32 * 1024:
62e76326 544 return memFree32K;
545
fa80a8ef 546 case 64 * 1024:
62e76326 547 return memFree64K;
548
1eb41ae8 549 default:
62e76326 550 memMeterDec(HugeBufCountMeter);
551 memMeterDel(HugeBufVolumeMeter, size);
552 return xfree;
1eb41ae8 553 }
554}
d96ceb8e 555
556/* MemPoolMeter */
557
528b2c61 558void
c21ad0f5 559Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
d96ceb8e 560{
60eed7c2 561 int excess = 0;
d96ceb8e 562 int needed = 0;
563 MemPoolMeter *pm = mp_st->meter;
eecdacf6 564 const char *delim = "\t ";
d96ceb8e 565
eecdacf6 566 stream << std::setw(20) << std::left << mp_st->label << delim;
567 stream << std::setw(4) << std::right << mp_st->obj_size << delim;
d96ceb8e 568
569 /* Chunks */
eecdacf6 570 stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
571 stream << std::setw(4) << mp_st->chunk_capacity << delim;
d96ceb8e 572
573 if (mp_st->chunk_capacity) {
62e76326 574 needed = mp_st->items_inuse / mp_st->chunk_capacity;
575
576 if (mp_st->items_inuse % mp_st->chunk_capacity)
577 needed++;
578
579 excess = mp_st->chunks_inuse - needed;
d96ceb8e 580 }
62e76326 581
eecdacf6 582 stream << std::setw(4) << mp_st->chunks_alloc << delim;
583 stream << std::setw(4) << mp_st->chunks_inuse << delim;
584 stream << std::setw(4) << mp_st->chunks_free << delim;
585 stream << std::setw(4) << mp_st->chunks_partial << delim;
586 stream << std::setprecision(3) << xpercent(excess, needed) << delim;
62e76326 587 /*
588 * Fragmentation calculation:
589 * needed = inuse.level / chunk_capacity
590 * excess = used - needed
591 * fragmentation = excess / needed * 100%
592 *
593 * Fragm = (alloced - (inuse / obj_ch) ) / alloced
594 */
c21ad0f5 595 /* allocated */
eecdacf6 596 stream << mp_st->items_alloc << delim;
597 stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
598 stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
599 stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
600 stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
c21ad0f5 601 /* in use */
eecdacf6 602 stream << mp_st->items_inuse << delim;
603 stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
604 stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
605 stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
606 stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
c21ad0f5 607 /* idle */
eecdacf6 608 stream << mp_st->items_idle << delim;
609 stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
610 stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
c21ad0f5 611 /* saved */
eecdacf6 612 stream << (int)pm->gb_saved.count << delim;
613 stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_saved.count) << delim;
614 stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_saved.bytes) << delim;
35268c70 615 stream << std::setprecision(3) << xdiv(pm->gb_saved.count - pm->gb_osaved.count, xm_deltat) << "\n";
d96ceb8e 616 pm->gb_osaved.count = pm->gb_saved.count;
617}
618
729102f4 619static int
620MemPoolReportSorter(const void *a, const void *b)
621{
622 const MemPoolStats *A = (MemPoolStats *) a;
623 const MemPoolStats *B = (MemPoolStats *) b;
624
625 // use this to sort on %Total Allocated
626 //
627 double pa = (double) A->obj_size * A->meter->alloc.level;
628 double pb = (double) B->obj_size * B->meter->alloc.level;
629
630 if (pa > pb)
631 return -1;
632
633 if (pb > pa)
634 return 1;
635
636#if 0
637 // use this to sort on In Use high(hrs)
638 //
639 if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
640 return -1;
641
642 if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
643 return 1;
644
645#endif
646
647 return 0;
648}
649
d96ceb8e 650void
c21ad0f5 651Mem::Report(std::ostream &stream)
d96ceb8e 652{
653 static char buf[64];
654 static MemPoolStats mp_stats;
655 static MemPoolGlobalStats mp_total;
656 int not_used = 0;
657 MemPoolIterator *iter;
b001e822 658 MemAllocator *pool;
d96ceb8e 659
660 /* caption */
c21ad0f5 661 stream << "Current memory usage:\n";
d96ceb8e 662 /* heading */
c21ad0f5 663 stream << "Pool\t Obj Size\t"
664 "Chunks\t\t\t\t\t\t\t"
665 "Allocated\t\t\t\t\t"
666 "In Use\t\t\t\t\t"
667 "Idle\t\t\t"
668 "Allocations Saved\t\t\t"
669 "Hit Rate\t"
670 "\n"
671 " \t (bytes)\t"
672 "KB/ch\t obj/ch\t"
35268c70 673 "(#)\t used\t free\t part\t %Frag\t "
674 "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
675 "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
c21ad0f5 676 "(#)\t (KB)\t high (KB)\t"
35268c70 677 "(#)\t %cnt\t %vol\t"
c21ad0f5 678 "(#) / sec\t"
679 "\n";
d96ceb8e 680 xm_deltat = current_dtime - xm_time;
681 xm_time = current_dtime;
682
683 /* Get stats for Totals report line */
684 memPoolGetGlobalStats(&mp_total);
685
729102f4 686 MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc ,sizeof(*sortme));
687 int npools = 0;
688
d96ceb8e 689 /* main table */
690 iter = memPoolIterate();
62e76326 691
d96ceb8e 692 while ((pool = memPoolIterateNext(iter))) {
b001e822 693 pool->getStats(&mp_stats);
62e76326 694
695 if (!mp_stats.pool) /* pool destroyed */
696 continue;
697
b001e822 698 if (mp_stats.pool->getMeter().gb_saved.count > 0) /* this pool has been used */
729102f4 699 sortme[npools++] = mp_stats;
62e76326 700 else
701 not_used++;
d96ceb8e 702 }
62e76326 703
d96ceb8e 704 memPoolIterateDone(&iter);
705
729102f4 706 qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);
707
708 for (int i = 0; i< npools; i++) {
c21ad0f5 709 PoolReport(&sortme[i], mp_total.TheMeter, stream);
729102f4 710 }
711
712 xfree(sortme);
713
d96ceb8e 714 mp_stats.pool = NULL;
715 mp_stats.label = "Total";
716 mp_stats.meter = mp_total.TheMeter;
717 mp_stats.obj_size = 1;
718 mp_stats.chunk_capacity = 0;
719 mp_stats.chunk_size = 0;
720 mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
721 mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
722 mp_stats.chunks_partial = mp_total.tot_chunks_partial;
723 mp_stats.chunks_free = mp_total.tot_chunks_free;
724 mp_stats.items_alloc = mp_total.tot_items_alloc;
725 mp_stats.items_inuse = mp_total.tot_items_inuse;
726 mp_stats.items_idle = mp_total.tot_items_idle;
727 mp_stats.overhead = mp_total.tot_overhead;
728
c21ad0f5 729 PoolReport(&mp_stats, mp_total.TheMeter, stream);
d96ceb8e 730
731 /* Cumulative */
c21ad0f5 732 stream << "Cumulative allocated volume: "<< double_to_str(buf, 64, mp_total.TheMeter->gb_saved.bytes) << "\n";
d96ceb8e 733 /* overhead */
c21ad0f5 734 stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
35268c70 735 std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
d96ceb8e 736 /* limits */
c21ad0f5 737 stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
d96ceb8e 738 /* limits */
c21ad0f5 739 stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
740 stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
741 stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
d96ceb8e 742}