/*
 * $Id$
 *
 * DEBUG: section 13    High Level Memory Pool Management
 * AUTHOR: Harvest Derived
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "event.h"
#include "CacheManager.h"
#include "ClientInfo.h"
#include "Mem.h"
#include "memMeter.h"
#include "Store.h"
#include "StoreEntryStream.h"
#include "MemBuf.h"
#include "SquidTime.h"

#if HAVE_IOMANIP
#include <iomanip>
#endif
#if HAVE_OSTREAM
#include <ostream>
#endif

/* module globals */
const size_t squidSystemPageSize=getpagesize();

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
static MemAllocator *MemPools[MEM_MAX];
static double xm_time = 0;
static double xm_deltat = 0;

/* string pools */
#define mem_str_pool_count 3

static const struct {
    const char *name;
    size_t obj_size;
} StrPoolsAttrs[mem_str_pool_count] = {
    {
        "Short Strings", MemAllocator::RoundedSize(36),
    }, /* to fit rfc1123 and similar */
    {
        "Medium Strings", MemAllocator::RoundedSize(128),
    }, /* to fit most urls */
    {
        "Long Strings", MemAllocator::RoundedSize(512)
    } /* other */
};

static struct {
    MemAllocator *pool;
} StrPools[mem_str_pool_count];

static MemMeter StrCountMeter;
static MemMeter StrVolumeMeter;

static MemMeter HugeBufCountMeter;
static MemMeter HugeBufVolumeMeter;

/* local routines */

static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;
    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
    /* table body */

    for (i = 0; i < mem_str_pool_count; i++) {
        const MemAllocator *pool = StrPools[i].pool;
        const int plevel = pool->getMeter().inuse.level;
        stream << std::setw(20) << std::left << pool->objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.level);
        stream << "\t " << xpercentInt(plevel * pool->objectSize(), StrVolumeMeter.level) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool->objectSize();
    }

    /* malloc strings */
    stream << std::setw(20) << std::left << "Other Strings";
    stream << std::right << "\t ";
    stream << xpercentInt(StrCountMeter.level - pooled_count, StrCountMeter.level) << "\t ";
    stream << xpercentInt(StrVolumeMeter.level - pooled_volume, StrVolumeMeter.level) << "\n\n";
}

static void
memBufStats(std::ostream & stream)
{
    stream << "Large buffers: " <<
           HugeBufCountMeter.level << " (" <<
           HugeBufVolumeMeter.level / 1024 << " KB)\n";
}

void
Mem::Stats(StoreEntry * sentry)
{
    StoreEntryStream stream(sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, 1, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, 1, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

/*
 * public routines
 */

/*
 * we have a limit on _total_ amount of idle memory so we ignore
 * max_pages for now
 */
void
memDataInit(mem_type type, const char *name, size_t size, int max_pages_notused, bool zeroOnPush)
{
    assert(name && size);
    assert(MemPools[type] == NULL);
    MemPools[type] = memPoolCreate(name, size);
    MemPools[type]->zeroOnPush(zeroOnPush);
}

/* find appropriate pool and use it (pools zero the buffer only when zeroOnPush is enabled for that type) */
void *
memAllocate(mem_type type)
{
    return MemPools[type]->alloc();
}
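
/*
 * Illustrative usage sketch (not part of the build): callers pair
 * memAllocate() with memFree() for the same mem_type. The local names
 * below are hypothetical; only the mem.cc API calls are real.
 *
 *     dlink_node *node = static_cast<dlink_node *>(memAllocate(MEM_DLINK_NODE));
 *     ... use node ...
 *     memFree(node, MEM_DLINK_NODE);
 */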

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    MemPools[type]->freeOne(p);
}

/* allocate a variable size buffer using best-fit pool */
void *
memAllocString(size_t net_size, size_t * gross_size)
{
    int i;
    MemAllocator *pool = NULL;
    assert(gross_size);

    for (i = 0; i < mem_str_pool_count; i++) {
        if (net_size <= StrPoolsAttrs[i].obj_size) {
            pool = StrPools[i].pool;
            break;
        }
    }

    *gross_size = pool ? StrPoolsAttrs[i].obj_size : net_size;
    assert(*gross_size >= net_size);
    memMeterInc(StrCountMeter);
    memMeterAdd(StrVolumeMeter, *gross_size);
    return pool ? pool->alloc() : xcalloc(1, net_size);
}

extern size_t memStringCount();
size_t
memStringCount()
{
    size_t result = 0;

    for (int counter = 0; counter < mem_str_pool_count; ++counter)
        result += memPoolInUseCount(StrPools[counter].pool);

    return result;
}

/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    int i;
    MemAllocator *pool = NULL;
    assert(size && buf);

    for (i = 0; i < mem_str_pool_count; i++) {
        if (size <= StrPoolsAttrs[i].obj_size) {
            assert(size == StrPoolsAttrs[i].obj_size);
            pool = StrPools[i].pool;
            break;
        }
    }

    memMeterDec(StrCountMeter);
    memMeterDel(StrVolumeMeter, size);
    pool ? pool->freeOne(buf) : xfree(buf);
}
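
/*
 * Illustrative usage sketch (not part of the build): memAllocString()
 * reports the rounded-up allocation size through gross_size, and the
 * caller must hand that same value back to memFreeString() so the right
 * string pool (or xfree for oversize requests) is chosen. The variable
 * names are hypothetical.
 *
 *     size_t gross = 0;
 *     char *s = static_cast<char *>(memAllocString(48, &gross));
 *     ... at most 48 bytes used; gross holds the pool object size ...
 *     memFreeString(gross, s);
 */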

/* Find the best fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t * gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}
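
/*
 * Worked example (hypothetical numbers): a request for net_size = 3000
 * falls through the 2 KB test and matches the 4 KB class, so the function
 * returns MEM_4K_BUF and stores 4096 in *gross_size; anything above
 * 64 KB returns MEM_NONE with *gross_size = net_size.
 */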

/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        memMeterInc(HugeBufCountMeter);
        memMeterAdd(HugeBufVolumeMeter, *gross_size);
        return xcalloc(1, net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, NULL);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
    }
}
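
/*
 * Illustrative usage sketch (not part of the build): as with the string
 * helpers, the gross size returned by memAllocBuf()/memReallocBuf() is
 * the value that must later be passed to memFreeBuf(). The variable
 * names are hypothetical.
 *
 *     size_t gross = 0;
 *     char *buf = static_cast<char *>(memAllocBuf(3000, &gross));   // 4 KB pool
 *     buf = static_cast<char *>(memReallocBuf(buf, 10000, &gross)); // moves to the 16 KB pool
 *     memFreeBuf(gross, buf);
 */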

static double clean_interval = 15.0; /* time to live of idle chunk before release */

void
Mem::CleanIdlePools(void *unused)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, NULL, clean_interval, 1);
}

void
memConfigure(void)
{
    int64_t new_pool_limit;

    /** Set to configured value first */
    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, 1, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

#if 0
    /** \par
     * DPW 2007-04-12
     * No debugging here please because this method is called before
     * the debug log is configured and we'll get the message on
     * stderr when doing things like 'squid -k reconfigure'
     */
    if (MemPools::GetInstance().idleLimit() > new_pool_limit)
        debugs(13, 1, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit) << " MB");
#endif

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}

/* XXX make these classes do their own memory management */
#include "HttpHdrContRange.h"

void
Mem::Init(void)
{
    int i;

    /** \par
     * NOTE: Mem::Init() is called before the config file is parsed
     * and before the debugging module has been initialized. Any
     * debug messages here at level 0 or 1 will always be printed
     * on stderr.
     */

    /** \par
     * Set all pointers to null. */
    memset(MemPools, '\0', sizeof(MemPools));
    /**
     * Then initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_ACL_DENY_INFO_LIST, "acl_deny_info_list",
                sizeof(acl_deny_info_list), 0);
    memDataInit(MEM_ACL_NAME_LIST, "acl_name_list", sizeof(acl_name_list), 0);
#if USE_CACHE_DIGESTS
    memDataInit(MEM_CACHE_DIGEST, "CacheDigest", sizeof(CacheDigest), 0);
#endif

    memDataInit(MEM_LINK_LIST, "link_list", sizeof(link_list), 10);
    memDataInit(MEM_DLINK_NODE, "dlink_node", sizeof(dlink_node), 10);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_HTTP_HDR_CC, "HttpHdrCc", sizeof(HttpHdrCc), 0);
    memDataInit(MEM_HTTP_HDR_CONTENT_RANGE, "HttpHdrContRange", sizeof(HttpHdrContRange), 0);
    memDataInit(MEM_NETDBENTRY, "netdbEntry", sizeof(netdbEntry), 0);
    memDataInit(MEM_NET_DB_NAME, "net_db_name", sizeof(net_db_name), 0);
    memDataInit(MEM_RELIST, "relist", sizeof(relist), 0);
    memDataInit(MEM_CLIENT_INFO, "ClientInfo", sizeof(ClientInfo), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    MemPools[MEM_MD5_DIGEST]->setChunkSize(512 * 1024);

    /** Lastly init the string pools. */
    for (i = 0; i < mem_str_pool_count; i++) {
        StrPools[i].pool = memPoolCreate(StrPoolsAttrs[i].name, StrPoolsAttrs[i].obj_size);
        StrPools[i].pool->zeroOnPush(false);

        if (StrPools[i].pool->objectSize() != StrPoolsAttrs[i].obj_size)
            debugs(13, 1, "Notice: " << StrPoolsAttrs[i].name << " is " << StrPools[i].pool->objectSize() << " bytes instead of requested " << StrPoolsAttrs[i].obj_size << " bytes");
    }

    /** \par
     * finally register with the cache manager */
    RegisterWithCacheManager();
}

void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}

void
Mem::RegisterWithCacheManager(void)
{
    CacheManager::GetInstance()->registerAction("mem", "Memory Utilization",
            Mem::Stats, 0, 1);
}

mem_type &operator++ (mem_type &aMem)
{
    int tmp = (int)aMem;
    aMem = (mem_type)(++tmp);
    return aMem;
}

/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t;

    for (t = MEM_NONE, ++t; t < MEM_MAX; ++t) {
        if (MEM_DONTFREE == t)
            continue;

        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         */
        assert(MemPools[t]);
    }
}

void
memClean(void)
{
    MemPoolGlobalStats stats;
    MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}

int
memInUse(mem_type type)
{
    return memPoolInUseCount(MemPools[type]);
}

/* ick */

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}

FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        memMeterDec(HugeBufCountMeter);
        memMeterDel(HugeBufVolumeMeter, size);
        return cxx_xfree;
    }
}
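
/*
 * Illustrative usage sketch (not part of the build): memFreeBufFunc()
 * returns the FREE handler matching a buffer's gross size, which is
 * handy when ownership of a memAllocBuf() buffer is handed to code that
 * only keeps a free-function pointer. The variable names are hypothetical.
 *
 *     size_t gross = 0;
 *     void *buf = memAllocBuf(6000, &gross);   // 8 KB pool
 *     FREE *freeFunc = memFreeBufFunc(gross);  // memFree8K
 *     ... later ...
 *     freeFunc(buf);
 */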

/* MemPoolMeter */

void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

#if HAVE_IOMANIP
    stream.setf(std::ios_base::fixed);
#endif
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            needed++;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     *    needed = inuse.level / chunk_capacity
     *    excess = used - needed
     *    fragmentation = excess / needed * 100%
     *
     *    Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
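    /*
     * Worked example (hypothetical numbers): with chunk_capacity = 100 and
     * items_inuse = 250, needed = ceil(250 / 100) = 3 chunks; if
     * chunks_inuse = 5, then excess = 5 - 3 = 2 and the %Frag column shows
     * xpercent(2, 3), roughly 66.7%.
     */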
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.level) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.level, AllMeter->alloc.level) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.level) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.hwater_level) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.hwater_stamp) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.level, pm->alloc.level) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.level) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.hwater_level) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}

static int
MemPoolReportSorter(const void *a, const void *b)
{
    const MemPoolStats *A = (MemPoolStats *) a;
    const MemPoolStats *B = (MemPoolStats *) b;

    // use this to sort on %Total Allocated
    //
    double pa = (double) A->obj_size * A->meter->alloc.level;
    double pb = (double) B->obj_size * B->meter->alloc.level;

    if (pa > pb)
        return -1;

    if (pb > pa)
        return 1;

#if 0
    // use this to sort on In Use high(hrs)
    //
    if (A->meter->inuse.hwater_stamp > B->meter->inuse.hwater_stamp)
        return -1;

    if (B->meter->inuse.hwater_stamp > A->meter->inuse.hwater_stamp)
        return 1;

#endif

    return 0;
}

void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc, sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) /* this pool has been used */
            sortme[npools++] = mp_stats;
        else
            not_used++;
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i < npools; i++) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    mp_stats.pool = NULL;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.level) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* pool counts */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}