/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 13 High Level Memory Pool Management */

#include "squid.h"
#include "base/PackableStream.h"
#include "ClientInfo.h"
#include "dlink.h"
#include "event.h"
#include "fs_io.h"
#include "icmp/net_db.h"
#include "md5.h"
#include "mem/forward.h"
#include "mem/Meter.h"
#include "mem/Pool.h"
#include "MemBuf.h"
#include "mgr/Registration.h"
#include "SquidConfig.h"
#include "Store.h"

#include <iomanip>

/* forward declarations */
static void memFree2K(void *);
static void memFree4K(void *);
static void memFree8K(void *);
static void memFree16K(void *);
static void memFree32K(void *);
static void memFree64K(void *);

/* local prototypes */
static void memStringStats(std::ostream &);

/* module locals */
static double xm_time = 0;
static double xm_deltat = 0;

/* string pools */
#define mem_str_pool_count 6

struct PoolMeta {
    const char *name;
    size_t obj_size;
};

static Mem::Meter StrCountMeter;
static Mem::Meter StrVolumeMeter;

static Mem::Meter HugeBufCountMeter;
static Mem::Meter HugeBufVolumeMeter;

/* local routines */

// XXX: refactor objects using these pools to use MEMPROXY classes instead
// then remove this function entirely
static MemAllocator *&
GetPool(size_t type)
{
    static MemAllocator *pools[MEM_MAX];
    static bool initialized = false;

    if (!initialized) {
        memset(pools, '\0', sizeof(pools));
        initialized = true;
        // Mem::Init() makes use of GetPool(type) to initialize
        // the actual pools. So must come after the flag is true
        Mem::Init();
    }

    return pools[type];
}

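/// lazily creates the fixed set of string pools on first use and returns the pool at the given index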
static MemAllocator &
GetStrPool(size_t type)
{
    static MemAllocator *strPools[mem_str_pool_count];
    static bool initialized = false;

    static const PoolMeta PoolAttrs[mem_str_pool_count] = {
        {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
        {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
        {"Long Strings", MemAllocator::RoundedSize(512)},
        {"1KB Strings", MemAllocator::RoundedSize(1024)},
        {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
        {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
    };

    if (!initialized) {
        memset(strPools, '\0', sizeof(strPools));

        /** Init the string pools on first use. */
        for (int i = 0; i < mem_str_pool_count; ++i) {
            strPools[i] = memPoolCreate(PoolAttrs[i].name, PoolAttrs[i].obj_size);
            strPools[i]->zeroBlocks(false);

            if (strPools[i]->objectSize() != PoolAttrs[i].obj_size)
                debugs(13, DBG_IMPORTANT, "WARNING: " << PoolAttrs[i].name <<
                       " is " << strPools[i]->objectSize() <<
                       " bytes instead of requested " <<
                       PoolAttrs[i].obj_size << " bytes");
        }

        initialized = true;
    }

    return *strPools[type];
}

/// \returns the best-fit string pool or nil
static MemAllocator *
memFindStringPool(size_t net_size, bool fuzzy)
{
    for (unsigned int i = 0; i < mem_str_pool_count; ++i) {
        auto &pool = GetStrPool(i);
        if (fuzzy && net_size < pool.objectSize())
            return &pool;
        if (net_size == pool.objectSize())
            return &pool;
    }
    return nullptr;
}

static void
memStringStats(std::ostream &stream)
{
    int i;
    int pooled_count = 0;
    size_t pooled_volume = 0;
    /* heading */
    stream << "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
    /* table body */

    for (i = 0; i < mem_str_pool_count; ++i) {
        const auto &pool = GetStrPool(i);
        const auto plevel = pool.getMeter().inuse.currentLevel();
        stream << std::setw(20) << std::left << pool.objectType();
        stream << std::right << "\t " << xpercentInt(plevel, StrCountMeter.currentLevel());
        stream << "\t " << xpercentInt(plevel * pool.objectSize(), StrVolumeMeter.currentLevel()) << "\n";
        pooled_count += plevel;
        pooled_volume += plevel * pool.objectSize();
    }

    /* malloc strings */
    stream << std::setw(20) << std::left << "Other Strings";
    stream << std::right << "\t ";
    stream << xpercentInt(StrCountMeter.currentLevel() - pooled_count, StrCountMeter.currentLevel()) << "\t ";
    stream << xpercentInt(StrVolumeMeter.currentLevel() - pooled_volume, StrVolumeMeter.currentLevel()) << "\n\n";
}

static void
memBufStats(std::ostream & stream)
{
    stream << "Large buffers: " <<
           HugeBufCountMeter.currentLevel() << " (" <<
           HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";
}

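/// cache manager "mem" action handler: reports pool, string, and large-buffer usage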
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);
    memStringStats(stream);
    memBufStats(stream);
#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

/*
 * public routines
 */

/*
 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
 * Will ignore repeated calls for the same pool type.
 *
 * Relies on Mem::Init() having been called beforehand.
 */
void
memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
{
    assert(name && size);

    if (GetPool(type) != nullptr)
        return;

    GetPool(type) = memPoolCreate(name, size);
    GetPool(type)->zeroBlocks(doZero);
}

/* find the appropriate pool and use it (pools zero the buffer only if created with doZero) */
void *
memAllocate(mem_type type)
{
    assert(GetPool(type));
    return GetPool(type)->alloc();
}

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    assert(GetPool(type));
    GetPool(type)->freeOne(p);
}

/* allocate a variable size buffer using best-fit string pool */
void *
memAllocString(size_t net_size, size_t * gross_size)
{
    assert(gross_size);

    if (const auto pool = memFindStringPool(net_size, true)) {
        *gross_size = pool->objectSize();
        assert(*gross_size >= net_size);
        ++StrCountMeter;
        StrVolumeMeter += *gross_size;
        return pool->alloc();
    }

    *gross_size = net_size;
    ++StrCountMeter;
    StrVolumeMeter += *gross_size;
    return xcalloc(1, net_size);
}

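/* allocate a fixed-size ("rigid") buffer from the best-fit string pool or the heap */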
void *
memAllocRigid(size_t net_size)
{
    // TODO: Use memAllocString() instead (after it stops zeroing memory).

    if (const auto pool = memFindStringPool(net_size, true)) {
        ++StrCountMeter;
        StrVolumeMeter += pool->objectSize();
        return pool->alloc();
    }

    ++StrCountMeter;
    StrVolumeMeter += net_size;
    return xmalloc(net_size);
}

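/// total number of string objects currently allocated from the string pools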
size_t
memStringCount()
{
    size_t result = 0;

    for (int counter = 0; counter < mem_str_pool_count; ++counter)
        result += GetStrPool(counter).inUseCount();

    return result;
}

/* free buffer allocated with memAllocString() */
void
memFreeString(size_t size, void *buf)
{
    assert(buf);

    if (const auto pool = memFindStringPool(size, false))
        pool->freeOne(buf);
    else
        xfree(buf);

    --StrCountMeter;
    StrVolumeMeter -= size;
}

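/* free a buffer allocated with memAllocRigid() */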
void
memFreeRigid(void *buf, size_t net_size)
{
    // TODO: Use memFreeString() instead (after removing fuzzy=false pool search).

    if (const auto pool = memFindStringPool(net_size, true)) {
        pool->freeOne(buf);
        StrVolumeMeter -= pool->objectSize();
        --StrCountMeter;
        return;
    }

    xfree(buf);
    StrVolumeMeter -= net_size;
    --StrCountMeter;
}

/* Find the best fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t * gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}

/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        ++HugeBufCountMeter;
        HugeBufVolumeMeter += *gross_size;
        return xcalloc(1, net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, nullptr);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
    }
}

static double clean_interval = 15.0;    /* time to live of idle chunk before release */

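/* periodic event handler: releases idle pool memory and reschedules itself */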
void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, nullptr, clean_interval, 1);
}

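/* apply the memory_pools and memory_pools_limit directives to the global idle pool limit */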
void
memConfigure(void)
{
    int64_t new_pool_limit;

    /** Set to configured value first */
    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}

void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    /**
     * Initialize all pools.
     * \par
     * Starting with generic 2kB - 64kB buffer pools, then specific object types.
     * \par
     * It does not hurt much to have a lot of pools since sizeof(MemPool) is
     * small; someday we will figure out what to do with all the entries here
     * that are never used or used only once; perhaps we should simply use
     * malloc() for those? @?@
     */
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    memDataInit(MEM_DREAD_CTRL, "dread_ctrl", sizeof(dread_ctrl), 0);
    memDataInit(MEM_DWRITE_Q, "dwrite_q", sizeof(dwrite_q), 0);
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0);
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}

void
Mem::Report()
{
    debugs(13, 3, "Memory pools are '" <<
           (Config.onoff.mem_pools ? "on" : "off") << "'; limit: " <<
           std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
           " MB");
}

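/// allows iteration over mem_type enum values (e.g., in memCheckInit())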
static mem_type &
operator++(mem_type &aMem)
{
    int tmp = (int)aMem;
    aMem = (mem_type)(++tmp);
    return aMem;
}

/*
 * Test that all entries are initialized
 */
void
memCheckInit(void)
{
    mem_type t = MEM_NONE;

    while (++t < MEM_MAX) {
        /*
         * If you hit this assertion, then you forgot to add a
         * memDataInit() line for type 't'.
         */
        assert(GetPool(t));
    }
}

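/* shutdown cleanup: releases idle pool memory and reports objects left in use */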
void
memClean(void)
{
    MemPoolGlobalStats stats;
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0);
    memPoolGetGlobalStats(&stats);

    if (stats.tot_items_inuse)
        debugs(13, 2, "memCleanModule: " << stats.tot_items_inuse <<
               " items in " << stats.tot_chunks_inuse << " chunks and " <<
               stats.tot_pools_inuse << " pools are left dirty");
}

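/// number of currently allocated (in use) objects of the given pool type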
int
memInUse(mem_type type)
{
    return GetPool(type)->inUseCount();
}

/* ick */

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

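/// xfree() wrapper matching the FREE function-pointer signature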
static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}

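/* return the free function for a buffer allocated with memAllocBuf()/memReallocBuf() */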
FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}

/* MemPoolMeter */

void
Mem::PoolReport(const MemPoolStats * mp_st, const MemPoolMeter * AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    MemPoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     * needed = inuse.currentLevel() / chunk_capacity
     * excess = used - needed
     * fragmentation = excess / needed * 100%
     *
     * Fragm = (alloced - (inuse / obj_ch) ) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";
    pm->gb_oallocated.count = pm->gb_allocated.count;
}

static int
MemPoolReportSorter(const void *a, const void *b)
{
    const MemPoolStats *A = (MemPoolStats *) a;
    const MemPoolStats *B = (MemPoolStats *) b;

    // use this to sort on %Total Allocated
    //
    double pa = (double) A->obj_size * A->meter->alloc.currentLevel();
    double pb = (double) B->obj_size * B->meter->alloc.currentLevel();

    if (pa > pb)
        return -1;

    if (pb > pa)
        return 1;

    return 0;
}

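/* prints the cache manager memory pools table, one row per used pool, plus totals */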
void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    static MemPoolStats mp_stats;
    static MemPoolGlobalStats mp_total;
    int not_used = 0;
    MemPoolIterator *iter;
    MemAllocator *pool;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";
    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    memPoolGetGlobalStats(&mp_total);

    MemPoolStats *sortme = (MemPoolStats *) xcalloc(mp_total.tot_pools_alloc, sizeof(*sortme));
    int npools = 0;

    /* main table */
    iter = memPoolIterate();

    while ((pool = memPoolIterateNext(iter))) {
        pool->getStats(&mp_stats);

        if (!mp_stats.pool) /* pool destroyed */
            continue;

        if (mp_stats.pool->getMeter().gb_allocated.count > 0) {
            /* this pool has been used */
            sortme[npools] = mp_stats;
            ++npools;
        } else {
            ++not_used;
        }
    }

    memPoolIterateDone(&iter);

    qsort(sortme, npools, sizeof(*sortme), MemPoolReportSorter);

    for (int i = 0; i < npools; ++i) {
        PoolReport(&sortme[i], mp_total.TheMeter, stream);
    }

    xfree(sortme);

    mp_stats.pool = nullptr;
    mp_stats.label = "Total";
    mp_stats.meter = mp_total.TheMeter;
    mp_stats.obj_size = 1;
    mp_stats.chunk_capacity = 0;
    mp_stats.chunk_size = 0;
    mp_stats.chunks_alloc = mp_total.tot_chunks_alloc;
    mp_stats.chunks_inuse = mp_total.tot_chunks_inuse;
    mp_stats.chunks_partial = mp_total.tot_chunks_partial;
    mp_stats.chunks_free = mp_total.tot_chunks_free;
    mp_stats.items_alloc = mp_total.tot_items_alloc;
    mp_stats.items_inuse = mp_total.tot_items_inuse;
    mp_stats.items_idle = mp_total.tot_items_idle;
    mp_stats.overhead = mp_total.tot_overhead;

    PoolReport(&mp_stats, mp_total.TheMeter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.TheMeter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.tot_overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.tot_overhead, mp_total.TheMeter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (mp_total.mem_idle_limit >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(mp_total.mem_idle_limit) << " MB\n";
    /* limits */
    stream << "Total Pools created: " << mp_total.tot_pools_alloc << "\n";
    stream << "Pools ever used: " << mp_total.tot_pools_alloc - not_used << " (shown above)\n";
    stream << "Currently in use: " << mp_total.tot_pools_inuse << "\n";
}