2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 13 High Level Memory Pool Management */
12 #include "base/PackableStream.h"
13 #include "ClientInfo.h"
17 #include "icmp/net_db.h"
19 #include "mem/forward.h"
20 #include "mem/Meter.h"
23 #include "mgr/Registration.h"
24 #include "SquidConfig.h"
25 #include "SquidTime.h"
30 /* forward declarations */
31 static void memFree2K(void *);
32 static void memFree4K(void *);
33 static void memFree8K(void *);
34 static void memFree16K(void *);
35 static void memFree32K(void *);
36 static void memFree64K(void *);
38 /* local prototypes */
39 static void memStringStats(std::ostream
&);
// wall-clock bookkeeping consumed by the pool report: xm_deltat is the
// elapsed time between two report runs and is used as the divisor for
// per-second allocation-rate columns.
42 static double xm_time
= 0;
43 static double xm_deltat
= 0;
// number of fixed-size string pools managed by GetStrPool()
46 #define mem_str_pool_count 6
// accounting for strings handed out via memAllocString()/memFreeString()
// (count of live strings, and their total gross volume in bytes)
53 static Mem::Meter StrCountMeter
;
54 static Mem::Meter StrVolumeMeter
;
// accounting for oversized buffers that bypass the size-class pools
// (see the xcalloc() fall-back paths in memAllocBuf()/memFreeBuf())
56 static Mem::Meter HugeBufCountMeter
;
57 static Mem::Meter HugeBufVolumeMeter
;
61 // XXX: refactor objects using these pools to use MEMPROXY classes instead
62 // then remove this function entirely
63 static MemAllocator
*&
66 static MemAllocator
*pools
[MEM_MAX
];
67 static bool initialized
= false;
70 memset(pools
, '\0', sizeof(pools
));
72 // Mem::Init() makes use of GetPool(type) to initialize
73 // the actual pools. So must come after the flag is true
81 GetStrPool(size_t type
)
83 static MemAllocator
*strPools
[mem_str_pool_count
];
84 static bool initialized
= false;
86 static const PoolMeta PoolAttrs
[mem_str_pool_count
] = {
87 {"Short Strings", MemAllocator::RoundedSize(36)}, /* to fit rfc1123 and similar */
88 {"Medium Strings", MemAllocator::RoundedSize(128)}, /* to fit most urls */
89 {"Long Strings", MemAllocator::RoundedSize(512)},
90 {"1KB Strings", MemAllocator::RoundedSize(1024)},
91 {"4KB Strings", MemAllocator::RoundedSize(4*1024)},
92 {"16KB Strings", MemAllocator::RoundedSize(16*1024)}
96 memset(strPools
, '\0', sizeof(strPools
));
98 /** Lastly init the string pools. */
99 for (int i
= 0; i
< mem_str_pool_count
; ++i
) {
100 strPools
[i
] = memPoolCreate(PoolAttrs
[i
].name
, PoolAttrs
[i
].obj_size
);
101 strPools
[i
]->zeroBlocks(false);
103 if (strPools
[i
]->objectSize() != PoolAttrs
[i
].obj_size
)
104 debugs(13, DBG_IMPORTANT
, "NOTICE: " << PoolAttrs
[i
].name
<<
105 " is " << strPools
[i
]->objectSize() <<
106 " bytes instead of requested " <<
107 PoolAttrs
[i
].obj_size
<< " bytes");
113 return *strPools
[type
];
116 /* Find the best fit string pool type */
118 memFindStringSizeType(size_t net_size
, bool fuzzy
)
120 mem_type type
= MEM_NONE
;
121 for (unsigned int i
= 0; i
< mem_str_pool_count
; ++i
) {
122 auto &pool
= GetStrPool(i
);
123 if (fuzzy
&& net_size
< pool
.objectSize()) {
124 type
= static_cast<mem_type
>(i
);
126 } else if (net_size
== pool
.objectSize()) {
127 type
= static_cast<mem_type
>(i
);
136 memStringStats(std::ostream
&stream
)
139 int pooled_count
= 0;
140 size_t pooled_volume
= 0;
142 stream
<< "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
145 for (i
= 0; i
< mem_str_pool_count
; ++i
) {
146 const auto &pool
= GetStrPool(i
);
147 const auto plevel
= pool
.getMeter().inuse
.currentLevel();
148 stream
<< std::setw(20) << std::left
<< pool
.objectType();
149 stream
<< std::right
<< "\t " << xpercentInt(plevel
, StrCountMeter
.currentLevel());
150 stream
<< "\t " << xpercentInt(plevel
* pool
.objectSize(), StrVolumeMeter
.currentLevel()) << "\n";
151 pooled_count
+= plevel
;
152 pooled_volume
+= plevel
* pool
.objectSize();
156 stream
<< std::setw(20) << std::left
<< "Other Strings";
157 stream
<< std::right
<< "\t ";
158 stream
<< xpercentInt(StrCountMeter
.currentLevel() - pooled_count
, StrCountMeter
.currentLevel()) << "\t ";
159 stream
<< xpercentInt(StrVolumeMeter
.currentLevel() - pooled_volume
, StrVolumeMeter
.currentLevel()) << "\n\n";
163 memBufStats(std::ostream
& stream
)
165 stream
<< "Large buffers: " <<
166 HugeBufCountMeter
.currentLevel() << " (" <<
167 HugeBufVolumeMeter
.currentLevel() / 1024 << " KB)\n";
171 Mem::Stats(StoreEntry
* sentry
)
173 PackableStream
stream(*sentry
);
175 memStringStats(stream
);
178 if (RUNNING_ON_VALGRIND
) {
179 long int leaked
= 0, dubious
= 0, reachable
= 0, suppressed
= 0;
180 stream
<< "Valgrind Report:\n";
181 stream
<< "Type\tAmount\n";
182 debugs(13, DBG_IMPORTANT
, "Asking valgrind for memleaks");
183 VALGRIND_DO_LEAK_CHECK
;
184 debugs(13, DBG_IMPORTANT
, "Getting valgrind statistics");
185 VALGRIND_COUNT_LEAKS(leaked
, dubious
, reachable
, suppressed
);
186 stream
<< "Leaked\t" << leaked
<< "\n";
187 stream
<< "Dubious\t" << dubious
<< "\n";
188 stream
<< "Reachable\t" << reachable
<< "\n";
189 stream
<< "Suppressed\t" << suppressed
<< "\n";
200 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
201 * Will ignore repeated calls for the same pool type.
203 * Relies on Mem::Init() having been called beforehand.
206 memDataInit(mem_type type
, const char *name
, size_t size
, int, bool doZero
)
208 assert(name
&& size
);
210 if (GetPool(type
) != NULL
)
213 GetPool(type
) = memPoolCreate(name
, size
);
214 GetPool(type
)->zeroBlocks(doZero
);
217 /* find appropriate pool and use it (pools always init buffer with 0s) */
219 memAllocate(mem_type type
)
221 assert(GetPool(type
));
222 return GetPool(type
)->alloc();
225 /* give memory back to the pool */
227 memFree(void *p
, int type
)
229 assert(GetPool(type
));
230 GetPool(type
)->freeOne(p
);
233 /* allocate a variable size buffer using best-fit string pool */
235 memAllocString(size_t net_size
, size_t * gross_size
)
239 auto type
= memFindStringSizeType(net_size
, true);
240 if (type
!= MEM_NONE
) {
241 auto &pool
= GetStrPool(type
);
242 *gross_size
= pool
.objectSize();
243 assert(*gross_size
>= net_size
);
245 StrVolumeMeter
+= *gross_size
;
249 *gross_size
= net_size
;
251 StrVolumeMeter
+= *gross_size
;
252 return xcalloc(1, net_size
);
260 for (int counter
= 0; counter
< mem_str_pool_count
; ++counter
)
261 result
+= GetStrPool(counter
).inUseCount();
266 /* free buffer allocated with memAllocString() */
268 memFreeString(size_t size
, void *buf
)
272 auto type
= memFindStringSizeType(size
, false);
273 if (type
!= MEM_NONE
)
274 GetStrPool(type
).freeOne(buf
);
279 StrVolumeMeter
-= size
;
282 /* Find the best fit MEM_X_BUF type */
284 memFindBufSizeType(size_t net_size
, size_t * gross_size
)
289 if (net_size
<= 2 * 1024) {
292 } else if (net_size
<= 4 * 1024) {
295 } else if (net_size
<= 8 * 1024) {
298 } else if (net_size
<= 16 * 1024) {
301 } else if (net_size
<= 32 * 1024) {
304 } else if (net_size
<= 64 * 1024) {
318 /* allocate a variable size buffer using best-fit pool */
320 memAllocBuf(size_t net_size
, size_t * gross_size
)
322 mem_type type
= memFindBufSizeType(net_size
, gross_size
);
324 if (type
!= MEM_NONE
)
325 return memAllocate(type
);
328 HugeBufVolumeMeter
+= *gross_size
;
329 return xcalloc(1, net_size
);
333 /* resize a variable sized buffer using best-fit pool */
335 memReallocBuf(void *oldbuf
, size_t net_size
, size_t * gross_size
)
337 /* XXX This can be optimized on very large buffers to use realloc() */
338 /* TODO: if the existing gross size is >= new gross size, do nothing */
339 size_t new_gross_size
;
340 void *newbuf
= memAllocBuf(net_size
, &new_gross_size
);
343 size_t data_size
= *gross_size
;
345 if (data_size
> net_size
)
346 data_size
= net_size
;
348 memcpy(newbuf
, oldbuf
, data_size
);
350 memFreeBuf(*gross_size
, oldbuf
);
353 *gross_size
= new_gross_size
;
357 /* free buffer allocated with memAllocBuf() */
359 memFreeBuf(size_t size
, void *buf
)
361 mem_type type
= memFindBufSizeType(size
, NULL
);
363 if (type
!= MEM_NONE
)
368 HugeBufVolumeMeter
-= size
;
372 static double clean_interval
= 15.0; /* time to live of idle chunk before release */
375 Mem::CleanIdlePools(void *)
377 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval
));
378 eventAdd("memPoolCleanIdlePools", CleanIdlePools
, NULL
, clean_interval
, 1);
384 int64_t new_pool_limit
;
386 /** Set to configured value first */
387 if (!Config
.onoff
.mem_pools
)
389 else if (Config
.MemPools
.limit
> 0)
390 new_pool_limit
= Config
.MemPools
.limit
;
392 if (Config
.MemPools
.limit
== 0)
393 debugs(13, DBG_IMPORTANT
, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
400 * No debugging here please because this method is called before
401 * the debug log is configured and we'll get the message on
402 * stderr when doing things like 'squid -k reconfigure'
404 if (MemPools::GetInstance().idleLimit() > new_pool_limit
)
405 debugs(13, DBG_IMPORTANT
, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit
) << " MB");
408 MemPools::GetInstance().setIdleLimit(new_pool_limit
);
414 /* all pools are ready to be used */
415 static bool MemIsInitialized
= false;
416 if (MemIsInitialized
)
420 * NOTE: Mem::Init() is called before the config file is parsed
421 * and before the debugging module has been initialized. Any
422 * debug messages here at level 0 or 1 will always be printed
427 * Then initialize all pools.
429 * Starting with generic 2kB - 64kB buffr pools, then specific object types.
431 * It does not hurt much to have a lot of pools since sizeof(MemPool) is
432 * small; someday we will figure out what to do with all the entries here
433 * that are never used or used only once; perhaps we should simply use
434 * malloc() for those? @?@
436 memDataInit(MEM_2K_BUF
, "2K Buffer", 2048, 10, false);
437 memDataInit(MEM_4K_BUF
, "4K Buffer", 4096, 10, false);
438 memDataInit(MEM_8K_BUF
, "8K Buffer", 8192, 10, false);
439 memDataInit(MEM_16K_BUF
, "16K Buffer", 16384, 10, false);
440 memDataInit(MEM_32K_BUF
, "32K Buffer", 32768, 10, false);
441 memDataInit(MEM_64K_BUF
, "64K Buffer", 65536, 10, false);
442 memDataInit(MEM_DREAD_CTRL
, "dread_ctrl", sizeof(dread_ctrl
), 0);
443 memDataInit(MEM_DWRITE_Q
, "dwrite_q", sizeof(dwrite_q
), 0);
444 memDataInit(MEM_MD5_DIGEST
, "MD5 digest", SQUID_MD5_DIGEST_LENGTH
, 0);
445 GetPool(MEM_MD5_DIGEST
)->setChunkSize(512 * 1024);
447 MemIsInitialized
= true;
449 // finally register with the cache manager
450 Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats
, 0, 1);
456 debugs(13, 3, "Memory pools are '" <<
457 (Config
.onoff
.mem_pools
? "on" : "off") << "'; limit: " <<
458 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
462 mem_type
&operator++ (mem_type
&aMem
)
465 aMem
= (mem_type
)(++tmp
);
470 * Test that all entries are initialized
475 mem_type t
= MEM_NONE
;
477 while (++t
< MEM_MAX
) {
479 * If you hit this assertion, then you forgot to add a
480 * memDataInit() line for type 't'.
489 MemPoolGlobalStats stats
;
490 if (Config
.MemPools
.limit
> 0) // do not reset if disabled or same
491 MemPools::GetInstance().setIdleLimit(0);
492 MemPools::GetInstance().clean(0);
493 memPoolGetGlobalStats(&stats
);
495 if (stats
.tot_items_inuse
)
496 debugs(13, 2, "memCleanModule: " << stats
.tot_items_inuse
<<
497 " items in " << stats
.tot_chunks_inuse
<< " chunks and " <<
498 stats
.tot_pools_inuse
<< " pools are left dirty");
502 memInUse(mem_type type
)
504 return GetPool(type
)->inUseCount();
512 memFree(p
, MEM_2K_BUF
);
518 memFree(p
, MEM_4K_BUF
);
524 memFree(p
, MEM_8K_BUF
);
530 memFree(p
, MEM_16K_BUF
);
536 memFree(p
, MEM_32K_BUF
);
542 memFree(p
, MEM_64K_BUF
);
546 cxx_xfree(void * ptr
)
552 memFreeBufFunc(size_t size
)
576 HugeBufVolumeMeter
-= size
;
/* Emits one row of the cachemgr "mem" table for a single pool (mp_st),
 * expressing percentages against the grand totals in AllMeter. Also updates
 * pm->gb_oallocated so the final column can report allocations/second since
 * the previous report (divided by xm_deltat). */
584 Mem::PoolReport(const MemPoolStats
* mp_st
, const MemPoolMeter
* AllMeter
, std::ostream
&stream
)
588 MemPoolMeter
*pm
= mp_st
->meter
;
589 const char *delim
= "\t ";
591 stream
.setf(std::ios_base::fixed
);
592 stream
<< std::setw(20) << std::left
<< mp_st
->label
<< delim
;
593 stream
<< std::setw(4) << std::right
<< mp_st
->obj_size
<< delim
;
/* chunk columns: only meaningful for chunked pools (chunk_capacity != 0) */
596 if (mp_st
->chunk_capacity
) {
597 stream
<< std::setw(4) << toKB(mp_st
->obj_size
* mp_st
->chunk_capacity
) << delim
;
598 stream
<< std::setw(4) << mp_st
->chunk_capacity
<< delim
;
600 needed
= mp_st
->items_inuse
/ mp_st
->chunk_capacity
;
602 if (mp_st
->items_inuse
% mp_st
->chunk_capacity
)
605 excess
= mp_st
->chunks_inuse
- needed
;
607 stream
<< std::setw(4) << mp_st
->chunks_alloc
<< delim
;
608 stream
<< std::setw(4) << mp_st
->chunks_inuse
<< delim
;
609 stream
<< std::setw(4) << mp_st
->chunks_free
<< delim
;
610 stream
<< std::setw(4) << mp_st
->chunks_partial
<< delim
;
611 stream
<< std::setprecision(3) << xpercent(excess
, needed
) << delim
;
622 * Fragmentation calculation:
623 * needed = inuse.currentLevel() / chunk_capacity
624 * excess = used - needed
625 * fragmentation = excess / needed * 100%
627 * Fragm = (alloced - (inuse / obj_ch) ) / alloced
/* "Allocated" column group: item count, current/peak KB, hours since peak,
 * and this pool's share of all allocated memory */
630 stream
<< mp_st
->items_alloc
<< delim
;
631 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.currentLevel()) << delim
;
632 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.peak()) << delim
;
633 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->alloc
.peakTime()) / 3600.) << delim
;
634 stream
<< std::setprecision(3) << xpercent(mp_st
->obj_size
* pm
->alloc
.currentLevel(), AllMeter
->alloc
.currentLevel()) << delim
;
/* "In use" column group */
636 stream
<< mp_st
->items_inuse
<< delim
;
637 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.currentLevel()) << delim
;
638 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.peak()) << delim
;
639 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->inuse
.peakTime()) / 3600.) << delim
;
640 stream
<< std::setprecision(3) << xpercent(pm
->inuse
.currentLevel(), pm
->alloc
.currentLevel()) << delim
;
/* "Idle" column group */
642 stream
<< mp_st
->items_idle
<< delim
;
643 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.currentLevel()) << delim
;
644 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.peak()) << delim
;
/* "Allocations saved" columns, then the allocation rate since last report */
646 stream
<< (int)pm
->gb_saved
.count
<< delim
;
647 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.count
, AllMeter
->gb_allocated
.count
) << delim
;
648 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.bytes
, AllMeter
->gb_allocated
.bytes
) << delim
;
649 stream
<< std::setprecision(3) << xdiv(pm
->gb_allocated
.count
- pm
->gb_oallocated
.count
, xm_deltat
) << "\n";
650 pm
->gb_oallocated
.count
= pm
->gb_allocated
.count
;
/* qsort() comparator used by Mem::Report(): orders pools by total allocated
 * volume (obj_size * current alloc level), largest first. The peakTime()
 * comparison below appears to be an alternative sort key; the return
 * statements for both keys are not visible here — NOTE(review): confirm
 * which comparisons are active before editing. */
654 MemPoolReportSorter(const void *a
, const void *b
)
656 const MemPoolStats
*A
= (MemPoolStats
*) a
;
657 const MemPoolStats
*B
= (MemPoolStats
*) b
;
659 // use this to sort on %Total Allocated
661 double pa
= (double) A
->obj_size
* A
->meter
->alloc
.currentLevel();
662 double pb
= (double) B
->obj_size
* B
->meter
->alloc
.currentLevel();
671 // use this to sort on In Use high(hrs)
673 if (A
->meter
->inuse
.peakTime() > B
->meter
->inuse
.peakTime())
676 if (B
->meter
->inuse
.peakTime() > A
->meter
->inuse
.peakTime())
685 Mem::Report(std::ostream
&stream
)
688 static MemPoolStats mp_stats
;
689 static MemPoolGlobalStats mp_total
;
691 MemPoolIterator
*iter
;
695 stream
<< "Current memory usage:\n";
697 stream
<< "Pool\t Obj Size\t"
698 "Chunks\t\t\t\t\t\t\t"
699 "Allocated\t\t\t\t\t"
702 "Allocations Saved\t\t\t"
707 "(#)\t used\t free\t part\t %Frag\t "
708 "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
709 "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
710 "(#)\t (KB)\t high (KB)\t"
711 "(#)\t %cnt\t %vol\t"
714 xm_deltat
= current_dtime
- xm_time
;
715 xm_time
= current_dtime
;
717 /* Get stats for Totals report line */
718 memPoolGetGlobalStats(&mp_total
);
720 MemPoolStats
*sortme
= (MemPoolStats
*) xcalloc(mp_total
.tot_pools_alloc
,sizeof(*sortme
));
724 iter
= memPoolIterate();
726 while ((pool
= memPoolIterateNext(iter
))) {
727 pool
->getStats(&mp_stats
);
729 if (!mp_stats
.pool
) /* pool destroyed */
732 if (mp_stats
.pool
->getMeter().gb_allocated
.count
> 0) {
733 /* this pool has been used */
734 sortme
[npools
] = mp_stats
;
741 memPoolIterateDone(&iter
);
743 qsort(sortme
, npools
, sizeof(*sortme
), MemPoolReportSorter
);
745 for (int i
= 0; i
< npools
; ++i
) {
746 PoolReport(&sortme
[i
], mp_total
.TheMeter
, stream
);
751 mp_stats
.pool
= NULL
;
752 mp_stats
.label
= "Total";
753 mp_stats
.meter
= mp_total
.TheMeter
;
754 mp_stats
.obj_size
= 1;
755 mp_stats
.chunk_capacity
= 0;
756 mp_stats
.chunk_size
= 0;
757 mp_stats
.chunks_alloc
= mp_total
.tot_chunks_alloc
;
758 mp_stats
.chunks_inuse
= mp_total
.tot_chunks_inuse
;
759 mp_stats
.chunks_partial
= mp_total
.tot_chunks_partial
;
760 mp_stats
.chunks_free
= mp_total
.tot_chunks_free
;
761 mp_stats
.items_alloc
= mp_total
.tot_items_alloc
;
762 mp_stats
.items_inuse
= mp_total
.tot_items_inuse
;
763 mp_stats
.items_idle
= mp_total
.tot_items_idle
;
764 mp_stats
.overhead
= mp_total
.tot_overhead
;
766 PoolReport(&mp_stats
, mp_total
.TheMeter
, stream
);
769 stream
<< "Cumulative allocated volume: "<< double_to_str(buf
, 64, mp_total
.TheMeter
->gb_allocated
.bytes
) << "\n";
771 stream
<< "Current overhead: " << mp_total
.tot_overhead
<< " bytes (" <<
772 std::setprecision(3) << xpercent(mp_total
.tot_overhead
, mp_total
.TheMeter
->inuse
.currentLevel()) << "%)\n";
774 if (mp_total
.mem_idle_limit
>= 0)
775 stream
<< "Idle pool limit: " << std::setprecision(2) << toMB(mp_total
.mem_idle_limit
) << " MB\n";
777 stream
<< "Total Pools created: " << mp_total
.tot_pools_alloc
<< "\n";
778 stream
<< "Pools ever used: " << mp_total
.tot_pools_alloc
- not_used
<< " (shown above)\n";
779 stream
<< "Currently in use: " << mp_total
.tot_pools_inuse
<< "\n";