/*
 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 13    High Level Memory Pool Management */
12 #include "acl/AclDenyInfoList.h"
13 #include "acl/AclNameList.h"
14 #include "base/PackableStream.h"
15 #include "CacheDigest.h"
16 #include "ClientInfo.h"
20 #include "icmp/net_db.h"
22 #include "mem/forward.h"
23 #include "mem/Meter.h"
26 #include "mgr/Registration.h"
27 #include "SquidConfig.h"
28 #include "SquidList.h"
29 #include "SquidTime.h"
34 /* forward declarations */
35 static void memFree2K(void *);
36 static void memFree4K(void *);
37 static void memFree8K(void *);
38 static void memFree16K(void *);
39 static void memFree32K(void *);
40 static void memFree64K(void *);
43 const size_t squidSystemPageSize
=getpagesize();
45 /* local prototypes */
46 static void memStringStats(std::ostream
&);
49 static MemAllocator
*MemPools
[MEM_MAX
];
50 static double xm_time
= 0;
51 static double xm_deltat
= 0;
53 /* all pools are ready to be used */
54 static bool MemIsInitialized
= false;
57 #define mem_str_pool_count 6
59 // 4 bytes bigger than the biggest string pool size
60 // which is in turn calculated from SmallestStringBeforeMemIsInitialized
61 static const size_t SmallestStringBeforeMemIsInitialized
= 1024*16+4;
68 StrPoolsAttrs
[mem_str_pool_count
] = {
71 "Short Strings", MemAllocator::RoundedSize(36),
72 }, /* to fit rfc1123 and similar */
74 "Medium Strings", MemAllocator::RoundedSize(128),
75 }, /* to fit most urls */
77 "Long Strings", MemAllocator::RoundedSize(512),
80 "1KB Strings", MemAllocator::RoundedSize(1024),
83 "4KB Strings", MemAllocator::RoundedSize(4*1024),
87 MemAllocator::RoundedSize(SmallestStringBeforeMemIsInitialized
-4)
95 StrPools
[mem_str_pool_count
];
96 static Mem::Meter StrCountMeter
;
97 static Mem::Meter StrVolumeMeter
;
99 static Mem::Meter HugeBufCountMeter
;
100 static Mem::Meter HugeBufVolumeMeter
;
105 memStringStats(std::ostream
&stream
)
108 int pooled_count
= 0;
109 size_t pooled_volume
= 0;
111 stream
<< "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
114 for (i
= 0; i
< mem_str_pool_count
; ++i
) {
115 const MemAllocator
*pool
= StrPools
[i
].pool
;
116 const auto plevel
= pool
->getMeter().inuse
.currentLevel();
117 stream
<< std::setw(20) << std::left
<< pool
->objectType();
118 stream
<< std::right
<< "\t " << xpercentInt(plevel
, StrCountMeter
.currentLevel());
119 stream
<< "\t " << xpercentInt(plevel
* pool
->objectSize(), StrVolumeMeter
.currentLevel()) << "\n";
120 pooled_count
+= plevel
;
121 pooled_volume
+= plevel
* pool
->objectSize();
125 stream
<< std::setw(20) << std::left
<< "Other Strings";
126 stream
<< std::right
<< "\t ";
127 stream
<< xpercentInt(StrCountMeter
.currentLevel() - pooled_count
, StrCountMeter
.currentLevel()) << "\t ";
128 stream
<< xpercentInt(StrVolumeMeter
.currentLevel() - pooled_volume
, StrVolumeMeter
.currentLevel()) << "\n\n";
132 memBufStats(std::ostream
& stream
)
134 stream
<< "Large buffers: " <<
135 HugeBufCountMeter
.currentLevel() << " (" <<
136 HugeBufVolumeMeter
.currentLevel() / 1024 << " KB)\n";
140 Mem::Stats(StoreEntry
* sentry
)
142 PackableStream
stream(*sentry
);
144 memStringStats(stream
);
147 if (RUNNING_ON_VALGRIND
) {
148 long int leaked
= 0, dubious
= 0, reachable
= 0, suppressed
= 0;
149 stream
<< "Valgrind Report:\n";
150 stream
<< "Type\tAmount\n";
151 debugs(13, DBG_IMPORTANT
, "Asking valgrind for memleaks");
152 VALGRIND_DO_LEAK_CHECK
;
153 debugs(13, DBG_IMPORTANT
, "Getting valgrind statistics");
154 VALGRIND_COUNT_LEAKS(leaked
, dubious
, reachable
, suppressed
);
155 stream
<< "Leaked\t" << leaked
<< "\n";
156 stream
<< "Dubious\t" << dubious
<< "\n";
157 stream
<< "Reachable\t" << reachable
<< "\n";
158 stream
<< "Suppressed\t" << suppressed
<< "\n";
169 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
170 * Will ignore repeated calls for the same pool type.
172 * Relies on Mem::Init() having been called beforehand.
175 memDataInit(mem_type type
, const char *name
, size_t size
, int, bool doZero
)
177 assert(name
&& size
);
179 if (MemPools
[type
] != NULL
)
182 MemPools
[type
] = memPoolCreate(name
, size
);
183 MemPools
[type
]->zeroBlocks(doZero
);
186 /* find appropriate pool and use it (pools always init buffer with 0s) */
188 memAllocate(mem_type type
)
190 assert(MemPools
[type
]);
191 return MemPools
[type
]->alloc();
194 /* give memory back to the pool */
196 memFree(void *p
, int type
)
198 assert(MemPools
[type
]);
199 MemPools
[type
]->freeOne(p
);
202 /* allocate a variable size buffer using best-fit string pool */
204 memAllocString(size_t net_size
, size_t * gross_size
)
206 MemAllocator
*pool
= NULL
;
209 // if pools are not yet ready, make sure that
210 // the requested size is not poolable so that the right deallocator
212 if (!MemIsInitialized
&& net_size
< SmallestStringBeforeMemIsInitialized
)
213 net_size
= SmallestStringBeforeMemIsInitialized
;
216 for (i
= 0; i
< mem_str_pool_count
; ++i
) {
217 if (net_size
<= StrPoolsAttrs
[i
].obj_size
) {
218 pool
= StrPools
[i
].pool
;
223 *gross_size
= pool
? StrPoolsAttrs
[i
].obj_size
: net_size
;
224 assert(*gross_size
>= net_size
);
225 // may forget [de]allocations until MemIsInitialized
227 StrVolumeMeter
+= *gross_size
;
228 return pool
? pool
->alloc() : xcalloc(1, net_size
);
236 for (int counter
= 0; counter
< mem_str_pool_count
; ++counter
)
237 result
+= memPoolInUseCount(StrPools
[counter
].pool
);
242 /* free buffer allocated with memAllocString() */
244 memFreeString(size_t size
, void *buf
)
246 MemAllocator
*pool
= NULL
;
249 if (MemIsInitialized
) {
250 for (unsigned int i
= 0; i
< mem_str_pool_count
; ++i
) {
251 if (size
<= StrPoolsAttrs
[i
].obj_size
) {
252 assert(size
== StrPoolsAttrs
[i
].obj_size
);
253 pool
= StrPools
[i
].pool
;
259 // may forget [de]allocations until MemIsInitialized
261 StrVolumeMeter
-= size
;
262 pool
? pool
->freeOne(buf
) : xfree(buf
);
265 /* Find the best fit MEM_X_BUF type */
267 memFindBufSizeType(size_t net_size
, size_t * gross_size
)
272 if (net_size
<= 2 * 1024) {
275 } else if (net_size
<= 4 * 1024) {
278 } else if (net_size
<= 8 * 1024) {
281 } else if (net_size
<= 16 * 1024) {
284 } else if (net_size
<= 32 * 1024) {
287 } else if (net_size
<= 64 * 1024) {
301 /* allocate a variable size buffer using best-fit pool */
303 memAllocBuf(size_t net_size
, size_t * gross_size
)
305 mem_type type
= memFindBufSizeType(net_size
, gross_size
);
307 if (type
!= MEM_NONE
)
308 return memAllocate(type
);
311 HugeBufVolumeMeter
+= *gross_size
;
312 return xcalloc(1, net_size
);
316 /* resize a variable sized buffer using best-fit pool */
318 memReallocBuf(void *oldbuf
, size_t net_size
, size_t * gross_size
)
320 /* XXX This can be optimized on very large buffers to use realloc() */
321 /* TODO: if the existing gross size is >= new gross size, do nothing */
322 size_t new_gross_size
;
323 void *newbuf
= memAllocBuf(net_size
, &new_gross_size
);
326 size_t data_size
= *gross_size
;
328 if (data_size
> net_size
)
329 data_size
= net_size
;
331 memcpy(newbuf
, oldbuf
, data_size
);
333 memFreeBuf(*gross_size
, oldbuf
);
336 *gross_size
= new_gross_size
;
340 /* free buffer allocated with memAllocBuf() */
342 memFreeBuf(size_t size
, void *buf
)
344 mem_type type
= memFindBufSizeType(size
, NULL
);
346 if (type
!= MEM_NONE
)
351 HugeBufVolumeMeter
-= size
;
355 static double clean_interval
= 15.0; /* time to live of idle chunk before release */
358 Mem::CleanIdlePools(void *)
360 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval
));
361 eventAdd("memPoolCleanIdlePools", CleanIdlePools
, NULL
, clean_interval
, 1);
367 int64_t new_pool_limit
;
369 /** Set to configured value first */
370 if (!Config
.onoff
.mem_pools
)
372 else if (Config
.MemPools
.limit
> 0)
373 new_pool_limit
= Config
.MemPools
.limit
;
375 if (Config
.MemPools
.limit
== 0)
376 debugs(13, DBG_IMPORTANT
, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
383 * No debugging here please because this method is called before
384 * the debug log is configured and we'll get the message on
385 * stderr when doing things like 'squid -k reconfigure'
387 if (MemPools::GetInstance().idleLimit() > new_pool_limit
)
388 debugs(13, DBG_IMPORTANT
, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit
) << " MB");
391 MemPools::GetInstance().setIdleLimit(new_pool_limit
);
394 /* XXX make these classes do their own memory management */
395 #include "HttpHdrContRange.h"
403 * NOTE: Mem::Init() is called before the config file is parsed
404 * and before the debugging module has been initialized. Any
405 * debug messages here at level 0 or 1 will always be printed
410 * Set all pointers to null. */
411 memset(MemPools
, '\0', sizeof(MemPools
));
413 * Then initialize all pools.
415 * Starting with generic 2kB - 64kB buffr pools, then specific object types.
417 * It does not hurt much to have a lot of pools since sizeof(MemPool) is
418 * small; someday we will figure out what to do with all the entries here
419 * that are never used or used only once; perhaps we should simply use
420 * malloc() for those? @?@
422 memDataInit(MEM_2K_BUF
, "2K Buffer", 2048, 10, false);
423 memDataInit(MEM_4K_BUF
, "4K Buffer", 4096, 10, false);
424 memDataInit(MEM_8K_BUF
, "8K Buffer", 8192, 10, false);
425 memDataInit(MEM_16K_BUF
, "16K Buffer", 16384, 10, false);
426 memDataInit(MEM_32K_BUF
, "32K Buffer", 32768, 10, false);
427 memDataInit(MEM_64K_BUF
, "64K Buffer", 65536, 10, false);
428 memDataInit(MEM_ACL_DENY_INFO_LIST
, "AclDenyInfoList",
429 sizeof(AclDenyInfoList
), 0);
430 memDataInit(MEM_ACL_NAME_LIST
, "acl_name_list", sizeof(AclNameList
), 0);
431 #if USE_CACHE_DIGESTS
433 memDataInit(MEM_CACHE_DIGEST
, "CacheDigest", sizeof(CacheDigest
), 0);
436 memDataInit(MEM_LINK_LIST
, "link_list", sizeof(link_list
), 10);
437 memDataInit(MEM_DLINK_NODE
, "dlink_node", sizeof(dlink_node
), 10);
438 memDataInit(MEM_DREAD_CTRL
, "dread_ctrl", sizeof(dread_ctrl
), 0);
439 memDataInit(MEM_DWRITE_Q
, "dwrite_q", sizeof(dwrite_q
), 0);
440 memDataInit(MEM_HTTP_HDR_CONTENT_RANGE
, "HttpHdrContRange", sizeof(HttpHdrContRange
), 0);
441 memDataInit(MEM_NETDBENTRY
, "netdbEntry", sizeof(netdbEntry
), 0);
442 memDataInit(MEM_NET_DB_NAME
, "net_db_name", sizeof(net_db_name
), 0);
443 memDataInit(MEM_CLIENT_INFO
, "ClientInfo", sizeof(ClientInfo
), 0);
444 memDataInit(MEM_MD5_DIGEST
, "MD5 digest", SQUID_MD5_DIGEST_LENGTH
, 0);
445 MemPools
[MEM_MD5_DIGEST
]->setChunkSize(512 * 1024);
447 /** Lastly init the string pools. */
448 for (i
= 0; i
< mem_str_pool_count
; ++i
) {
449 StrPools
[i
].pool
= memPoolCreate(StrPoolsAttrs
[i
].name
, StrPoolsAttrs
[i
].obj_size
);
450 StrPools
[i
].pool
->zeroBlocks(false);
452 if (StrPools
[i
].pool
->objectSize() != StrPoolsAttrs
[i
].obj_size
)
453 debugs(13, DBG_IMPORTANT
, "Notice: " << StrPoolsAttrs
[i
].name
<< " is " << StrPools
[i
].pool
->objectSize() << " bytes instead of requested " << StrPoolsAttrs
[i
].obj_size
<< " bytes");
456 MemIsInitialized
= true;
458 // finally register with the cache manager
459 Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats
, 0, 1);
465 debugs(13, 3, "Memory pools are '" <<
466 (Config
.onoff
.mem_pools
? "on" : "off") << "'; limit: " <<
467 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
471 mem_type
&operator++ (mem_type
&aMem
)
474 aMem
= (mem_type
)(++tmp
);
479 * Test that all entries are initialized
484 mem_type t
= MEM_NONE
;
486 while (++t
< MEM_DONTFREE
) {
488 * If you hit this assertion, then you forgot to add a
489 * memDataInit() line for type 't'.
490 * Or placed the pool type in the wrong section of the enum list.
499 MemPoolGlobalStats stats
;
500 if (Config
.MemPools
.limit
> 0) // do not reset if disabled or same
501 MemPools::GetInstance().setIdleLimit(0);
502 MemPools::GetInstance().clean(0);
503 memPoolGetGlobalStats(&stats
);
505 if (stats
.tot_items_inuse
)
506 debugs(13, 2, "memCleanModule: " << stats
.tot_items_inuse
<<
507 " items in " << stats
.tot_chunks_inuse
<< " chunks and " <<
508 stats
.tot_pools_inuse
<< " pools are left dirty");
512 memInUse(mem_type type
)
514 return memPoolInUseCount(MemPools
[type
]);
522 memFree(p
, MEM_2K_BUF
);
528 memFree(p
, MEM_4K_BUF
);
534 memFree(p
, MEM_8K_BUF
);
540 memFree(p
, MEM_16K_BUF
);
546 memFree(p
, MEM_32K_BUF
);
552 memFree(p
, MEM_64K_BUF
);
556 cxx_xfree(void * ptr
)
562 memFreeBufFunc(size_t size
)
586 HugeBufVolumeMeter
-= size
;
594 Mem::PoolReport(const MemPoolStats
* mp_st
, const MemPoolMeter
* AllMeter
, std::ostream
&stream
)
598 MemPoolMeter
*pm
= mp_st
->meter
;
599 const char *delim
= "\t ";
601 stream
.setf(std::ios_base::fixed
);
602 stream
<< std::setw(20) << std::left
<< mp_st
->label
<< delim
;
603 stream
<< std::setw(4) << std::right
<< mp_st
->obj_size
<< delim
;
606 if (mp_st
->chunk_capacity
) {
607 stream
<< std::setw(4) << toKB(mp_st
->obj_size
* mp_st
->chunk_capacity
) << delim
;
608 stream
<< std::setw(4) << mp_st
->chunk_capacity
<< delim
;
610 needed
= mp_st
->items_inuse
/ mp_st
->chunk_capacity
;
612 if (mp_st
->items_inuse
% mp_st
->chunk_capacity
)
615 excess
= mp_st
->chunks_inuse
- needed
;
617 stream
<< std::setw(4) << mp_st
->chunks_alloc
<< delim
;
618 stream
<< std::setw(4) << mp_st
->chunks_inuse
<< delim
;
619 stream
<< std::setw(4) << mp_st
->chunks_free
<< delim
;
620 stream
<< std::setw(4) << mp_st
->chunks_partial
<< delim
;
621 stream
<< std::setprecision(3) << xpercent(excess
, needed
) << delim
;
632 * Fragmentation calculation:
633 * needed = inuse.currentLevel() / chunk_capacity
634 * excess = used - needed
635 * fragmentation = excess / needed * 100%
637 * Fragm = (alloced - (inuse / obj_ch) ) / alloced
640 stream
<< mp_st
->items_alloc
<< delim
;
641 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.currentLevel()) << delim
;
642 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.peak()) << delim
;
643 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->alloc
.peakTime()) / 3600.) << delim
;
644 stream
<< std::setprecision(3) << xpercent(mp_st
->obj_size
* pm
->alloc
.currentLevel(), AllMeter
->alloc
.currentLevel()) << delim
;
646 stream
<< mp_st
->items_inuse
<< delim
;
647 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.currentLevel()) << delim
;
648 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.peak()) << delim
;
649 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->inuse
.peakTime()) / 3600.) << delim
;
650 stream
<< std::setprecision(3) << xpercent(pm
->inuse
.currentLevel(), pm
->alloc
.currentLevel()) << delim
;
652 stream
<< mp_st
->items_idle
<< delim
;
653 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.currentLevel()) << delim
;
654 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.peak()) << delim
;
656 stream
<< (int)pm
->gb_saved
.count
<< delim
;
657 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.count
, AllMeter
->gb_allocated
.count
) << delim
;
658 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.bytes
, AllMeter
->gb_allocated
.bytes
) << delim
;
659 stream
<< std::setprecision(3) << xdiv(pm
->gb_allocated
.count
- pm
->gb_oallocated
.count
, xm_deltat
) << "\n";
660 pm
->gb_oallocated
.count
= pm
->gb_allocated
.count
;
664 MemPoolReportSorter(const void *a
, const void *b
)
666 const MemPoolStats
*A
= (MemPoolStats
*) a
;
667 const MemPoolStats
*B
= (MemPoolStats
*) b
;
669 // use this to sort on %Total Allocated
671 double pa
= (double) A
->obj_size
* A
->meter
->alloc
.currentLevel();
672 double pb
= (double) B
->obj_size
* B
->meter
->alloc
.currentLevel();
681 // use this to sort on In Use high(hrs)
683 if (A
->meter
->inuse
.peakTime() > B
->meter
->inuse
.peakTime())
686 if (B
->meter
->inuse
.peakTime() > A
->meter
->inuse
.peakTime())
695 Mem::Report(std::ostream
&stream
)
698 static MemPoolStats mp_stats
;
699 static MemPoolGlobalStats mp_total
;
701 MemPoolIterator
*iter
;
705 stream
<< "Current memory usage:\n";
707 stream
<< "Pool\t Obj Size\t"
708 "Chunks\t\t\t\t\t\t\t"
709 "Allocated\t\t\t\t\t"
712 "Allocations Saved\t\t\t"
717 "(#)\t used\t free\t part\t %Frag\t "
718 "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
719 "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
720 "(#)\t (KB)\t high (KB)\t"
721 "(#)\t %cnt\t %vol\t"
724 xm_deltat
= current_dtime
- xm_time
;
725 xm_time
= current_dtime
;
727 /* Get stats for Totals report line */
728 memPoolGetGlobalStats(&mp_total
);
730 MemPoolStats
*sortme
= (MemPoolStats
*) xcalloc(mp_total
.tot_pools_alloc
,sizeof(*sortme
));
734 iter
= memPoolIterate();
736 while ((pool
= memPoolIterateNext(iter
))) {
737 pool
->getStats(&mp_stats
);
739 if (!mp_stats
.pool
) /* pool destroyed */
742 if (mp_stats
.pool
->getMeter().gb_allocated
.count
> 0) {
743 /* this pool has been used */
744 sortme
[npools
] = mp_stats
;
751 memPoolIterateDone(&iter
);
753 qsort(sortme
, npools
, sizeof(*sortme
), MemPoolReportSorter
);
755 for (int i
= 0; i
< npools
; ++i
) {
756 PoolReport(&sortme
[i
], mp_total
.TheMeter
, stream
);
761 mp_stats
.pool
= NULL
;
762 mp_stats
.label
= "Total";
763 mp_stats
.meter
= mp_total
.TheMeter
;
764 mp_stats
.obj_size
= 1;
765 mp_stats
.chunk_capacity
= 0;
766 mp_stats
.chunk_size
= 0;
767 mp_stats
.chunks_alloc
= mp_total
.tot_chunks_alloc
;
768 mp_stats
.chunks_inuse
= mp_total
.tot_chunks_inuse
;
769 mp_stats
.chunks_partial
= mp_total
.tot_chunks_partial
;
770 mp_stats
.chunks_free
= mp_total
.tot_chunks_free
;
771 mp_stats
.items_alloc
= mp_total
.tot_items_alloc
;
772 mp_stats
.items_inuse
= mp_total
.tot_items_inuse
;
773 mp_stats
.items_idle
= mp_total
.tot_items_idle
;
774 mp_stats
.overhead
= mp_total
.tot_overhead
;
776 PoolReport(&mp_stats
, mp_total
.TheMeter
, stream
);
779 stream
<< "Cumulative allocated volume: "<< double_to_str(buf
, 64, mp_total
.TheMeter
->gb_allocated
.bytes
) << "\n";
781 stream
<< "Current overhead: " << mp_total
.tot_overhead
<< " bytes (" <<
782 std::setprecision(3) << xpercent(mp_total
.tot_overhead
, mp_total
.TheMeter
->inuse
.currentLevel()) << "%)\n";
784 if (mp_total
.mem_idle_limit
>= 0)
785 stream
<< "Idle pool limit: " << std::setprecision(2) << toMB(mp_total
.mem_idle_limit
) << " MB\n";
787 stream
<< "Total Pools created: " << mp_total
.tot_pools_alloc
<< "\n";
788 stream
<< "Pools ever used: " << mp_total
.tot_pools_alloc
- not_used
<< " (shown above)\n";
789 stream
<< "Currently in use: " << mp_total
.tot_pools_inuse
<< "\n";