4 * DEBUG: section 13 High Level Memory Pool Management
5 * AUTHOR: Harvest Derived
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
37 #include "CacheManager.h"
38 #include "ClientInfo.h"
42 #include "StoreEntryStream.h"
44 #include "SquidTime.h"
/* NOTE(review): extraction artifact throughout this file — the original source
 * line numbers are fused into the text and some lines (braces, struct/type
 * headers) are missing. Restore exact text from version control. */
55 /* local prototypes */
56 static void memStringStats(std::ostream
&);
/* One allocator per mem_type; slots are filled by memDataInit() and looked up
 * by memAllocate()/memFree(). */
59 static MemAllocator
*MemPools
[MEM_MAX
];
/* Timestamps used by Mem::Report to compute an allocation rate (xdiv by
 * xm_deltat). */
60 static double xm_time
= 0;
61 static double xm_deltat
= 0;
/* Number of fixed-size string pools (short/medium/long below). */
64 #define mem_str_pool_count 3
/* Attributes (name, rounded object size) for the three string pools.
 * NOTE(review): the struct declaration lines (orig 65-70) are missing here. */
71 StrPoolsAttrs
[mem_str_pool_count
] = {
74 "Short Strings", MemAllocator::RoundedSize(36),
75 }, /* to fit rfc1123 and similar */
77 "Medium Strings", MemAllocator::RoundedSize(128),
78 }, /* to fit most urls */
80 "Long Strings", MemAllocator::RoundedSize(512)
/* The live string pools matching StrPoolsAttrs, created in Mem::Init(). */
88 StrPools
[mem_str_pool_count
];
/* Counters covering ALL string allocations (pooled or not). */
89 static MemMeter StrCountMeter
;
90 static MemMeter StrVolumeMeter
;
/* Counters for buffers too large for any MEM_*_BUF pool (> 64KB). */
92 static MemMeter HugeBufCountMeter
;
93 static MemMeter HugeBufVolumeMeter
;
/* memStringStats: write a per-pool usage table for the string pools to the
 * given stream. Each row shows the pool's share of all string allocations
 * (vs StrCountMeter) and of all string volume (vs StrVolumeMeter); a final
 * "Other Strings" row accounts for non-pooled (xcalloc'd) strings.
 * NOTE(review): braces and the loop index declaration (orig 99-100, 115-117,
 * 125-126) are missing in this extraction. */
98 memStringStats(std::ostream
&stream
)
101 int pooled_count
= 0;
102 size_t pooled_volume
= 0;
/* table heading */
104 stream
<< "String Pool\t Impact\t\t\n \t (%strings)\t (%volume)\n";
/* one row per fixed-size string pool */
107 for (i
= 0; i
< mem_str_pool_count
; i
++) {
108 const MemAllocator
*pool
= StrPools
[i
].pool
;
109 const int plevel
= pool
->getMeter().inuse
.level
;
110 stream
<< std::setw(20) << std::left
<< pool
->objectType();
111 stream
<< std::right
<< "\t " << xpercentInt(plevel
, StrCountMeter
.level
);
112 stream
<< "\t " << xpercentInt(plevel
* pool
->objectSize(), StrVolumeMeter
.level
) << "\n";
/* running totals so the remainder can be attributed to "Other Strings" */
113 pooled_count
+= plevel
;
114 pooled_volume
+= plevel
* pool
->objectSize();
/* strings that fell through to xcalloc (larger than the biggest pool) */
118 stream
<< std::setw(20) << std::left
<< "Other Strings";
120 stream
<< std::right
<< "\t ";
122 stream
<< xpercentInt(StrCountMeter
.level
- pooled_count
, StrCountMeter
.level
) << "\t ";
124 stream
<< xpercentInt(StrVolumeMeter
.level
- pooled_volume
, StrVolumeMeter
.level
) << "\n\n";
/* memBufStats: report count and volume (KB) of "huge" buffers — allocations
 * larger than the largest MEM_*_BUF pool, tracked by HugeBufCountMeter and
 * HugeBufVolumeMeter in memAllocBuf()/memFreeBuf().
 * NOTE(review): return type line and braces missing in this extraction. */
128 memBufStats(std::ostream
& stream
)
130 stream
<< "Large buffers: " <<
131 HugeBufCountMeter
.level
<< " (" <<
132 HugeBufVolumeMeter
.level
/ 1024 << " KB)\n";
/* Mem::Stats: cache-manager action body. Wraps the StoreEntry in a
 * StoreEntryStream and emits the string-pool table, then (when running under
 * valgrind) triggers a leak check and reports leaked/dubious/reachable/
 * suppressed byte counts.
 * NOTE(review): lines between 140 and 143 (other stats calls, e.g. buffer
 * stats) and the function tail (orig 155+) are missing in this extraction. */
136 Mem::Stats(StoreEntry
* sentry
)
138 StoreEntryStream
stream(sentry
);
140 memStringStats(stream
);
/* RUNNING_ON_VALGRIND is a valgrind client-request macro: nonzero only when
 * the process is actually executing under valgrind. */
143 if (RUNNING_ON_VALGRIND
) {
144 long int leaked
= 0, dubious
= 0, reachable
= 0, suppressed
= 0;
145 stream
<< "Valgrind Report:\n";
146 stream
<< "Type\tAmount\n";
147 debugs(13, 1, "Asking valgrind for memleaks");
/* run a full leak scan now, so the counters below are current */
148 VALGRIND_DO_LEAK_CHECK
;
149 debugs(13, 1, "Getting valgrind statistics");
150 VALGRIND_COUNT_LEAKS(leaked
, dubious
, reachable
, suppressed
);
151 stream
<< "Leaked\t" << leaked
<< "\n";
152 stream
<< "Dubious\t" << dubious
<< "\n";
153 stream
<< "Reachable\t" << reachable
<< "\n";
154 stream
<< "Suppressed\t" << suppressed
<< "\n";
165 * we have a limit on _total_ amount of idle memory so we ignore
/* memDataInit: create and register the pool for one mem_type.
 *  - asserts the name/size are sane and the slot is not already taken;
 *  - max_pages_notused is accepted but ignored (see comment fragment above:
 *    only the total idle-memory limit matters);
 *  - zeroOnPush controls whether freed objects are zeroed when returned.
 * NOTE(review): return-type line and braces missing in this extraction. */
169 memDataInit(mem_type type
, const char *name
, size_t size
, int max_pages_notused
, bool zeroOnPush
)
171 assert(name
&& size
);
/* each mem_type may be initialized exactly once */
172 assert(MemPools
[type
] == NULL
);
173 MemPools
[type
] = memPoolCreate(name
, size
);
174 MemPools
[type
]->zeroOnPush(zeroOnPush
);
178 /* find appropriate pool and use it (pools always init buffer with 0s) */
/* memAllocate: allocate one object from the pool registered for 'type'.
 * NOTE(review): return-type line ("void *") and braces missing here. */
180 memAllocate(mem_type type
)
182 return MemPools
[type
]->alloc();
185 /* give memory back to the pool */
/* memFree: return object p to the pool registered for 'type'. Counterpart of
 * memAllocate(). NOTE(review): return-type line and braces missing here. */
187 memFree(void *p
, int type
)
189 MemPools
[type
]->freeOne(p
);
192 /* allocate a variable size buffer using best-fit pool */
/* memAllocString: best-fit allocation from the three string pools; falls back
 * to xcalloc() when net_size exceeds the largest pool object. *gross_size is
 * set to the actual usable size (pool object size, or net_size for the
 * fallback). Updates the global string meters.
 * NOTE(review): loop index declaration, braces, and the loop 'break'
 * (orig 195-199, 203-206) are missing in this extraction. */
194 memAllocString(size_t net_size
, size_t * gross_size
)
197 MemAllocator
*pool
= NULL
;
/* pick the first (smallest) pool whose object size fits net_size */
200 for (i
= 0; i
< mem_str_pool_count
; i
++) {
201 if (net_size
<= StrPoolsAttrs
[i
].obj_size
) {
202 pool
= StrPools
[i
].pool
;
/* gross size is the pool's fixed object size when pooled, exact otherwise */
207 *gross_size
= pool
? StrPoolsAttrs
[i
].obj_size
: net_size
;
208 assert(*gross_size
>= net_size
);
/* account for ALL strings, pooled or not — read back by memStringStats() */
209 memMeterInc(StrCountMeter
);
210 memMeterAdd(StrVolumeMeter
, *gross_size
);
211 return pool
? pool
->alloc() : xcalloc(1, net_size
);
214 extern size_t memStringCount();
/* memStringCount: sum of in-use object counts across the string pools.
 * NOTE(review): the function header and 'size_t result = 0;' / 'return result;'
 * lines (orig 215-219, 222-223) are missing in this extraction. */
220 for (int counter
= 0; counter
< mem_str_pool_count
; ++counter
)
221 result
+= memPoolInUseCount(StrPools
[counter
].pool
);
226 /* free buffer allocated with memAllocString() */
/* memFreeString: release a string obtained from memAllocString(). 'size' must
 * be the gross size returned by the allocator: if it matches a pool bucket it
 * must match EXACTLY (asserted), otherwise the buffer is xfree()d. The global
 * string meters are decremented either way.
 * NOTE(review): loop index declaration, braces and loop 'break'
 * (orig 229-233, 238-241) are missing in this extraction. */
228 memFreeString(size_t size
, void *buf
)
231 MemAllocator
*pool
= NULL
;
234 for (i
= 0; i
< mem_str_pool_count
; i
++) {
235 if (size
<= StrPoolsAttrs
[i
].obj_size
) {
/* a pooled string's gross size is always exactly the pool object size */
236 assert(size
== StrPoolsAttrs
[i
].obj_size
);
237 pool
= StrPools
[i
].pool
;
242 memMeterDec(StrCountMeter
);
243 memMeterDel(StrVolumeMeter
, size
);
/* pooled strings go back to their pool; oversize ones were xcalloc'd */
244 pool
? pool
->freeOne(buf
) : xfree(buf
);
247 /* Find the best fit MEM_X_BUF type */
/* memFindBufSizeType: map net_size to the smallest MEM_2K..MEM_64K_BUF bucket
 * that fits; sizes above 64KB have no bucket. gross_size, when non-NULL, is
 * set to the chosen bucket size (see callers memAllocBuf/memFreeBuf).
 * NOTE(review): this extraction has only the if/else-if conditions — the
 * bucket assignments in each branch, the final else, the gross_size store and
 * the return (orig 250-253, 255-256, 258-259, ... 270-282) are missing. */
249 memFindBufSizeType(size_t net_size
, size_t * gross_size
)
254 if (net_size
<= 2 * 1024) {
257 } else if (net_size
<= 4 * 1024) {
260 } else if (net_size
<= 8 * 1024) {
263 } else if (net_size
<= 16 * 1024) {
266 } else if (net_size
<= 32 * 1024) {
269 } else if (net_size
<= 64 * 1024) {
283 /* allocate a variable size buffer using best-fit pool */
/* memAllocBuf: allocate via the best-fit MEM_*_BUF pool when one fits
 * (memFindBufSizeType also fills *gross_size); otherwise fall back to
 * xcalloc() and account the buffer in the huge-buffer meters.
 * NOTE(review): return-type line and braces missing in this extraction. */
285 memAllocBuf(size_t net_size
, size_t * gross_size
)
287 mem_type type
= memFindBufSizeType(net_size
, gross_size
);
289 if (type
!= MEM_NONE
)
290 return memAllocate(type
);
/* too big for any pool: track separately so memBufStats() can report it */
292 memMeterInc(HugeBufCountMeter
);
293 memMeterAdd(HugeBufVolumeMeter
, *gross_size
);
294 return xcalloc(1, net_size
);
298 /* resize a variable sized buffer using best-fit pool */
/* memReallocBuf: grow/shrink a memAllocBuf() buffer by allocate-copy-free.
 * On entry *gross_size is the old gross size; on exit it is the new one.
 * Copies min(old gross, new net) bytes, then frees the old buffer.
 * NOTE(review): return-type line, braces, and the 'if (oldbuf)' guard around
 * the copy/free (orig 306-307, 314-316) are missing in this extraction. */
300 memReallocBuf(void *oldbuf
, size_t net_size
, size_t * gross_size
)
302 /* XXX This can be optimized on very large buffers to use realloc() */
303 /* TODO: if the existing gross size is >= new gross size, do nothing */
304 size_t new_gross_size
;
305 void *newbuf
= memAllocBuf(net_size
, &new_gross_size
);
308 size_t data_size
= *gross_size
;
/* never copy more than the new buffer's requested size */
310 if (data_size
> net_size
)
311 data_size
= net_size
;
313 memcpy(newbuf
, oldbuf
, data_size
);
/* old buffer must be freed with its OLD gross size for correct accounting */
315 memFreeBuf(*gross_size
, oldbuf
);
318 *gross_size
= new_gross_size
;
322 /* free buffer allocated with memAllocBuf() */
/* memFreeBuf: release a memAllocBuf() buffer; 'size' is its gross size.
 * Pool-sized buffers go back via memFree(); huge ones are xfree()d and the
 * huge-buffer meters are decremented.
 * NOTE(review): return-type line, braces, the pooled-branch memFree() call
 * and the xfree() fallback (orig 329-331, 334-335) are missing here. */
324 memFreeBuf(size_t size
, void *buf
)
326 mem_type type
= memFindBufSizeType(size
, NULL
);
328 if (type
!= MEM_NONE
)
332 memMeterDec(HugeBufCountMeter
);
333 memMeterDel(HugeBufVolumeMeter
, size
);
337 static double clean_interval
= 15.0; /* time to live of idle chunk before release */
/* Mem::CleanIdlePools: periodic event handler — release pool chunks that have
 * been idle longer than clean_interval seconds, then reschedule itself via
 * eventAdd with the same interval. 'unused' is the event callback's opaque
 * argument (NULL here). NOTE(review): braces missing in this extraction. */
340 Mem::CleanIdlePools(void *unused
)
342 MemPools::GetInstance().clean(static_cast<time_t>(clean_interval
));
343 eventAdd("memPoolCleanIdlePools", CleanIdlePools
, NULL
, clean_interval
, 1);
/* (configuration handler) Derive the idle-pool memory limit from squid.conf:
 * disabled pools or memory_pools_limit <= 0 map to a default (assignment lines
 * missing below), a positive configured limit is used as-is. Shrinking the
 * limit below the current idle level is logged before being applied.
 * NOTE(review): the function signature (orig ~347-348) and the branch bodies
 * at orig 353, 356, 359-364 are missing in this extraction — confirm the
 * function name (likely memConfigure) against VCS. */
349 size_t new_pool_limit
;
351 /** Set to configured value first */
352 if (!Config
.onoff
.mem_pools
)
354 else if (Config
.MemPools
.limit
> 0)
355 new_pool_limit
= Config
.MemPools
.limit
;
/* NOTE(review): "chagned" typo in this user-visible message — it is runtime
 * output, so it must be fixed in a code change, not a comment edit. */
357 if (Config
.MemPools
.limit
== 0)
358 debugs(13, 1, "memory_pools_limit 0 has been chagned to memory_pools_limit none. Please update your config");
365 * No debugging here please because this method is called before
366 * the debug log is configured and we'll get the message on
367 * stderr when doing things like 'squid -k reconfigure'
369 if (MemPools::GetInstance().idleLimit() > new_pool_limit
)
370 debugs(13, 1, "Shrinking idle mem pools to "<< std::setprecision(3) << toMB(new_pool_limit
) << " MB");
373 MemPools::GetInstance().setIdleLimit(new_pool_limit
);
376 /* XXX make these classes do their own memory management */
377 #include "HttpHdrContRange.h"
/* Mem::Init: one-time setup of every typed pool in MemPools[] plus the three
 * string pools, then cache-manager registration. Runs before config parsing
 * and before debug-log init (see original comment below), so debugs() at
 * levels 0/1 always reach stderr.
 * NOTE(review): the function header, loop-index declaration, several braces
 * and #else/#endif lines are missing in this extraction. */
385 * NOTE: Mem::Init() is called before the config file is parsed
386 * and before the debugging module has been initialized. Any
387 * debug messages here at level 0 or 1 will always be printed
392 * Set all pointers to null. */
393 memset(MemPools
, '\0', sizeof(MemPools
));
395 * Then initialize all pools.
397 * Starting with generic 2kB - 64kB buffer pools, then specific object types.
399 * It does not hurt much to have a lot of pools since sizeof(MemPool) is
400 * small; someday we will figure out what to do with all the entries here
401 * that are never used or used only once; perhaps we should simply use
402 * malloc() for those? @?@
/* generic power-of-two buffer pools; zeroOnPush=false (callers init them) */
404 memDataInit(MEM_2K_BUF
, "2K Buffer", 2048, 10, false);
405 memDataInit(MEM_4K_BUF
, "4K Buffer", 4096, 10, false);
406 memDataInit(MEM_8K_BUF
, "8K Buffer", 8192, 10, false);
407 memDataInit(MEM_16K_BUF
, "16K Buffer", 16384, 10, false);
408 memDataInit(MEM_32K_BUF
, "32K Buffer", 32768, 10, false);
409 memDataInit(MEM_64K_BUF
, "64K Buffer", 65536, 10, false);
/* object-type pools, sized from the corresponding struct/class */
410 memDataInit(MEM_ACL_DENY_INFO_LIST
, "acl_deny_info_list",
411 sizeof(acl_deny_info_list
), 0);
412 memDataInit(MEM_ACL_NAME_LIST
, "acl_name_list", sizeof(acl_name_list
), 0);
413 #if USE_CACHE_DIGESTS
415 memDataInit(MEM_CACHE_DIGEST
, "CacheDigest", sizeof(CacheDigest
), 0);
418 memDataInit(MEM_LINK_LIST
, "link_list", sizeof(link_list
), 10);
419 memDataInit(MEM_DLINK_NODE
, "dlink_node", sizeof(dlink_node
), 10);
420 memDataInit(MEM_DREAD_CTRL
, "dread_ctrl", sizeof(dread_ctrl
), 0);
421 memDataInit(MEM_DWRITE_Q
, "dwrite_q", sizeof(dwrite_q
), 0);
422 memDataInit(MEM_HTTP_HDR_CC
, "HttpHdrCc", sizeof(HttpHdrCc
), 0);
423 memDataInit(MEM_HTTP_HDR_CONTENT_RANGE
, "HttpHdrContRange", sizeof(HttpHdrContRange
), 0);
424 memDataInit(MEM_NETDBENTRY
, "netdbEntry", sizeof(netdbEntry
), 0);
425 memDataInit(MEM_NET_DB_NAME
, "net_db_name", sizeof(net_db_name
), 0);
426 memDataInit(MEM_RELIST
, "relist", sizeof(relist
), 0);
427 memDataInit(MEM_CLIENT_INFO
, "ClientInfo", sizeof(ClientInfo
), 0);
428 memDataInit(MEM_MD5_DIGEST
, "MD5 digest", SQUID_MD5_DIGEST_LENGTH
, 0);
/* MD5 digests are tiny and numerous — use larger 512KB chunks */
429 MemPools
[MEM_MD5_DIGEST
]->setChunkSize(512 * 1024);
431 /** Lastly init the string pools. */
432 for (i
= 0; i
< mem_str_pool_count
; i
++) {
433 StrPools
[i
].pool
= memPoolCreate(StrPoolsAttrs
[i
].name
, StrPoolsAttrs
[i
].obj_size
);
434 StrPools
[i
].pool
->zeroOnPush(false);
/* warn when the allocator rounded the requested string-pool object size */
436 if (StrPools
[i
].pool
->objectSize() != StrPoolsAttrs
[i
].obj_size
)
437 debugs(13, 1, "Notice: " << StrPoolsAttrs
[i
].name
<< " is " << StrPools
[i
].pool
->objectSize() << " bytes instead of requested " << StrPoolsAttrs
[i
].obj_size
<< " bytes");
441 * finally register with the cache manager */
442 RegisterWithCacheManager();
/* (debug report fragment) Logs at level 3 whether memory pools are enabled
 * and the current idle limit in MB.
 * NOTE(review): the enclosing function's header and tail (orig ~444-447,
 * 451-453) are missing in this extraction — confirm against VCS. */
448 debugs(13, 3, "Memory pools are '" <<
449 (Config
.onoff
.mem_pools
? "on" : "off") << "'; limit: " <<
450 std::setprecision(3) << toMB(MemPools::GetInstance().idleLimit()) <<
/* Mem::RegisterWithCacheManager: register the "mem" / "Memory Utilization"
 * cache-manager action. NOTE(review): the registerAction() call's remaining
 * arguments (orig 458+, presumably the handler and flags) are missing. */
455 Mem::RegisterWithCacheManager(void)
457 CacheManager::GetInstance()->registerAction("mem", "Memory Utilization",
/* Prefix ++ for the mem_type enum so callers can iterate over all types
 * (see the MEM_NONE..MEM_MAX loop below). Increments via an int temporary.
 * NOTE(review): the temporary's declaration (orig ~463, e.g. 'int tmp = (int)aMem;')
 * and the return statement are missing in this extraction. */
461 mem_type
&operator++ (mem_type
&aMem
)
464 aMem
= (mem_type
)(++tmp
);
/* (initialization self-check) Iterate every mem_type except MEM_NONE and
 * MEM_DONTFREE and assert its MemPools slot was filled by memDataInit().
 * NOTE(review): the enclosing function header, the 'continue;' and the
 * assert line itself (orig ~470-475, 478-480, 483+) are missing here. */
469 * Test that all entries are initialized
476 for (t
= MEM_NONE
, ++t
; t
< MEM_MAX
; ++t
) {
/* MEM_DONTFREE is deliberately pool-less */
477 if (MEM_DONTFREE
== t
)
481 * If you hit this assertion, then you forgot to add a
482 * memDataInit() line for type 't'.
/* (shutdown cleanup, memCleanModule-style) Drop the idle limit to zero,
 * release every idle chunk immediately, then warn at debug level 2 about any
 * items/chunks/pools still in use ("left dirty") — i.e. leaks at shutdown.
 * NOTE(review): the function header line is missing in this extraction. */
491 MemPoolGlobalStats stats
;
492 MemPools::GetInstance().setIdleLimit(0);
493 MemPools::GetInstance().clean(0);
494 memPoolGetGlobalStats(&stats
);
496 if (stats
.tot_items_inuse
)
497 debugs(13, 2, "memCleanModule: " << stats
.tot_items_inuse
<<
498 " items in " << stats
.tot_chunks_inuse
<< " chunks and " <<
499 stats
.tot_pools_inuse
<< " pools are left dirty");
/* memInUse: number of objects currently checked out of the pool for 'type'.
 * NOTE(review): return-type line and braces missing in this extraction. */
503 memInUse(mem_type type
)
505 return memPoolInUseCount(MemPools
[type
]);
/* Size-specific free wrappers (bodies only — each function's header line,
 * e.g. 'static void memFree2K(void *p)', is missing in this extraction).
 * Each simply forwards to memFree() with the matching MEM_*_BUF type; they
 * exist so memFreeBufFunc() can hand out a plain function pointer per size. */
513 memFree(p
, MEM_2K_BUF
);
519 memFree(p
, MEM_4K_BUF
);
525 memFree(p
, MEM_8K_BUF
);
531 memFree(p
, MEM_16K_BUF
);
537 memFree(p
, MEM_32K_BUF
);
543 memFree(p
, MEM_64K_BUF
);
/* memFreeBufFunc: return the free routine appropriate for a buffer of the
 * given gross size (one of the memFreeNK wrappers above for pool sizes).
 * Only the huge-buffer fallback branch survives in this extraction: it
 * decrements the huge-buffer meters; the dispatch over pool sizes
 * (orig 548-569) and the returned fallback function are missing. */
547 memFreeBufFunc(size_t size
)
570 memMeterDec(HugeBufCountMeter
);
571 memMeterDel(HugeBufVolumeMeter
, size
);
/* Mem::PoolReport: write one tab-separated report row for a single pool to
 * 'stream'. Columns: label and object size; chunk geometry and fragmentation
 * (only when the pool is chunked); allocated / in-use / idle sections with KB
 * totals, high-water marks and hours-since-high-water; allocations saved; and
 * the allocation rate since the last report (uses module-global xm_deltat).
 * Side effect: records the current gb_allocated.count into gb_oallocated so
 * the next report's rate is a delta.
 * NOTE(review): braces and the declarations of 'needed'/'excess' (and a
 * 'needed++' line around orig 600) are missing in this extraction. */
579 Mem::PoolReport(const MemPoolStats
* mp_st
, const MemPoolMeter
* AllMeter
, std::ostream
&stream
)
583 MemPoolMeter
*pm
= mp_st
->meter
;
584 const char *delim
= "\t ";
/* fixed-point notation for all the percentage/KB columns below */
587 stream
.setf(std::ios_base::fixed
);
589 stream
<< std::setw(20) << std::left
<< mp_st
->label
<< delim
;
590 stream
<< std::setw(4) << std::right
<< mp_st
->obj_size
<< delim
;
/* chunk columns apply only to chunked pools */
593 if (mp_st
->chunk_capacity
) {
594 stream
<< std::setw(4) << toKB(mp_st
->obj_size
* mp_st
->chunk_capacity
) << delim
;
595 stream
<< std::setw(4) << mp_st
->chunk_capacity
<< delim
;
/* minimum chunks that could hold the in-use items (rounded up below) */
597 needed
= mp_st
->items_inuse
/ mp_st
->chunk_capacity
;
599 if (mp_st
->items_inuse
% mp_st
->chunk_capacity
)
602 excess
= mp_st
->chunks_inuse
- needed
;
604 stream
<< std::setw(4) << mp_st
->chunks_alloc
<< delim
;
605 stream
<< std::setw(4) << mp_st
->chunks_inuse
<< delim
;
606 stream
<< std::setw(4) << mp_st
->chunks_free
<< delim
;
607 stream
<< std::setw(4) << mp_st
->chunks_partial
<< delim
;
/* %Frag = excess chunks relative to the minimum needed */
608 stream
<< std::setprecision(3) << xpercent(excess
, needed
) << delim
;
619 * Fragmentation calculation:
620 * needed = inuse.level / chunk_capacity
621 * excess = used - needed
622 * fragmentation = excess / needed * 100%
624 * Fragm = (alloced - (inuse / obj_ch) ) / alloced
/* Allocated section: count, KB, high-water KB, hours since high water, %Tot */
627 stream
<< mp_st
->items_alloc
<< delim
;
628 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.level
) << delim
;
629 stream
<< toKB(mp_st
->obj_size
* pm
->alloc
.hwater_level
) << delim
;
630 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->alloc
.hwater_stamp
) / 3600.) << delim
;
631 stream
<< std::setprecision(3) << xpercent(mp_st
->obj_size
* pm
->alloc
.level
, AllMeter
->alloc
.level
) << delim
;
/* In-use section: count, KB, high-water KB, hours, %alloc */
633 stream
<< mp_st
->items_inuse
<< delim
;
634 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.level
) << delim
;
635 stream
<< toKB(mp_st
->obj_size
* pm
->inuse
.hwater_level
) << delim
;
636 stream
<< std::setprecision(2) << ((squid_curtime
- pm
->inuse
.hwater_stamp
) / 3600.) << delim
;
637 stream
<< std::setprecision(3) << xpercent(pm
->inuse
.level
, pm
->alloc
.level
) << delim
;
/* Idle section: count, KB, high-water KB */
639 stream
<< mp_st
->items_idle
<< delim
;
640 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.level
) << delim
;
641 stream
<< toKB(mp_st
->obj_size
* pm
->idle
.hwater_level
) << delim
;
/* Allocations saved by pooling: count, %cnt, %vol vs all allocations */
643 stream
<< (int)pm
->gb_saved
.count
<< delim
;
644 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.count
, AllMeter
->gb_allocated
.count
) << delim
;
645 stream
<< std::setprecision(3) << xpercent(pm
->gb_saved
.bytes
, AllMeter
->gb_allocated
.bytes
) << delim
;
/* allocation rate = new allocations since last report / elapsed seconds */
646 stream
<< std::setprecision(3) << xdiv(pm
->gb_allocated
.count
- pm
->gb_oallocated
.count
, xm_deltat
) << "\n";
647 pm
->gb_oallocated
.count
= pm
->gb_allocated
.count
;
/* MemPoolReportSorter: qsort() comparator for MemPoolStats used by
 * Mem::Report. Two criteria appear below — %Total-Allocated (pa/pb) and
 * recency of the in-use high-water stamp.
 * NOTE(review): the return statements for both criteria (orig 660-667,
 * 671-677) are missing in this extraction, so which criterion is active and
 * the sort direction cannot be confirmed from this text — check VCS. */
651 MemPoolReportSorter(const void *a
, const void *b
)
653 const MemPoolStats
*A
= (MemPoolStats
*) a
;
654 const MemPoolStats
*B
= (MemPoolStats
*) b
;
656 // use this to sort on %Total Allocated
/* bytes currently allocated per pool = object size * allocated level */
658 double pa
= (double) A
->obj_size
* A
->meter
->alloc
.level
;
659 double pb
= (double) B
->obj_size
* B
->meter
->alloc
.level
;
668 // use this to sort on In Use high(hrs)
670 if (A
->meter
->inuse
.hwater_stamp
> B
->meter
->inuse
.hwater_stamp
)
673 if (B
->meter
->inuse
.hwater_stamp
> A
->meter
->inuse
.hwater_stamp
)
/* Mem::Report: full memory-utilization report. Emits the column headers,
 * snapshots global stats, collects per-pool stats for every pool that has
 * ever allocated (gb_allocated.count > 0), sorts them with
 * MemPoolReportSorter, prints one PoolReport row each, then a synthetic
 * "Total" row built from the global stats, followed by summary lines
 * (cumulative volume, overhead, idle limit, pool counts).
 * Also refreshes xm_time/xm_deltat, which PoolReport uses for rates.
 * NOTE(review): braces, several declarations ('pool', 'npools', 'not_used',
 * 'buf'), the not_used counting branch, and xfree(sortme) are among the
 * lines missing in this extraction. */
682 Mem::Report(std::ostream
&stream
)
/* static: reused across reports; mp_stats also doubles as the Total row */
685 static MemPoolStats mp_stats
;
686 static MemPoolGlobalStats mp_total
;
688 MemPoolIterator
*iter
;
692 stream
<< "Current memory usage:\n";
/* header rows (tab-separated, matching PoolReport's columns) */
694 stream
<< "Pool\t Obj Size\t"
695 "Chunks\t\t\t\t\t\t\t"
696 "Allocated\t\t\t\t\t"
699 "Allocations Saved\t\t\t"
704 "(#)\t used\t free\t part\t %Frag\t "
705 "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
706 "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
707 "(#)\t (KB)\t high (KB)\t"
708 "(#)\t %cnt\t %vol\t"
/* refresh the interval used by PoolReport's allocation-rate column */
711 xm_deltat
= current_dtime
- xm_time
;
712 xm_time
= current_dtime
;
714 /* Get stats for Totals report line */
715 memPoolGetGlobalStats(&mp_total
);
/* scratch array sized for every pool ever created */
717 MemPoolStats
*sortme
= (MemPoolStats
*) xcalloc(mp_total
.tot_pools_alloc
,sizeof(*sortme
));
721 iter
= memPoolIterate();
723 while ((pool
= memPoolIterateNext(iter
))) {
724 pool
->getStats(&mp_stats
);
726 if (!mp_stats
.pool
) /* pool destroyed */
729 if (mp_stats
.pool
->getMeter().gb_allocated
.count
> 0) /* this pool has been used */
730 sortme
[npools
++] = mp_stats
;
735 memPoolIterateDone(&iter
);
737 qsort(sortme
, npools
, sizeof(*sortme
), MemPoolReportSorter
);
739 for (int i
= 0; i
< npools
; i
++) {
740 PoolReport(&sortme
[i
], mp_total
.TheMeter
, stream
);
/* build the synthetic "Total" row from global stats (pool=NULL, obj_size=1
 * so KB columns come straight from the meters) */
745 mp_stats
.pool
= NULL
;
746 mp_stats
.label
= "Total";
747 mp_stats
.meter
= mp_total
.TheMeter
;
748 mp_stats
.obj_size
= 1;
749 mp_stats
.chunk_capacity
= 0;
750 mp_stats
.chunk_size
= 0;
751 mp_stats
.chunks_alloc
= mp_total
.tot_chunks_alloc
;
752 mp_stats
.chunks_inuse
= mp_total
.tot_chunks_inuse
;
753 mp_stats
.chunks_partial
= mp_total
.tot_chunks_partial
;
754 mp_stats
.chunks_free
= mp_total
.tot_chunks_free
;
755 mp_stats
.items_alloc
= mp_total
.tot_items_alloc
;
756 mp_stats
.items_inuse
= mp_total
.tot_items_inuse
;
757 mp_stats
.items_idle
= mp_total
.tot_items_idle
;
758 mp_stats
.overhead
= mp_total
.tot_overhead
;
760 PoolReport(&mp_stats
, mp_total
.TheMeter
, stream
);
/* summary lines */
763 stream
<< "Cumulative allocated volume: "<< double_to_str(buf
, 64, mp_total
.TheMeter
->gb_allocated
.bytes
) << "\n";
765 stream
<< "Current overhead: " << mp_total
.tot_overhead
<< " bytes (" <<
766 std::setprecision(3) << xpercent(mp_total
.tot_overhead
, mp_total
.TheMeter
->inuse
.level
) << "%)\n";
768 if (mp_total
.mem_idle_limit
>= 0)
769 stream
<< "Idle pool limit: " << std::setprecision(2) << toMB(mp_total
.mem_idle_limit
) << " MB\n";
771 stream
<< "Total Pools created: " << mp_total
.tot_pools_alloc
<< "\n";
772 stream
<< "Pools ever used: " << mp_total
.tot_pools_alloc
- not_used
<< " (shown above)\n";
773 stream
<< "Currently in use: " << mp_total
.tot_pools_inuse
<< "\n";