/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * DEBUG: section 63 Low Level Memory Pool Management
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"

#include <cassert>

#include "MemPool.h"
#include "MemPoolChunked.h"
#include "MemPoolMalloc.h"

#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */

#include <cstring>

/*
 * XXX This is a boundary violation between lib and src.. would be good
 * if it could be solved otherwise, but left for now.
 */
extern time_t squid_curtime;

/* local data */
static MemPoolMeter TheMeter;
static MemPoolIterator Iterator;

static int Pool_id_counter = 0;

MemPools &
MemPools::GetInstance()
{
    /* Must use this idiom, as we can be double-initialised
     * if we are called during static initialisations.
     */
    if (!Instance)
        Instance = new MemPools;
    return *Instance;
}

MemPools * MemPools::Instance = NULL;

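/*
 * Walk the global list of pools. Note that a single static Iterator object
 * is shared by all callers, so only one iteration can be in progress at a
 * time; pool iteration is not reentrant.
 */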
MemPoolIterator *
memPoolIterate(void)
{
    Iterator.pool = MemPools::GetInstance().pools;
    return &Iterator;
}

void
memPoolIterateDone(MemPoolIterator ** iter)
{
    assert(iter != NULL);
    Iterator.pool = NULL;
    *iter = NULL;
}

MemImplementingAllocator *
memPoolIterateNext(MemPoolIterator * iter)
{
    MemImplementingAllocator *pool;
    assert(iter != NULL);

    pool = iter->pool;
    if (!pool)
        return NULL;

    iter->pool = pool->next;
    return pool;
}

void
MemPools::setIdleLimit(ssize_t new_idle_limit)
{
    mem_idle_limit = new_idle_limit;
}

ssize_t
MemPools::idleLimit() const
{
    return mem_idle_limit;
}

/* Change the default value of defaultIsChunked to override
 * all pools - including those used before main() starts, i.e. before
 * MemPools::GetInstance().setDefaultPoolChunking() can be called.
 */
MemPools::MemPools() : pools(NULL), mem_idle_limit(2 << 20 /* 2 MB */),
    poolCount(0), defaultIsChunked(USE_CHUNKEDMEMPOOLS && !RUNNING_ON_VALGRIND)
{
    char *cfg = getenv("MEMPOOLS");
    if (cfg)
        defaultIsChunked = atoi(cfg);
}

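/*
 * A minimal usage sketch for the pool API below (illustrative only; the
 * label "example" and size 128 are made-up values, not taken from Squid):
 *
 *   MemPools::GetInstance().setDefaultPoolChunking(false); // before pools exist
 *   MemAllocator *pool = MemPools::GetInstance().create("example", 128);
 *   void *obj = pool->alloc();
 *   pool->freeOne(obj);
 *
 * The MEMPOOLS environment variable (read in the constructor above) selects
 * the same default for pools created before main() runs.
 */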
MemImplementingAllocator *
MemPools::create(const char *label, size_t obj_size)
{
    ++poolCount;
    if (defaultIsChunked)
        return new MemPoolChunked (label, obj_size);
    else
        return new MemPoolMalloc (label, obj_size);
}

void
MemPools::setDefaultPoolChunking(bool const &aBool)
{
    defaultIsChunked = aBool;
}

char const *
MemAllocator::objectType() const
{
    return label;
}

int
MemAllocator::inUseCount()
{
    return getInUseCount();
}

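/*
 * Fold the alloc/free/saved call counters accumulated since the last flush
 * into this pool's cumulative meters.
 */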
void
MemImplementingAllocator::flushMeters()
{
    size_t calls;

    calls = free_calls;
    if (calls) {
        meter.gb_freed.count += calls;
        free_calls = 0;
    }
    calls = alloc_calls;
    if (calls) {
        meter.gb_allocated.count += calls;
        alloc_calls = 0;
    }
    calls = saved_calls;
    if (calls) {
        meter.gb_saved.count += calls;
        saved_calls = 0;
    }
}

void
MemImplementingAllocator::flushMetersFull()
{
    flushMeters();
    getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
    getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
    getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
}

void
MemPoolMeter::flush()
{
    alloc.level = 0;
    inuse.level = 0;
    idle.level = 0;
    gb_allocated.count = 0;
    gb_allocated.bytes = 0;
    gb_oallocated.count = 0;
    gb_oallocated.bytes = 0;
    gb_saved.count = 0;
    gb_saved.bytes = 0;
    gb_freed.count = 0;
    gb_freed.bytes = 0;
}

MemPoolMeter::MemPoolMeter()
{
    flush();
}

/*
 * Updates all pool counters, and recreates TheMeter totals from all pools
 */
void
MemPools::flushMeters()
{
    MemImplementingAllocator *pool;
    MemPoolIterator *iter;

    TheMeter.flush();

    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        pool->flushMetersFull();
        memMeterAdd(TheMeter.alloc, pool->getMeter().alloc.level * pool->obj_size);
        memMeterAdd(TheMeter.inuse, pool->getMeter().inuse.level * pool->obj_size);
        memMeterAdd(TheMeter.idle, pool->getMeter().idle.level * pool->obj_size);
        TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
        TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
        TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
        TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
        TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
        TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
    }
    memPoolIterateDone(&iter);
}

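/*
 * Account for the call and hand out an object from the concrete pool.
 * Once the pending call count reaches FLUSH_LIMIT, the counters are
 * folded into the meters.
 */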
void *
MemImplementingAllocator::alloc()
{
    if (++alloc_calls == FLUSH_LIMIT)
        flushMeters();

    return allocate();
}

void
MemImplementingAllocator::freeOne(void *obj)
{
    assert(obj != NULL);
    (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
    deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
    ++free_calls;
}

/*
 * Returns all cached frees to their home chunks.
 * If a chunk's unreferenced age exceeds maxage, the idle chunk is destroyed.
 * Flushes meters for every pool.
 * When total idle memory is above mem_idle_limit, all pools are cleaned
 * aggressively (maxage is forced to 0) to shrink memPool memory usage
 * back towards the configured limit.
 */
void
MemPools::clean(time_t maxage)
{
    flushMeters();
    if (mem_idle_limit < 0) // no limit to enforce
        return;

    int shift = 1;
    if (TheMeter.idle.level > mem_idle_limit)
        maxage = shift = 0;

    MemImplementingAllocator *pool;
    MemPoolIterator *iter;
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter)))
        if (pool->idleTrigger(shift))
            pool->clean(maxage);
    memPoolIterateDone(&iter);
}

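/*
 * Illustrative use (an assumption; this file never calls it this way):
 * to release as much idle memory as possible right away, a caller could do
 *
 *   MemPools::GetInstance().setIdleLimit(0);
 *   MemPools::GetInstance().clean(0);
 */
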
/* Persistent pool stats, used for GlobalStats accumulation */
static MemPoolStats pp_stats;

/*
 * Returns the totals statistics accumulated over all pools.
 */
int
memPoolGetGlobalStats(MemPoolGlobalStats * stats)
{
    int pools_inuse = 0;
    MemAllocator *pool;
    MemPoolIterator *iter;

    memset(stats, 0, sizeof(MemPoolGlobalStats));
    memset(&pp_stats, 0, sizeof(MemPoolStats));

    MemPools::GetInstance().flushMeters(); /* recreate TheMeter */

    /* gather all stats for Totals */
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        if (pool->getStats(&pp_stats, 1) > 0)
            ++pools_inuse;
    }
    memPoolIterateDone(&iter);

    stats->TheMeter = &TheMeter;

    stats->tot_pools_alloc = MemPools::GetInstance().poolCount;
    stats->tot_pools_inuse = pools_inuse;
    stats->tot_pools_mempid = Pool_id_counter;

    stats->tot_chunks_alloc = pp_stats.chunks_alloc;
    stats->tot_chunks_inuse = pp_stats.chunks_inuse;
    stats->tot_chunks_partial = pp_stats.chunks_partial;
    stats->tot_chunks_free = pp_stats.chunks_free;
    stats->tot_items_alloc = pp_stats.items_alloc;
    stats->tot_items_inuse = pp_stats.items_inuse;
    stats->tot_items_idle = pp_stats.items_idle;

    stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemAllocator *);
    stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit;

    return pools_inuse;
}

MemAllocator::MemAllocator(char const *aLabel) : doZero(true), label(aLabel)
{
}

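/* Round a size up to the nearest multiple of the pointer size, so that
 * pooled objects stay word-aligned.
 */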
size_t MemAllocator::RoundedSize(size_t s)
{
    return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*);
}

int
memPoolInUseCount(MemAllocator * pool)
{
    return pool->inUseCount();
}

int
memPoolsTotalAllocated(void)
{
    MemPoolGlobalStats stats;
    memPoolGetGlobalStats(&stats);
    return stats.TheMeter->alloc.level;
}

void *
MemAllocatorProxy::alloc()
{
    return getAllocator()->alloc();
}

void
MemAllocatorProxy::freeOne(void *address)
{
    getAllocator()->freeOne(address);
    /* TODO: check for empty, and if so, if the default type has altered,
     * switch
     */
}

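/*
 * Lazily create the underlying allocator on first use, using whichever
 * pool implementation is the default at that moment.
 */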
MemAllocator *
MemAllocatorProxy::getAllocator() const
{
    if (!theAllocator)
        theAllocator = MemPools::GetInstance().create(objectType(), size);
    return theAllocator;
}

int
MemAllocatorProxy::inUseCount() const
{
    if (!theAllocator)
        return 0;
    else
        return memPoolInUseCount(theAllocator);
}

size_t
MemAllocatorProxy::objectSize() const
{
    return size;
}

char const *
MemAllocatorProxy::objectType() const
{
    return label;
}

MemPoolMeter const &
MemAllocatorProxy::getMeter() const
{
    return getAllocator()->getMeter();
}

int
MemAllocatorProxy::getStats(MemPoolStats * stats)
{
    return getAllocator()->getStats(stats);
}

MemImplementingAllocator::MemImplementingAllocator(char const *aLabel, size_t aSize) : MemAllocator(aLabel),
    next(NULL),
    alloc_calls(0),
    free_calls(0),
    saved_calls(0),
    obj_size(RoundedSize(aSize))
{
    memPID = ++Pool_id_counter;

    MemImplementingAllocator *last_pool;

    assert(aLabel != NULL && aSize);
    /* Append as Last */
    for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;)
        last_pool = last_pool->next;
    if (last_pool)
        last_pool->next = this;
    else
        MemPools::GetInstance().pools = this;
}

MemImplementingAllocator::~MemImplementingAllocator()
{
    MemImplementingAllocator *find_pool, *prev_pool;

    /* Abort if the associated pool doesn't exist */
    assert(MemPools::GetInstance().pools != NULL);

    /* Pool clean, remove it from List and free */
    for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next)
        prev_pool = find_pool;

    /* make sure that we found the pool to destroy */
    assert(find_pool != NULL);

    if (prev_pool)
        prev_pool->next = next;
    else
        MemPools::GetInstance().pools = next;
    --MemPools::GetInstance().poolCount;
}

MemPoolMeter const &
MemImplementingAllocator::getMeter() const
{
    return meter;
}

MemPoolMeter &
MemImplementingAllocator::getMeter()
{
    return meter;
}

size_t
MemImplementingAllocator::objectSize() const
{
    return obj_size;
}