/* Extracted from a git web viewer; original file: src/mem/Pool.cc (Squid). */
1 /*
2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /*
10 * DEBUG: section 63 Low Level Memory Pool Management
11 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
12 */
13
14 #include "squid.h"
15 #include "mem/PoolChunked.h"
16 #include "mem/PoolMalloc.h"
17
18 #include <cassert>
19 #include <cstring>
20
21 #define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
22
23 extern time_t squid_curtime;
24
25 static MemPoolMeter TheMeter;
26 static MemPoolIterator Iterator;
27 static int Pool_id_counter = 0;
28
29 MemPools &
30 MemPools::GetInstance()
31 {
32 // We must initialize on first use (which may happen during static
33 // initialization) and preserve until the last user is gone (which
34 // may happen long after main() exit). We currently preserve forever.
35 static MemPools *Instance = new MemPools;
36 return *Instance;
37 }
38
39 MemPoolIterator *
40 memPoolIterate(void)
41 {
42 Iterator.pool = MemPools::GetInstance().pools;
43 return &Iterator;
44 }
45
46 void
47 memPoolIterateDone(MemPoolIterator ** iter)
48 {
49 assert(iter != NULL);
50 Iterator.pool = NULL;
51 *iter = NULL;
52 }
53
54 MemImplementingAllocator *
55 memPoolIterateNext(MemPoolIterator * iter)
56 {
57 MemImplementingAllocator *pool;
58 assert(iter != NULL);
59
60 pool = iter->pool;
61 if (!pool)
62 return NULL;
63
64 iter->pool = pool->next;
65 return pool;
66 }
67
68 void
69 MemPools::setIdleLimit(ssize_t new_idle_limit)
70 {
71 mem_idle_limit = new_idle_limit;
72 }
73
74 ssize_t
75 MemPools::idleLimit() const
76 {
77 return mem_idle_limit;
78 }
79
/* Change the default value of defaultIsChunked to override
 * all pools - including those used before main() starts where
 * MemPools::GetInstance().setDefaultPoolChunking() can be called.
 */
84 MemPools::MemPools() : pools(NULL), mem_idle_limit(2 << 20 /* 2 MB */),
85 poolCount(0), defaultIsChunked(USE_CHUNKEDMEMPOOLS && !RUNNING_ON_VALGRIND)
86 {
87 char *cfg = getenv("MEMPOOLS");
88 if (cfg)
89 defaultIsChunked = atoi(cfg);
90 }
91
92 MemImplementingAllocator *
93 MemPools::create(const char *label, size_t obj_size)
94 {
95 ++poolCount;
96 if (defaultIsChunked)
97 return new MemPoolChunked (label, obj_size);
98 else
99 return new MemPoolMalloc (label, obj_size);
100 }
101
102 void
103 MemPools::setDefaultPoolChunking(bool const &aBool)
104 {
105 defaultIsChunked = aBool;
106 }
107
108 char const *
109 MemAllocator::objectType() const
110 {
111 return label;
112 }
113
114 int
115 MemAllocator::inUseCount()
116 {
117 return getInUseCount();
118 }
119
120 void
121 MemImplementingAllocator::flushMeters()
122 {
123 size_t calls;
124
125 calls = free_calls;
126 if (calls) {
127 meter.gb_freed.count += calls;
128 free_calls = 0;
129 }
130 calls = alloc_calls;
131 if (calls) {
132 meter.gb_allocated.count += calls;
133 alloc_calls = 0;
134 }
135 calls = saved_calls;
136 if (calls) {
137 meter.gb_saved.count += calls;
138 saved_calls = 0;
139 }
140 }
141
142 void
143 MemImplementingAllocator::flushMetersFull()
144 {
145 flushMeters();
146 getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
147 getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
148 getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
149 }
150
151 void
152 MemPoolMeter::flush()
153 {
154 alloc.flush();
155 inuse.flush();
156 idle.flush();
157 gb_allocated.count = 0;
158 gb_allocated.bytes = 0;
159 gb_oallocated.count = 0;
160 gb_oallocated.bytes = 0;
161 gb_saved.count = 0;
162 gb_saved.bytes = 0;
163 gb_freed.count = 0;
164 gb_freed.bytes = 0;
165 }
166
167 MemPoolMeter::MemPoolMeter()
168 {
169 flush();
170 }
171
172 /*
173 * Updates all pool counters, and recreates TheMeter totals from all pools
174 */
175 void
176 MemPools::flushMeters()
177 {
178 TheMeter.flush();
179
180 MemPoolIterator *iter = memPoolIterate();
181 while (MemImplementingAllocator *pool = memPoolIterateNext(iter)) {
182 pool->flushMetersFull();
183 // are these TheMeter grow() operations or accumulated volumes ?
184 TheMeter.alloc += pool->getMeter().alloc.currentLevel() * pool->obj_size;
185 TheMeter.inuse += pool->getMeter().inuse.currentLevel() * pool->obj_size;
186 TheMeter.idle += pool->getMeter().idle.currentLevel() * pool->obj_size;
187
188 TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
189 TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
190 TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
191 TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
192 TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
193 TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
194 }
195 memPoolIterateDone(&iter);
196 }
197
198 void *
199 MemImplementingAllocator::alloc()
200 {
201 if (++alloc_calls == FLUSH_LIMIT)
202 flushMeters();
203
204 return allocate();
205 }
206
207 void
208 MemImplementingAllocator::freeOne(void *obj)
209 {
210 assert(obj != NULL);
211 (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
212 deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
213 ++free_calls;
214 }
215
/*
 * Returns all cached frees to their home chunks.
 * Destroys idle chunks whose unreferenced age exceeds maxage.
 * Flushes meters for all pools.
 * If total idle memory exceeds mem_idle_limit, every pool is trimmed
 * aggressively (maxage is forced to 0); a negative mem_idle_limit
 * disables cleanup entirely.
 */
225 void
226 MemPools::clean(time_t maxage)
227 {
228 flushMeters();
229 if (mem_idle_limit < 0) // no limit to enforce
230 return;
231
232 int shift = 1;
233 if (TheMeter.idle.currentLevel() > mem_idle_limit)
234 maxage = shift = 0;
235
236 MemImplementingAllocator *pool;
237 MemPoolIterator *iter;
238 iter = memPoolIterate();
239 while ((pool = memPoolIterateNext(iter)))
240 if (pool->idleTrigger(shift))
241 pool->clean(maxage);
242 memPoolIterateDone(&iter);
243 }
244
245 /* Persistent Pool stats. for GlobalStats accumulation */
246 static MemPoolStats pp_stats;
247
/*
 * Fills *stats with totals over all pools; returns the number of pools in use.
 */
251 int
252 memPoolGetGlobalStats(MemPoolGlobalStats * stats)
253 {
254 int pools_inuse = 0;
255 MemAllocator *pool;
256 MemPoolIterator *iter;
257
258 memset(stats, 0, sizeof(MemPoolGlobalStats));
259 memset(&pp_stats, 0, sizeof(MemPoolStats));
260
261 MemPools::GetInstance().flushMeters(); /* recreate TheMeter */
262
263 /* gather all stats for Totals */
264 iter = memPoolIterate();
265 while ((pool = memPoolIterateNext(iter))) {
266 if (pool->getStats(&pp_stats, 1) > 0)
267 ++pools_inuse;
268 }
269 memPoolIterateDone(&iter);
270
271 stats->TheMeter = &TheMeter;
272
273 stats->tot_pools_alloc = MemPools::GetInstance().poolCount;
274 stats->tot_pools_inuse = pools_inuse;
275 stats->tot_pools_mempid = Pool_id_counter;
276
277 stats->tot_chunks_alloc = pp_stats.chunks_alloc;
278 stats->tot_chunks_inuse = pp_stats.chunks_inuse;
279 stats->tot_chunks_partial = pp_stats.chunks_partial;
280 stats->tot_chunks_free = pp_stats.chunks_free;
281 stats->tot_items_alloc = pp_stats.items_alloc;
282 stats->tot_items_inuse = pp_stats.items_inuse;
283 stats->tot_items_idle = pp_stats.items_idle;
284
285 stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemAllocator *);
286 stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit;
287
288 return pools_inuse;
289 }
290
291 MemAllocator::MemAllocator(char const *aLabel) : doZero(true), label(aLabel)
292 {
293 }
294
295 size_t MemAllocator::RoundedSize(size_t s)
296 {
297 return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*);
298 }
299
300 int
301 memPoolsTotalAllocated(void)
302 {
303 MemPoolGlobalStats stats;
304 memPoolGetGlobalStats(&stats);
305 return stats.TheMeter->alloc.currentLevel();
306 }
307
308 MemImplementingAllocator::MemImplementingAllocator(char const *aLabel, size_t aSize) : MemAllocator(aLabel),
309 next(NULL),
310 alloc_calls(0),
311 free_calls(0),
312 saved_calls(0),
313 obj_size(RoundedSize(aSize))
314 {
315 memPID = ++Pool_id_counter;
316
317 MemImplementingAllocator *last_pool;
318
319 assert(aLabel != NULL && aSize);
320 /* Append as Last */
321 for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;)
322 last_pool = last_pool->next;
323 if (last_pool)
324 last_pool->next = this;
325 else
326 MemPools::GetInstance().pools = this;
327 }
328
329 MemImplementingAllocator::~MemImplementingAllocator()
330 {
331 MemImplementingAllocator *find_pool, *prev_pool;
332
333 /* Abort if the associated pool doesn't exist */
334 assert(MemPools::GetInstance().pools != NULL );
335
336 /* Pool clean, remove it from List and free */
337 for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next)
338 prev_pool = find_pool;
339
340 /* make sure that we found the pool to destroy */
341 assert(find_pool != NULL);
342
343 if (prev_pool)
344 prev_pool->next = next;
345 else
346 MemPools::GetInstance().pools = next;
347 --MemPools::GetInstance().poolCount;
348 }
349
350 MemPoolMeter const &
351 MemImplementingAllocator::getMeter() const
352 {
353 return meter;
354 }
355
356 MemPoolMeter &
357 MemImplementingAllocator::getMeter()
358 {
359 return meter;
360 }
361
362 size_t
363 MemImplementingAllocator::objectSize() const
364 {
365 return obj_size;
366 }
367