]> git.ipfire.org Git - thirdparty/squid.git/blob - src/mem/Pool.cc
Source Format Enforcement (#532)
[thirdparty/squid.git] / src / mem / Pool.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /*
10 * DEBUG: section 63 Low Level Memory Pool Management
11 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
12 */
13
14 #include "squid.h"
15 #include "mem/PoolChunked.h"
16 #include "mem/PoolMalloc.h"
17
18 #include <cassert>
19 #include <cstring>
20
21 #define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */
22
23 extern time_t squid_curtime;
24
/// grand totals across all pools; rebuilt by MemPools::flushMeters()
static MemPoolMeter TheMeter;
/// the single shared iterator handed out by memPoolIterate()
static MemPoolIterator Iterator;
/// source of unique memPID values assigned to new pools
static int Pool_id_counter = 0;
28
29 MemPools &
30 MemPools::GetInstance()
31 {
32 // We must initialize on first use (which may happen during static
33 // initialization) and preserve until the last user is gone (which
34 // may happen long after main() exit). We currently preserve forever.
35 static MemPools *Instance = new MemPools;
36 return *Instance;
37 }
38
39 MemPoolIterator *
40 memPoolIterate(void)
41 {
42 Iterator.pool = MemPools::GetInstance().pools;
43 return &Iterator;
44 }
45
46 void
47 memPoolIterateDone(MemPoolIterator ** iter)
48 {
49 assert(iter != NULL);
50 Iterator.pool = NULL;
51 *iter = NULL;
52 }
53
54 MemImplementingAllocator *
55 memPoolIterateNext(MemPoolIterator * iter)
56 {
57 MemImplementingAllocator *pool;
58 assert(iter != NULL);
59
60 pool = iter->pool;
61 if (!pool)
62 return NULL;
63
64 iter->pool = pool->next;
65 return pool;
66 }
67
/// Sets the idle-memory limit enforced by clean(); negative means no limit.
void
MemPools::setIdleLimit(ssize_t new_idle_limit)
{
    mem_idle_limit = new_idle_limit;
}
73
/// The configured idle-memory limit; negative means no limit.
ssize_t
MemPools::idleLimit() const
{
    return mem_idle_limit;
}
79
/* Change the default value of defaultIsChunked to override
 * all pools - including those created before main() starts, when
 * MemPools::GetInstance().setDefaultPoolChunking() cannot yet be called.
 */
84 MemPools::MemPools()
85 {
86 if (char *cfg = getenv("MEMPOOLS"))
87 defaultIsChunked = atoi(cfg);
88 }
89
90 MemImplementingAllocator *
91 MemPools::create(const char *label, size_t obj_size)
92 {
93 ++poolCount;
94 if (defaultIsChunked)
95 return new MemPoolChunked (label, obj_size);
96 else
97 return new MemPoolMalloc (label, obj_size);
98 }
99
/// Selects whether pools created after this call default to chunked
/// (MemPoolChunked) or malloc-backed (MemPoolMalloc) storage.
void
MemPools::setDefaultPoolChunking(bool const &aBool)
{
    defaultIsChunked = aBool;
}
105
/// The human-readable label this allocator was created with.
char const *
MemAllocator::objectType() const
{
    return label;
}
111
/// Number of objects currently handed out; forwards to the
/// implementation-specific getInUseCount().
int
MemAllocator::inUseCount()
{
    return getInUseCount();
}
117
118 void
119 MemImplementingAllocator::flushMeters()
120 {
121 size_t calls;
122
123 calls = free_calls;
124 if (calls) {
125 meter.gb_freed.count += calls;
126 free_calls = 0;
127 }
128 calls = alloc_calls;
129 if (calls) {
130 meter.gb_allocated.count += calls;
131 alloc_calls = 0;
132 }
133 calls = saved_calls;
134 if (calls) {
135 meter.gb_saved.count += calls;
136 saved_calls = 0;
137 }
138 }
139
140 void
141 MemImplementingAllocator::flushMetersFull()
142 {
143 flushMeters();
144 getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
145 getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
146 getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
147 }
148
149 void
150 MemPoolMeter::flush()
151 {
152 alloc.flush();
153 inuse.flush();
154 idle.flush();
155 gb_allocated.count = 0;
156 gb_allocated.bytes = 0;
157 gb_oallocated.count = 0;
158 gb_oallocated.bytes = 0;
159 gb_saved.count = 0;
160 gb_saved.bytes = 0;
161 gb_freed.count = 0;
162 gb_freed.bytes = 0;
163 }
164
/// Starts with all gauges and counters zeroed.
MemPoolMeter::MemPoolMeter()
{
    flush();
}
169
/*
 * Updates all pool counters, and recreates TheMeter totals from all pools
 */
void
MemPools::flushMeters()
{
    // rebuild the grand totals from scratch on every call
    TheMeter.flush();

    MemPoolIterator *iter = memPoolIterate();
    while (MemImplementingAllocator *pool = memPoolIterateNext(iter)) {
        // drain the pool's buffered call counts into its own meter first
        pool->flushMetersFull();
        // level gauges are metered in objects per pool; scale by obj_size
        // to accumulate them into the global byte-level gauges
        // are these TheMeter grow() operations or accumulated volumes ?
        TheMeter.alloc += pool->getMeter().alloc.currentLevel() * pool->obj_size;
        TheMeter.inuse += pool->getMeter().inuse.currentLevel() * pool->obj_size;
        TheMeter.idle += pool->getMeter().idle.currentLevel() * pool->obj_size;

        // cumulative counters are summed as-is (counts and bytes alike)
        TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
        TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
        TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
        TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
        TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
        TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
    }
    memPoolIterateDone(&iter);
}
195
196 void *
197 MemImplementingAllocator::alloc()
198 {
199 if (++alloc_calls == FLUSH_LIMIT)
200 flushMeters();
201
202 return allocate();
203 }
204
/// Returns one object to the pool.
void
MemImplementingAllocator::freeOne(void *obj)
{
    assert(obj != NULL);
    // let valgrind verify the object is still addressable before reuse
    (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
    // a zero idle-memory limit disables caching: release memory at once
    deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
    ++free_calls;
}
213
/*
 * Flushes meters for every pool, returns all cached frees to their home
 * chunks, and destroys idle chunks whose unreferenced age exceeds maxage.
 * If the accumulated idle memory exceeds the configured idle limit, all
 * pools with any idle objects are cleaned immediately (maxage is treated
 * as zero), shrinking memPool memory usage toward the configured minimum.
 */
void
MemPools::clean(time_t maxage)
{
    // refresh TheMeter so the idle-level check below is current
    flushMeters();
    if (mem_idle_limit < 0) // no limit to enforce
        return;

    // over the limit: clean aggressively — trigger every pool with any
    // idle objects (shift 0) and release regardless of age (maxage 0)
    int shift = 1;
    if (TheMeter.idle.currentLevel() > mem_idle_limit)
        maxage = shift = 0;

    MemImplementingAllocator *pool;
    MemPoolIterator *iter;
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter)))
        if (pool->idleTrigger(shift))
            pool->clean(maxage);
    memPoolIterateDone(&iter);
}
242
/// scratch buffer reused across memPoolGetGlobalStats() calls to
/// accumulate per-pool statistics into grand totals
static MemPoolStats pp_stats;
245
/*
 * Accumulates per-pool statistics into *stats and returns the number of
 * pools currently in use.
 */
/// Fills *stats with totals accumulated over all pools.
/// \return the number of pools that currently have objects in use
int
memPoolGetGlobalStats(MemPoolGlobalStats * stats)
{
    int pools_inuse = 0;
    MemAllocator *pool;
    MemPoolIterator *iter;

    // zero both the caller's result and the shared accumulator
    memset(stats, 0, sizeof(MemPoolGlobalStats));
    memset(&pp_stats, 0, sizeof(MemPoolStats));

    MemPools::GetInstance().flushMeters(); /* recreate TheMeter */

    /* gather all stats for Totals */
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        // getStats(accumulate=1) adds this pool's numbers into pp_stats
        if (pool->getStats(&pp_stats, 1) > 0)
            ++pools_inuse;
    }
    memPoolIterateDone(&iter);

    stats->TheMeter = &TheMeter;

    stats->tot_pools_alloc = MemPools::GetInstance().poolCount;
    stats->tot_pools_inuse = pools_inuse;
    stats->tot_pools_mempid = Pool_id_counter;

    stats->tot_chunks_alloc = pp_stats.chunks_alloc;
    stats->tot_chunks_inuse = pp_stats.chunks_inuse;
    stats->tot_chunks_partial = pp_stats.chunks_partial;
    stats->tot_chunks_free = pp_stats.chunks_free;
    stats->tot_items_alloc = pp_stats.items_alloc;
    stats->tot_items_inuse = pp_stats.items_inuse;
    stats->tot_items_idle = pp_stats.items_idle;

    // include bookkeeping overhead: per-pool stats plus the pool list itself
    stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemAllocator *);
    stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit;

    return pools_inuse;
}
288
/// Records the pool label; zero-fills freshly allocated objects by default.
MemAllocator::MemAllocator(char const *aLabel) : doZero(true), label(aLabel)
{
}
292
293 size_t MemAllocator::RoundedSize(size_t s)
294 {
295 return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*);
296 }
297
/// Total bytes currently allocated across all pools, as reported by the
/// global allocation gauge.
int
memPoolsTotalAllocated(void)
{
    MemPoolGlobalStats stats;
    memPoolGetGlobalStats(&stats);
    return stats.TheMeter->alloc.currentLevel();
}
305
306 MemImplementingAllocator::MemImplementingAllocator(char const *aLabel, size_t aSize) : MemAllocator(aLabel),
307 next(NULL),
308 alloc_calls(0),
309 free_calls(0),
310 saved_calls(0),
311 obj_size(RoundedSize(aSize))
312 {
313 memPID = ++Pool_id_counter;
314
315 MemImplementingAllocator *last_pool;
316
317 assert(aLabel != NULL && aSize);
318 /* Append as Last */
319 for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;)
320 last_pool = last_pool->next;
321 if (last_pool)
322 last_pool->next = this;
323 else
324 MemPools::GetInstance().pools = this;
325 }
326
/// Unregisters the pool: removes it from the global pool list.
MemImplementingAllocator::~MemImplementingAllocator()
{
    /* Abort if the associated pool doesn't exist */
    assert(MemPools::GetInstance().pools != NULL );

    /* Pool clean, remove it from List and free */
    // walk the list, remembering the node before us so we can unlink
    MemImplementingAllocator *find_pool, *prev_pool;
    for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next)
        prev_pool = find_pool;

    /* make sure that we found the pool to destroy */
    assert(find_pool != NULL);

    // splice ourselves out: either fix the predecessor or the list head
    if (prev_pool)
        prev_pool->next = next;
    else
        MemPools::GetInstance().pools = next;
    --MemPools::GetInstance().poolCount;
}
347
/// Read-only access to this pool's usage meter.
MemPoolMeter const &
MemImplementingAllocator::getMeter() const
{
    return meter;
}
353
/// Mutable access to this pool's usage meter.
MemPoolMeter &
MemImplementingAllocator::getMeter()
{
    return meter;
}
359
/// Per-object size used for metering: the requested size rounded up to
/// pointer alignment (see MemAllocator::RoundedSize).
size_t
MemImplementingAllocator::objectSize() const
{
    return obj_size;
}
365