/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/*
 * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins
 */

#include "squid.h"
#include "mem/PoolChunked.h"
#include "mem/PoolMalloc.h"

#include <cassert>
#include <cstring>

#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after FLUSH_LIMIT calls */

extern time_t squid_curtime;

static MemPoolMeter TheMeter;
static MemPoolIterator Iterator;
static int Pool_id_counter = 0;

MemPools &
MemPools::GetInstance()
{
    // We must initialize on first use (which may happen during static
    // initialization) and preserve until the last user is gone (which
    // may happen long after main() exit). We currently preserve forever.
    static MemPools *Instance = new MemPools;
    return *Instance;
}

MemPoolIterator *
memPoolIterate(void)
{
    Iterator.pool = MemPools::GetInstance().pools;
    return &Iterator;
}

void
memPoolIterateDone(MemPoolIterator ** iter)
{
    assert(iter != nullptr);
    Iterator.pool = nullptr;
    *iter = nullptr;
}

MemImplementingAllocator *
memPoolIterateNext(MemPoolIterator * iter)
{
    MemImplementingAllocator *pool;
    assert(iter != nullptr);

    pool = iter->pool;
    if (!pool)
        return nullptr;

    iter->pool = pool->next;
    return pool;
}
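
/*
 * Typical iteration pattern (a usage sketch; MemPools::flushMeters() below
 * follows the same shape):
 *
 *     MemPoolIterator *iter = memPoolIterate();
 *     while (MemImplementingAllocator *pool = memPoolIterateNext(iter)) {
 *         // ... inspect or update *pool ...
 *     }
 *     memPoolIterateDone(&iter);
 */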

void
MemPools::setIdleLimit(ssize_t new_idle_limit)
{
    mem_idle_limit = new_idle_limit;
}

ssize_t
MemPools::idleLimit() const
{
    return mem_idle_limit;
}

/* Change the default value of defaultIsChunked to override
 * all pools - including those used before main() starts, where
 * MemPools::GetInstance().setDefaultPoolChunking() cannot yet be called.
 */
MemPools::MemPools()
{
    if (char *cfg = getenv("MEMPOOLS"))
        defaultIsChunked = atoi(cfg);
}
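
/*
 * Usage note (an inference from the constructor above): starting the process
 * with MEMPOOLS=1 in the environment makes new pools default to the chunked
 * implementation, while MEMPOOLS=0 selects the malloc-based one.
 */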

MemImplementingAllocator *
MemPools::create(const char *label, size_t obj_size)
{
    ++poolCount;
    if (defaultIsChunked)
        return new MemPoolChunked(label, obj_size);
    else
        return new MemPoolMalloc(label, obj_size);
}
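
/*
 * Usage sketch (hypothetical label and object type):
 *
 *     MemImplementingAllocator *pool =
 *         MemPools::GetInstance().create("example", sizeof(ExampleObject));
 *     void *obj = pool->alloc();
 *     // ... use obj ...
 *     pool->freeOne(obj);
 */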

void
MemPools::setDefaultPoolChunking(bool const &aBool)
{
    defaultIsChunked = aBool;
}

char const *
MemAllocator::objectType() const
{
    return label;
}

int
MemAllocator::inUseCount()
{
    return getInUseCount();
}

void
MemImplementingAllocator::flushMeters()
{
    size_t calls;

    calls = free_calls;
    if (calls) {
        meter.gb_freed.count += calls;
        free_calls = 0;
    }
    calls = alloc_calls;
    if (calls) {
        meter.gb_allocated.count += calls;
        alloc_calls = 0;
    }
    calls = saved_calls;
    if (calls) {
        meter.gb_saved.count += calls;
        saved_calls = 0;
    }
}

void
MemImplementingAllocator::flushMetersFull()
{
    flushMeters();
    getMeter().gb_allocated.bytes = getMeter().gb_allocated.count * obj_size;
    getMeter().gb_saved.bytes = getMeter().gb_saved.count * obj_size;
    getMeter().gb_freed.bytes = getMeter().gb_freed.count * obj_size;
}

void
MemPoolMeter::flush()
{
    alloc.flush();
    inuse.flush();
    idle.flush();
    gb_allocated.count = 0;
    gb_allocated.bytes = 0;
    gb_oallocated.count = 0;
    gb_oallocated.bytes = 0;
    gb_saved.count = 0;
    gb_saved.bytes = 0;
    gb_freed.count = 0;
    gb_freed.bytes = 0;
}

MemPoolMeter::MemPoolMeter()
{
    flush();
}

/*
 * Updates all pool counters, and recreates TheMeter totals from all pools
 */
void
MemPools::flushMeters()
{
    TheMeter.flush();

    MemPoolIterator *iter = memPoolIterate();
    while (MemImplementingAllocator *pool = memPoolIterateNext(iter)) {
        pool->flushMetersFull();
        // are these TheMeter grow() operations or accumulated volumes?
        TheMeter.alloc += pool->getMeter().alloc.currentLevel() * pool->obj_size;
        TheMeter.inuse += pool->getMeter().inuse.currentLevel() * pool->obj_size;
        TheMeter.idle += pool->getMeter().idle.currentLevel() * pool->obj_size;

        TheMeter.gb_allocated.count += pool->getMeter().gb_allocated.count;
        TheMeter.gb_saved.count += pool->getMeter().gb_saved.count;
        TheMeter.gb_freed.count += pool->getMeter().gb_freed.count;
        TheMeter.gb_allocated.bytes += pool->getMeter().gb_allocated.bytes;
        TheMeter.gb_saved.bytes += pool->getMeter().gb_saved.bytes;
        TheMeter.gb_freed.bytes += pool->getMeter().gb_freed.bytes;
    }
    memPoolIterateDone(&iter);
}

void *
MemImplementingAllocator::alloc()
{
    if (++alloc_calls == FLUSH_LIMIT)
        flushMeters();

    return allocate();
}
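
/*
 * Note: the per-call counters (alloc_calls, free_calls, saved_calls) are
 * folded into the meters only every FLUSH_LIMIT allocations and on explicit
 * flushMeters() calls, keeping the per-allocation bookkeeping to a single
 * increment.
 */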

void
MemImplementingAllocator::freeOne(void *obj)
{
    assert(obj != nullptr);
    (void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, obj_size);
    deallocate(obj, MemPools::GetInstance().mem_idle_limit == 0);
    ++free_calls;
}

/*
 * Returns all cached frees to their home chunks.
 * If a chunk's unreferenced age exceeds maxage, the idle chunk is destroyed.
 * Flushes meters and iterates through all pools; when the total idle volume
 * exceeds mem_idle_limit, pools are cleaned immediately (maxage forced to
 * zero) to shrink memPool memory usage back to the configured limit.
 */
void
MemPools::clean(time_t maxage)
{
    flushMeters();
    if (mem_idle_limit < 0) // no limit to enforce
        return;

    int shift = 1;
    if (TheMeter.idle.currentLevel() > mem_idle_limit)
        maxage = shift = 0;

    MemImplementingAllocator *pool;
    MemPoolIterator *iter;
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter)))
        if (pool->idleTrigger(shift))
            pool->clean(maxage);
    memPoolIterateDone(&iter);
}
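
/*
 * Usage note (an assumption based on the code above): MemPools::clean(0)
 * asks every triggered pool to release idle chunks regardless of age, while
 * a negative mem_idle_limit disables cleaning entirely.
 */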

/* Persistent pool stats, used for GlobalStats accumulation */
static MemPoolStats pp_stats;

/*
 * Returns the total (cross-pool) statistics.
 */
int
memPoolGetGlobalStats(MemPoolGlobalStats * stats)
{
    int pools_inuse = 0;
    MemAllocator *pool;
    MemPoolIterator *iter;

    memset(stats, 0, sizeof(MemPoolGlobalStats));
    memset(&pp_stats, 0, sizeof(MemPoolStats));

    MemPools::GetInstance().flushMeters(); /* recreate TheMeter */

    /* gather all stats for Totals */
    iter = memPoolIterate();
    while ((pool = memPoolIterateNext(iter))) {
        if (pool->getStats(&pp_stats, 1) > 0)
            ++pools_inuse;
    }
    memPoolIterateDone(&iter);

    stats->TheMeter = &TheMeter;

    stats->tot_pools_alloc = MemPools::GetInstance().poolCount;
    stats->tot_pools_inuse = pools_inuse;
    stats->tot_pools_mempid = Pool_id_counter;

    stats->tot_chunks_alloc = pp_stats.chunks_alloc;
    stats->tot_chunks_inuse = pp_stats.chunks_inuse;
    stats->tot_chunks_partial = pp_stats.chunks_partial;
    stats->tot_chunks_free = pp_stats.chunks_free;
    stats->tot_items_alloc = pp_stats.items_alloc;
    stats->tot_items_inuse = pp_stats.items_inuse;
    stats->tot_items_idle = pp_stats.items_idle;

    stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemAllocator *);
    stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit;

    return pools_inuse;
}
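
/*
 * memPoolsTotalAllocated() below is a minimal example of this API: fill a
 * MemPoolGlobalStats and read one field from its TheMeter.
 */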

MemAllocator::MemAllocator(char const *aLabel) : doZero(true), label(aLabel)
{
}

size_t
MemAllocator::RoundedSize(size_t s)
{
    return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*);
}
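
/*
 * Worked example (assuming a 64-bit platform where sizeof(void*) == 8):
 * RoundedSize(13) == ((13 + 7) / 8) * 8 == 16, i.e. sizes are rounded up to
 * the next pointer-size boundary.
 */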

int
memPoolsTotalAllocated(void)
{
    MemPoolGlobalStats stats;
    memPoolGetGlobalStats(&stats);
    return stats.TheMeter->alloc.currentLevel();
}

MemImplementingAllocator::MemImplementingAllocator(char const *aLabel, size_t aSize) : MemAllocator(aLabel),
    next(nullptr),
    alloc_calls(0),
    free_calls(0),
    saved_calls(0),
    obj_size(RoundedSize(aSize))
{
    memPID = ++Pool_id_counter;

    MemImplementingAllocator *last_pool;

    assert(aLabel != nullptr && aSize);
    /* Append as Last */
    for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;)
        last_pool = last_pool->next;
    if (last_pool)
        last_pool->next = this;
    else
        MemPools::GetInstance().pools = this;
}
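
/*
 * Note: registration appends to the tail of a singly linked list, so pool
 * construction is O(number of pools); pools are assumed to be few and
 * long-lived.
 */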

MemImplementingAllocator::~MemImplementingAllocator()
{
    MemImplementingAllocator *find_pool, *prev_pool;

    /* Abort if the associated pool doesn't exist */
    assert(MemPools::GetInstance().pools != nullptr);

    /* Pool is clean; remove it from the list and free it */
    for (find_pool = MemPools::GetInstance().pools, prev_pool = nullptr; (find_pool && this != find_pool); find_pool = find_pool->next)
        prev_pool = find_pool;

    /* make sure that we found the pool to destroy */
    assert(find_pool != nullptr);

    if (prev_pool)
        prev_pool->next = next;
    else
        MemPools::GetInstance().pools = next;
    --MemPools::GetInstance().poolCount;
}

MemPoolMeter const &
MemImplementingAllocator::getMeter() const
{
    return meter;
}

MemPoolMeter &
MemImplementingAllocator::getMeter()
{
    return meter;
}

size_t
MemImplementingAllocator::objectSize() const
{
    return obj_size;
}