#ifndef _MEM_POOL_H_
#define _MEM_POOL_H_

/**
 \defgroup MemPoolsAPI Memory Management (Memory Pool Allocator)
 \ingroup Components
 *
 *\par
 * MemPools is a pooled memory allocator running on top of malloc(). Its
 * purpose is to reduce memory fragmentation and provide detailed statistics
 * on memory consumption.
 *
 \par
 * Preferably, all memory allocations in Squid should be done using MemPools
 * or one of the types built on top of it (i.e. cbdata).
 *
 \note Usually it is better to use cbdata types, as these give you additional
 * safeguards for references and type checking. However, for high-usage pools
 * where the cbdata functionality is not required, using a MemPool directly
 * might be the way to go.
 */

#include "util.h"

#include "memMeter.h"
#include "splay.h"

#if HAVE_GNUMALLOC_H
#include <gnumalloc.h>
#elif HAVE_MALLOC_H
#include <malloc.h>
#endif

#if HAVE_MEMORY_H
#include <memory.h>
#endif

#if !M_MMAP_MAX
#if USE_DLMALLOC
#define M_MMAP_MAX -4
#endif
#endif

/// \ingroup MemPoolsAPI
#define MB ((size_t)1024*1024)
/// \ingroup MemPoolsAPI
#define toMB(size) ( ((double) (size)) / MB )
/// \ingroup MemPoolsAPI
#define toKB(size) ( ((size) + 1024 - 1) / 1024 )

/// \ingroup MemPoolsAPI
#define MEM_PAGE_SIZE 4096
/// \ingroup MemPoolsAPI
#define MEM_CHUNK_SIZE (4096 * 4)
/// \ingroup MemPoolsAPI
#define MEM_CHUNK_MAX_SIZE (256 * 1024) /* 256KB */
/// \ingroup MemPoolsAPI
#define MEM_MIN_FREE 32
/// \ingroup MemPoolsAPI
#define MEM_MAX_FREE 65535 /* the maximum number of items per chunk must fit in an unsigned short */

class MemImplementingAllocator;
class MemPoolStats;

/// \ingroup MemPoolsAPI
/// \todo Kill this typedef for C++
typedef struct _MemPoolGlobalStats MemPoolGlobalStats;

/// \ingroup MemPoolsAPI
class MemPoolIterator
{
public:
    MemImplementingAllocator *pool;
    MemPoolIterator * next;
};

/**
 \ingroup MemPoolsAPI
 * Object to track per-pool cumulative counters
 */
class mgb_t
{
public:
    mgb_t() : count(0), bytes(0) {}
    double count;
    double bytes;
};

/**
 \ingroup MemPoolsAPI
 * Object to track per-pool memory usage (alloc = inuse+idle)
 */
class MemPoolMeter
{
public:
    MemPoolMeter();
    void flush();
    MemMeter alloc;
    MemMeter inuse;
    MemMeter idle;

    /** cumulative allocation history */
    mgb_t gb_allocated;
    mgb_t gb_oallocated;

    /** cumulative allocations saved by the pool */
    mgb_t gb_saved;

    /** cumulative free calls */
    mgb_t gb_freed;
};

class MemImplementingAllocator;

/// \ingroup MemPoolsAPI
class MemPools
{
public:
    static MemPools &GetInstance();
    MemPools();
    void init();
    void flushMeters();

    /**
     \param label Name for the pool. Displayed in stats.
     \param obj_size Size of elements in MemPool.
     */
    MemImplementingAllocator * create(const char *label, size_t obj_size);

    /**
     * Sets the upper limit in bytes on the amount of free RAM kept in pools.
     * This is not a strict upper limit, but a hint. When MemPools are over
     * this limit, totally free chunks are immediately considered for release.
     * Otherwise only chunks that have not been referenced for a long time
     * are checked.
     */
    void setIdleLimit(ssize_t new_idle_limit);
    ssize_t idleLimit() const;

    /**
     \par
     * Main cleanup handler. For MemPools to stay within their upper idle
     * limits, this function needs to be called periodically, preferably at
     * some constant rate, e.g. from a Squid event. It looks through all
     * pools and chunks, cleans up internal state and checks for releasable
     * chunks.
     *
     \par
     * Between calls to this function, objects are placed in an internal
     * cache instead of being returned to their home chunks, mainly for
     * speed. During that time the state of a chunk is not known; it is not
     * known whether the chunk is free or in use. This call returns all
     * objects to their chunks and restores consistency.
     *
     \par
     * Should be called relatively often, as it sorts chunks into a suitable
     * order so as to reduce free memory fragmentation and increase chunk
     * utilisation.
     * A suitable frequency for cleanup is in the range of a few tens of
     * seconds to a few minutes, depending on memory activity.
     *
     \todo DOCS: Re-write this shorter!
     *
     \param maxage Release all totally idle chunks that
     * have not been referenced for maxage seconds.
     */
    void clean(time_t maxage);

    void setDefaultPoolChunking(bool const &);
    MemImplementingAllocator *pools;
    ssize_t mem_idle_limit;
    int poolCount;
    bool defaultIsChunked;
private:
    static MemPools *Instance;
};

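/*
 * Example (an illustrative sketch only, not from the Squid sources): "myType"
 * and MyObject are hypothetical names. A pool is created once, objects are
 * allocated and returned through it, and clean() is called periodically,
 * normally from a Squid event.
 *
 *   MemImplementingAllocator *myPool =
 *       MemPools::GetInstance().create("myType", sizeof(MyObject));
 *
 *   void *p = myPool->alloc();        // grab one element
 *   // ... use p as a MyObject ...
 *   myPool->freeOne(p);               // return it to the pool
 *
 *   MemPools::GetInstance().setIdleLimit(2 * MB);  // hint: keep at most ~2MB idle
 *   MemPools::GetInstance().clean(60);             // release chunks idle for 60s or more
 */
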
/**
 \ingroup MemPoolsAPI
 * A pool is a [growing] space for objects of the same size
 */
class MemAllocator
{
public:
    MemAllocator (char const *aLabel);
    virtual ~MemAllocator() {}

    /**
     \param stats Object to be filled with statistical data about the pool.
     \retval Number of objects in use, i.e. allocated.
     */
    virtual int getStats(MemPoolStats * stats, int accumulate = 0) = 0;

    virtual MemPoolMeter const &getMeter() const = 0;

    /**
     * Allocate one element from the pool
     */
    virtual void *alloc() = 0;

    /**
     * Free an element allocated by MemAllocator::alloc()
     */
    virtual void freeOne(void *) = 0;

    virtual char const *objectType() const;
    virtual size_t objectSize() const = 0;
    virtual int getInUseCount() = 0;
    void zeroOnPush(bool doIt);
    int inUseCount();

    /**
     * Allows you to tune the chunk size of pooling. Objects are allocated
     * in chunks instead of individually, which conserves memory and reduces
     * fragmentation. Because of that, memory can also be freed only in
     * chunks. There is therefore a tradeoff between memory conservation due
     * to chunking and free memory fragmentation.
     *
     \note As a general guideline, increase the chunk size only for pools
     * that keep very many items for a relatively long time.
     */
    virtual void setChunkSize(size_t chunksize) {}

    /**
     \param minSize Minimum size needed to be allocated.
     \retval n Smallest size divisible by sizeof(void*)
     */
    static size_t RoundedSize(size_t minSize);

protected:
    bool doZeroOnPush;

private:
    const char *label;
};

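/*
 * Tuning sketch (illustrative only, not from the Squid sources): "bigType"
 * and BigObject are hypothetical names. The chunk size may be raised for
 * pools that keep many long-lived items, zeroing on free may be skipped
 * when callers fully initialize objects themselves, and RoundedSize()
 * shows the per-object size the pool will actually use.
 *
 *   MemImplementingAllocator *bigPool =
 *       MemPools::GetInstance().create("bigType", sizeof(BigObject));
 *   bigPool->setChunkSize(64 * 1024);   // larger chunks for a busy pool
 *   bigPool->zeroOnPush(false);         // skip zeroing freed objects
 *   size_t real = MemAllocator::RoundedSize(sizeof(BigObject));
 */
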
/**
 \ingroup MemPoolsAPI
 * Support late binding of pool type for allocator-agnostic classes
 */
class MemAllocatorProxy
{
public:
    inline MemAllocatorProxy(char const *aLabel, size_t const &);

    /**
     * Allocate one element from the pool
     */
    void *alloc();

    /**
     * Free an element allocated by MemAllocatorProxy::alloc()
     */
    void freeOne(void *);

    int inUseCount() const;
    size_t objectSize() const;
    MemPoolMeter const &getMeter() const;

    /**
     \param stats Object to be filled with statistical data about the pool.
     \retval Number of objects in use, i.e. allocated.
     */
    int getStats(MemPoolStats * stats);

    char const * objectType() const;
private:
    MemAllocator *getAllocator() const;
    const char *label;
    size_t size;
    mutable MemAllocator *theAllocator;
};

/* help for classes */

/**
 \ingroup MemPoolsAPI
 \hideinitializer
 *
 * This macro is intended for use within the declaration of a class.
 */
#define MEMPROXY_CLASS(CLASS) \
    inline void *operator new(size_t); \
    inline void operator delete(void *); \
    static inline MemAllocatorProxy &Pool()

/**
 \ingroup MemPoolsAPI
 \hideinitializer
 *
 * This macro is intended for use within the .h or .cci of a class as appropriate.
 */
#define MEMPROXY_CLASS_INLINE(CLASS) \
    MemAllocatorProxy& CLASS::Pool() \
    { \
        static MemAllocatorProxy thePool(#CLASS, sizeof (CLASS)); \
        return thePool; \
    } \
    \
    void * \
    CLASS::operator new (size_t byteCount) \
    { \
        /* derived classes with different sizes must implement their own new */ \
        assert (byteCount == sizeof (CLASS)); \
        \
        return Pool().alloc(); \
    } \
    \
    void \
    CLASS::operator delete (void *address) \
    { \
        Pool().freeOne(address); \
    }

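/*
 * Usage sketch (illustrative only, not from the Squid sources):
 * HypotheticalClass is a made-up name. MEMPROXY_CLASS() goes inside the
 * class declaration and MEMPROXY_CLASS_INLINE() in the corresponding .h or
 * .cci file; after that, new/delete for the class are served from its own
 * MemAllocatorProxy pool.
 *
 *   class HypotheticalClass
 *   {
 *   public:
 *       MEMPROXY_CLASS(HypotheticalClass);
 *       int someMember;
 *   };
 *
 *   MEMPROXY_CLASS_INLINE(HypotheticalClass);
 */
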
/// \ingroup MemPoolsAPI
class MemImplementingAllocator : public MemAllocator
{
public:
    MemImplementingAllocator(char const *aLabel, size_t aSize);
    virtual ~MemImplementingAllocator();
    virtual MemPoolMeter const &getMeter() const;
    virtual MemPoolMeter &getMeter();
    virtual void flushMetersFull();
    virtual void flushMeters();

    /**
     * Allocate one element from the pool
     */
    virtual void *alloc();

    /**
     * Free an element allocated by MemImplementingAllocator::alloc()
     */
    virtual void freeOne(void *);

    virtual bool idleTrigger(int shift) const = 0;
    virtual void clean(time_t maxage) = 0;
    virtual size_t objectSize() const;
    virtual int getInUseCount() = 0;
protected:
    virtual void *allocate() = 0;
    virtual void deallocate(void *, bool aggressive) = 0;
    MemPoolMeter meter;
    int memPID;
public:
    MemImplementingAllocator *next;
public:
    size_t alloc_calls;
    size_t free_calls;
    size_t saved_calls;
    size_t obj_size;
};

/// \ingroup MemPoolsAPI
class MemPoolStats
{
public:
    MemAllocator *pool;
    const char *label;
    MemPoolMeter *meter;
    int obj_size;
    int chunk_capacity;
    int chunk_size;

    int chunks_alloc;
    int chunks_inuse;
    int chunks_partial;
    int chunks_free;

    int items_alloc;
    int items_inuse;
    int items_idle;

    int overhead;
};

/// \ingroup MemPoolsAPI
/// \todo Classify and add constructor/destructor to initialize properly.
struct _MemPoolGlobalStats {
    MemPoolMeter *TheMeter;

    int tot_pools_alloc;
    int tot_pools_inuse;
    int tot_pools_mempid;

    int tot_chunks_alloc;
    int tot_chunks_inuse;
    int tot_chunks_partial;
    int tot_chunks_free;

    int tot_items_alloc;
    int tot_items_inuse;
    int tot_items_idle;

    int tot_overhead;
    ssize_t mem_idle_limit;
};

/// \ingroup MemPoolsAPI
#define memPoolCreate MemPools::GetInstance().create

/* Allocator API */
/**
 \ingroup MemPoolsAPI
 * Initialise iteration through all of the pools.
 \retval Iterator for use by memPoolIterateNext() and memPoolIterateDone()
 */
extern MemPoolIterator * memPoolIterate(void);

/**
 \ingroup MemPoolsAPI
 * Get the next pool pointer; returns a NULL pointer after the last pool.
 */
extern MemImplementingAllocator * memPoolIterateNext(MemPoolIterator * iter);

/**
 \ingroup MemPoolsAPI
 * Should be called after iterating through all of the pools is finished.
 */
extern void memPoolIterateDone(MemPoolIterator ** iter);

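/*
 * Iteration sketch (illustrative only, not from the Squid sources): walks
 * all registered pools and reports each pool's object type and in-use count.
 *
 *   MemPoolIterator *iter = memPoolIterate();
 *   while (MemImplementingAllocator *pool = memPoolIterateNext(iter)) {
 *       printf("%s: %d in use\n", pool->objectType(), memPoolInUseCount(pool));
 *   }
 *   memPoolIterateDone(&iter);
 */
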
/**
 \ingroup MemPoolsAPI
 \todo Stats API - not sure how to refactor yet
 *
 * Fills MemPoolGlobalStats with statistical data about overall
 * usage for all pools.
 *
 \retval Number of pools that have at least one object in use,
 * i.e. the number of dirty pools.
 */
extern int memPoolGetGlobalStats(MemPoolGlobalStats * stats);

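/*
 * Stats sketch (illustrative only, not from the Squid sources): collects the
 * aggregate counters for all pools into a MemPoolGlobalStats structure.
 *
 *   MemPoolGlobalStats stats;
 *   const int dirtyPools = memPoolGetGlobalStats(&stats);
 *   printf("%d dirty pools, %d items in use, idle limit %ld\n",
 *          dirtyPools, stats.tot_items_inuse, (long) stats.mem_idle_limit);
 */
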
/// \ingroup MemPoolsAPI
extern int memPoolInUseCount(MemAllocator *);
/// \ingroup MemPoolsAPI
extern int memPoolsTotalAllocated(void);

MemAllocatorProxy::MemAllocatorProxy(char const *aLabel, size_t const &aSize) : label (aLabel), size(aSize), theAllocator (NULL)
{
}

#endif /* _MEM_POOL_H_ */