/* Functions to support a pool of allocatable objects
   Copyright (C) 1997-2017 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@cgsoftware.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#ifndef ALLOC_POOL_H
#define ALLOC_POOL_H

#include "memory-block.h"
#include "options.h"  // for flag_checking

extern void dump_alloc_pool_statistics (void);

/* Flag indicating whether memory statistics are still being gathered.  */
extern bool after_memory_report;

typedef unsigned long ALLOC_POOL_ID_TYPE;

/* Last used ID.  */
extern ALLOC_POOL_ID_TYPE last_id;

/* Pool allocator memory usage.  */
struct pool_usage: public mem_usage
{
  /* Default constructor.  */
  pool_usage (): m_element_size (0), m_pool_name ("") {}
  /* Constructor.  */
  pool_usage (size_t allocated, size_t times, size_t peak,
	      size_t instances, size_t element_size,
	      const char *pool_name)
    : mem_usage (allocated, times, peak, instances),
      m_element_size (element_size),
      m_pool_name (pool_name) {}

  /* Sum the usage with SECOND usage.  */
  pool_usage
  operator+ (const pool_usage &second)
  {
    return pool_usage (m_allocated + second.m_allocated,
		       m_times + second.m_times,
		       m_peak + second.m_peak,
		       m_instances + second.m_instances,
		       m_element_size, m_pool_name);
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, mem_usage &total) const
  {
    char *location_string = loc->to_string ();

    fprintf (stderr, "%-32s%-48s %6li%10li:%5.1f%%%10li%10li:%5.1f%%%12li\n",
	     m_pool_name, location_string, (long)m_instances,
	     (long)m_allocated, get_percent (m_allocated, total.m_allocated),
	     (long)m_peak, (long)m_times,
	     get_percent (m_times, total.m_times),
	     (long)m_element_size);

    free (location_string);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name,
	     "Pools", "Leak", "Peak", "Times", "Elt size");
    print_dash_line ();
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    print_dash_line ();
    fprintf (stderr, "%s%82li%10li\n", "Total", (long)m_instances,
	     (long)m_allocated);
    print_dash_line ();
  }

  /* Element size.  */
  size_t m_element_size;
  /* Pool name.  */
  const char *m_pool_name;
};

extern mem_alloc_description<pool_usage> pool_allocator_usage;

#if 0
/* If a pool with custom block size is needed, one might use the following
   template.  An instance of this template can be used as a parameter for
   instantiating the base_pool_allocator template:

	typedef custom_block_allocator <128*1024> huge_block_allocator;
	...
	static base_pool_allocator <huge_block_allocator>
						value_pool ("value", 16384);

   Right now it's not used anywhere in the code, and is given here only as
   an example.  */

template <size_t BlockSize>
class custom_block_allocator
{
public:
  static const size_t block_size = BlockSize;

  static inline void *
  allocate () ATTRIBUTE_MALLOC
  {
    return XNEWVEC (char, BlockSize);
  }

  static inline void
  release (void *block)
  {
    XDELETEVEC (block);
  }
};
#endif

/* Generic pool allocator.  */

template <typename TBlockAllocator>
class base_pool_allocator
{
public:
  /* Constructor for a pool allocator called NAME, allocating elements of
     SIZE bytes.  */
  base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
  ~base_pool_allocator ();
  void release ();
  void release_if_empty ();
  void *allocate () ATTRIBUTE_MALLOC;
  void remove (void *object);
  size_t num_elts_current ();

private:
  struct allocation_pool_list
  {
    allocation_pool_list *next;
  };

  /* Initialize a pool allocator.  */
  void initialize ();

  struct allocation_object
  {
#if CHECKING_P
    /* The ID of alloc pool which the object was allocated from.  */
    ALLOC_POOL_ID_TYPE id;
#endif

    union
      {
	/* The data of the object.  */
	char data[1];

	/* Because we want any type of data to be well aligned after the ID,
	   the following elements are here.  They are never accessed so
	   the allocated object may be even smaller than this structure.
	   We do not care about alignment for floating-point types.  */
	char *align_p;
	int64_t align_i;
      } u;

#if CHECKING_P
    static inline allocation_object*
    get_instance (void *data_ptr)
    {
      return (allocation_object *)(((char *)(data_ptr))
				   - offsetof (allocation_object,
					       u.data));
    }
#endif

    static inline void*
    get_data (void *instance_ptr)
    {
      return (void*)(((allocation_object *) instance_ptr)->u.data);
    }
  };

  /* Align X to 8.  */
  static inline size_t
  align_eight (size_t x)
  {
    return (((x+7) >> 3) << 3);
  }

  const char *m_name;
  ALLOC_POOL_ID_TYPE m_id;
  size_t m_elts_per_block;

  /* These are the elements that have been allocated at least once
     and freed.  */
  allocation_pool_list *m_returned_free_list;

  /* These are the elements that have not yet been allocated out of
     the last block obtained from XNEWVEC.  */
  char* m_virgin_free_list;

  /* The number of elements in the virgin_free_list that can be
     allocated before needing another block.  */
  size_t m_virgin_elts_remaining;
  /* The number of elements that are allocated.  */
  size_t m_elts_allocated;
  /* The number of elements that are released.  */
  size_t m_elts_free;
  /* The number of allocated blocks.  */
  size_t m_blocks_allocated;
  /* List of blocks that are used to allocate new objects.  */
  allocation_pool_list *m_block_list;
  /* Actual size of a pool element in bytes, including alignment and
     bookkeeping overhead.  */
  size_t m_elt_size;
  /* Requested size in bytes of each element, as passed to the
     constructor.  */
  size_t m_size;
  /* Whether the pool allocator has been initialized.  */
  bool m_initialized;
  /* Memory allocation location.  */
  mem_location m_location;
};

template <typename TBlockAllocator>
inline
base_pool_allocator <TBlockAllocator>::base_pool_allocator (
  const char *name, size_t size MEM_STAT_DECL):
  m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size),
  m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}

/* Initialize a pool allocator.  */

template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::initialize ()
{
  gcc_checking_assert (!m_initialized);
  m_initialized = true;

  size_t size = m_size;

  gcc_checking_assert (m_name);

  /* Make size large enough to store the list header.  */
  if (size < sizeof (allocation_pool_list*))
    size = sizeof (allocation_pool_list*);

  /* Now align the size to a multiple of 8.  */
  size = align_eight (size);

  /* Add the aligned size of ID.  */
  size += offsetof (allocation_object, u.data);

  m_elt_size = size;

  if (GATHER_STATISTICS)
    {
      pool_usage *u = pool_allocator_usage.register_descriptor
	(this, new mem_location (m_location));

      u->m_element_size = m_elt_size;
      u->m_pool_name = m_name;
    }

  /* List header size should be a multiple of 8.  */
  size_t header_size = align_eight (sizeof (allocation_pool_list));

  m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
  gcc_checking_assert (m_elts_per_block != 0);

  /* Increase the last used ID and use it for this pool.
     ID == 0 is used for free elements of pool so skip it.  */
  last_id++;
  if (last_id == 0)
    last_id++;

  m_id = last_id;
}
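
/* A rough worked example of the sizing above (the numbers are purely
   illustrative and assume a 64-bit host with CHECKING_P enabled, so that
   offsetof (allocation_object, u.data) == 8): for a pool created with
   size == 12, the element size becomes align_eight (12) + 8 == 24 bytes,
   and a 64 KB memory_block_pool block minus its 8-byte aligned list header
   then yields (65536 - 8) / 24 == 2730 elements per block.  */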

/* Free all memory allocated for the given memory pool.  */
template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::release ()
{
  if (!m_initialized)
    return;

  allocation_pool_list *block, *next_block;

  /* Free each block allocated to the pool.  */
  for (block = m_block_list; block != NULL; block = next_block)
    {
      next_block = block->next;
      TBlockAllocator::release (block);
    }

  if (GATHER_STATISTICS && !after_memory_report)
    {
      pool_allocator_usage.release_instance_overhead
	(this, (m_elts_allocated - m_elts_free) * m_elt_size);
    }

  m_returned_free_list = NULL;
  m_virgin_free_list = NULL;
  m_virgin_elts_remaining = 0;
  m_elts_allocated = 0;
  m_elts_free = 0;
  m_blocks_allocated = 0;
  m_block_list = NULL;
}

template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::release_if_empty ()
{
  if (m_elts_free == m_elts_allocated)
    release ();
}

template <typename TBlockAllocator>
inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
{
  release ();
}

/* Allocates one element from the pool specified.  */
template <typename TBlockAllocator>
inline void*
base_pool_allocator <TBlockAllocator>::allocate ()
{
  if (!m_initialized)
    initialize ();

  allocation_pool_list *header;
#ifdef ENABLE_VALGRIND_ANNOTATIONS
  int size;
#endif

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.register_instance_overhead (m_elt_size, this);
    }

#ifdef ENABLE_VALGRIND_ANNOTATIONS
  size = m_elt_size - offsetof (allocation_object, u.data);
#endif

  /* If there are no more free elements, make some more!  */
  if (!m_returned_free_list)
    {
      char *block;
      if (!m_virgin_elts_remaining)
	{
	  allocation_pool_list *block_header;

	  /* Make the block.  */
	  block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
	  block_header = new (block) allocation_pool_list;
	  block += align_eight (sizeof (allocation_pool_list));

	  /* Throw it on the block list.  */
	  block_header->next = m_block_list;
	  m_block_list = block_header;

	  /* Make the block available for allocation.  */
	  m_virgin_free_list = block;
	  m_virgin_elts_remaining = m_elts_per_block;

	  /* Also update the number of elements we have free/allocated, and
	     increment the allocated block count.  */
	  m_elts_allocated += m_elts_per_block;
	  m_elts_free += m_elts_per_block;
	  m_blocks_allocated += 1;
	}

      /* We now know that we can take the first elt off the virgin list and
	 put it on the returned list.  */
      block = m_virgin_free_list;
      header = (allocation_pool_list*) allocation_object::get_data (block);
      header->next = NULL;

      /* Mark the element to be free.  */
#if CHECKING_P
      ((allocation_object*) block)->id = 0;
#endif
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header, size));
      m_returned_free_list = header;
      m_virgin_free_list += m_elt_size;
      m_virgin_elts_remaining--;
    }

  /* Pull the first free element from the free list, and return it.  */
  header = m_returned_free_list;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
  m_returned_free_list = header->next;
  m_elts_free--;

  /* Set the ID for the element.  */
#if CHECKING_P
  allocation_object::get_instance (header)->id = m_id;
#endif
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));

  return (void *)(header);
}

/* Puts back OBJECT, which must have been allocated from this pool, on the
   pool's free list.  */
template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::remove (void *object)
{
  int size = m_elt_size - offsetof (allocation_object, u.data);

  if (flag_checking)
    {
      gcc_assert (m_initialized);
      gcc_assert (object
		  /* Check if we free more than we allocated.  */
		  && m_elts_free < m_elts_allocated);
#if CHECKING_P
      /* Check whether OBJECT was allocated from this pool.  */
      gcc_assert (m_id == allocation_object::get_instance (object)->id);
#endif

      memset (object, 0xaf, size);
    }

#if CHECKING_P
  /* Mark the element to be free.  */
  allocation_object::get_instance (object)->id = 0;
#endif

  allocation_pool_list *header = new (object) allocation_pool_list;
  header->next = m_returned_free_list;
  m_returned_free_list = header;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
  m_elts_free++;

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead (this, m_elt_size);
    }
}

/* Number of elements currently active (not returned to pool).  Used for cheap
   consistency checks.  */
template <typename TBlockAllocator>
inline size_t
base_pool_allocator <TBlockAllocator>::num_elts_current ()
{
  return m_elts_allocated - m_elts_free;
}

/* Specialization of base_pool_allocator which should be used in most cases.
   Another specialization may be needed, if object size is greater than
   memory_block_pool::block_size (64 KB).  */
typedef base_pool_allocator <memory_block_pool> pool_allocator;
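
/* A minimal usage sketch (the pool and structure names below are invented
   purely for illustration): a pool_allocator hands out fixed-size chunks
   that are recycled through remove () instead of being freed individually,
   and all blocks go back to the block pool when the allocator is released
   or destroyed.

     struct my_node { my_node *next; int value; };
     static pool_allocator my_node_pool ("my nodes", sizeof (my_node));
     ...
     my_node *n = static_cast<my_node *> (my_node_pool.allocate ());
     ...
     my_node_pool.remove (n);
     ...
     my_node_pool.release_if_empty ();  */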

/* Type based memory pool allocator.  */
template <typename T>
class object_allocator
{
public:
  /* Constructor for a pool allocator called NAME holding objects of
     type T.  */
  object_allocator (const char *name CXX_MEM_STAT_INFO):
    m_allocator (name, sizeof (T) PASS_MEM_STAT) {}

  inline void
  release ()
  {
    m_allocator.release ();
  }

  inline void release_if_empty ()
  {
    m_allocator.release_if_empty ();
  }

  /* Allocate memory for an instance of type T and call its default
     constructor.  */

  inline T *
  allocate () ATTRIBUTE_MALLOC
  {
    return ::new (m_allocator.allocate ()) T;
  }

  /* Allocate memory for an instance of type T and return a void * that
     can be used in situations where a default constructor is not provided
     by the class T.  */

  inline void *
  allocate_raw () ATTRIBUTE_MALLOC
  {
    return m_allocator.allocate ();
  }

  inline void
  remove (T *object)
  {
    /* Call destructor.  */
    object->~T ();

    m_allocator.remove (object);
  }

  inline size_t
  num_elts_current ()
  {
    return m_allocator.num_elts_current ();
  }

private:
  pool_allocator m_allocator;
};
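
/* A short object_allocator sketch (the type and variable names are made up
   for illustration only).  Unlike a plain pool_allocator, allocate () also
   runs T's default constructor and remove () runs its destructor before
   handing the memory back to the pool:

     struct edge_note { int kind; edge_note () : kind (0) {} };
     static object_allocator<edge_note> edge_note_pool ("edge notes");
     ...
     edge_note *note = edge_note_pool.allocate ();
     ...
     edge_note_pool.remove (note);  */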

/* Store information about each particular alloc_pool.  Note that this
   will underestimate the amount of storage used by a small amount:
   1) The overhead in a pool is not accounted for.
   2) The unallocated elements in a block are not accounted for.  Note
      that in the worst case this can be one element smaller than the
      block size for that pool.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};

/* Helper for classes that do not provide default ctor.  */

template <typename T>
inline void *
operator new (size_t, object_allocator<T> &a)
{
  return a.allocate_raw ();
}
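
/* For example (hypothetical type, shown only to illustrate the helper
   above), a T without a default constructor can be built in place on
   memory obtained from its object_allocator:

     struct span { int lo, hi; span (int l, int h) : lo (l), hi (h) {} };
     static object_allocator<span> span_pool ("spans");
     span *s = new (span_pool) span (0, 16);
     ...
     span_pool.remove (s);  */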

/* Hashtable mapping alloc_pool names to descriptors.  */
extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;

#endif