/* Functions to support a pool of allocatable objects
   Copyright (C) 1997-2020 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@cgsoftware.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef ALLOC_POOL_H
#define ALLOC_POOL_H

#include "memory-block.h"
#include "options.h"  // for flag_checking

extern void dump_alloc_pool_statistics (void);

/* Flag indicating that memory statistics are no longer being gathered.  */
extern bool after_memory_report;

typedef unsigned long ALLOC_POOL_ID_TYPE;

/* Last used ID.  */
extern ALLOC_POOL_ID_TYPE last_id;

/* Pool allocator memory usage.  */
class pool_usage: public mem_usage
{
public:
  /* Default constructor.  */
  pool_usage (): m_element_size (0), m_pool_name ("") {}
  /* Constructor.  */
  pool_usage (size_t allocated, size_t times, size_t peak,
	      size_t instances, size_t element_size,
	      const char *pool_name)
    : mem_usage (allocated, times, peak, instances),
      m_element_size (element_size),
      m_pool_name (pool_name) {}

  /* Sum the usage with SECOND usage.  */
  pool_usage
  operator+ (const pool_usage &second)
  {
    return pool_usage (m_allocated + second.m_allocated,
		       m_times + second.m_times,
		       m_peak + second.m_peak,
		       m_instances + second.m_instances,
		       m_element_size, m_pool_name);
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, const mem_usage &total) const
  {
    char *location_string = loc->to_string ();

    fprintf (stderr, "%-32s%-48s " PRsa(5) PRsa(9) ":%5.1f%%"
	     PRsa(9) PRsa(9) ":%5.1f%%%12" PRIu64 "\n",
	     m_pool_name, location_string,
	     SIZE_AMOUNT (m_instances),
	     SIZE_AMOUNT (m_allocated),
	     get_percent (m_allocated, total.m_allocated),
	     SIZE_AMOUNT (m_peak),
	     SIZE_AMOUNT (m_times),
	     get_percent (m_times, total.m_times),
	     (uint64_t)m_element_size);

    free (location_string);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name,
	     "Pools", "Leak", "Peak", "Times", "Elt size");
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    fprintf (stderr, "%s" PRsa(82) PRsa(10) "\n", "Total",
	     SIZE_AMOUNT (m_instances), SIZE_AMOUNT (m_allocated));
  }

  /* Element size.  */
  size_t m_element_size;
  /* Pool name.  */
  const char *m_pool_name;
};

extern mem_alloc_description<pool_usage> pool_allocator_usage;

#if 0
/* If a pool with custom block size is needed, one might use the following
   template.  An instance of this template can be used as a parameter for
   instantiating the base_pool_allocator template:

	typedef custom_block_allocator <128*1024> huge_block_allocator;
	...
	static base_pool_allocator <huge_block_allocator>
		value_pool ("value", 16384);

   Right now it's not used anywhere in the code, and is given here as an
   example.  */

template <size_t BlockSize>
class custom_block_allocator
{
public:
  static const size_t block_size = BlockSize;

  static inline void *
  allocate () ATTRIBUTE_MALLOC
  {
    return XNEWVEC (char, BlockSize);
  }

  static inline void
  release (void *block)
  {
    XDELETEVEC (block);
  }
};
#endif

/* Generic pool allocator.  */

template <typename TBlockAllocator>
class base_pool_allocator
{
public:
  /* Default constructor for pool allocator called NAME.  */
  base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
  ~base_pool_allocator ();
  void release ();
  void release_if_empty ();
  void *allocate () ATTRIBUTE_MALLOC;
  void remove (void *object);
  size_t num_elts_current ();

private:
  struct allocation_pool_list
  {
    allocation_pool_list *next;
  };

  /* Initialize a pool allocator.  */
  void initialize ();

  struct allocation_object
  {
#if CHECKING_P
    /* The ID of the alloc pool which the object was allocated from.  */
    ALLOC_POOL_ID_TYPE id;
#endif

    union
      {
	/* The data of the object.  */
	char data[1];

	/* Because we want any type of data to be well aligned after the ID,
	   the following elements are here.  They are never accessed so
	   the allocated object may be even smaller than this structure.
	   We do not care about alignment for floating-point types.  */
	char *align_p;
	int64_t align_i;
      } u;

#if CHECKING_P
    static inline allocation_object*
    get_instance (void *data_ptr)
    {
      return (allocation_object *)(((char *)(data_ptr))
				   - offsetof (allocation_object,
					       u.data));
    }
#endif

    static inline void*
    get_data (void *instance_ptr)
    {
      return (void*)(((allocation_object *) instance_ptr)->u.data);
    }
  };

  /* Align X to 8.  */
  static inline size_t
  align_eight (size_t x)
  {
    return (((x+7) >> 3) << 3);
  }

  /* Name of the pool.  */
  const char *m_name;
  /* ID of the pool.  */
  ALLOC_POOL_ID_TYPE m_id;
  /* Number of elements that fit in one block.  */
  size_t m_elts_per_block;

  /* These are the elements that have been allocated at least once
     and freed.  */
  allocation_pool_list *m_returned_free_list;

  /* These are the elements that have not yet been allocated out of
     the last block obtained from XNEWVEC.  */
  char* m_virgin_free_list;

  /* The number of elements in the virgin_free_list that can be
     allocated before needing another block.  */
  size_t m_virgin_elts_remaining;
  /* The number of elements that are allocated.  */
  size_t m_elts_allocated;
  /* The number of elements that are released.  */
  size_t m_elts_free;
  /* The number of allocated blocks.  */
  size_t m_blocks_allocated;
  /* List of blocks that are used to allocate new objects.  */
  allocation_pool_list *m_block_list;
  /* Size of a pool element in bytes.  */
  size_t m_elt_size;
  /* Size in bytes that should be allocated for each element.  */
  size_t m_size;
  /* Flag indicating whether the pool allocator has been initialized.  */
  bool m_initialized;
  /* Memory allocation location.  */
  mem_location m_location;
};

template <typename TBlockAllocator>
inline
base_pool_allocator <TBlockAllocator>::base_pool_allocator (
  const char *name, size_t size MEM_STAT_DECL):
  m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_elt_size (0),
  m_size (size), m_initialized (false),
  m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}

/* Initialize a pool allocator.  */

template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::initialize ()
{
  gcc_checking_assert (!m_initialized);
  m_initialized = true;

  size_t size = m_size;

  gcc_checking_assert (m_name);
  gcc_checking_assert (m_size);

  /* Make size large enough to store the list header.  */
  if (size < sizeof (allocation_pool_list*))
    size = sizeof (allocation_pool_list*);

  /* Now align the size to a multiple of 8.  */
  size = align_eight (size);

  /* Add the aligned size of ID.  */
  size += offsetof (allocation_object, u.data);

  m_elt_size = size;

  if (GATHER_STATISTICS)
    {
      pool_usage *u = pool_allocator_usage.register_descriptor
	(this, new mem_location (m_location));

      u->m_element_size = m_elt_size;
      u->m_pool_name = m_name;
    }

  /* List header size should be a multiple of 8.  */
  size_t header_size = align_eight (sizeof (allocation_pool_list));

  m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
  gcc_checking_assert (m_elts_per_block != 0);

  /* Increase the last used ID and use it for this pool.
     ID == 0 is used for free elements of pool so skip it.  */
  last_id++;
  if (last_id == 0)
    last_id++;

  m_id = last_id;
}

/* Free all memory allocated for the given memory pool.  */
template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::release ()
{
  if (!m_initialized)
    return;

  allocation_pool_list *block, *next_block;

  /* Free each block allocated to the pool.  */
  for (block = m_block_list; block != NULL; block = next_block)
    {
      next_block = block->next;
      TBlockAllocator::release (block);
    }

  if (GATHER_STATISTICS && !after_memory_report)
    {
      pool_allocator_usage.release_instance_overhead
	(this, (m_elts_allocated - m_elts_free) * m_elt_size);
    }

  m_returned_free_list = NULL;
  m_virgin_free_list = NULL;
  m_virgin_elts_remaining = 0;
  m_elts_allocated = 0;
  m_elts_free = 0;
  m_blocks_allocated = 0;
  m_block_list = NULL;
}

template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::release_if_empty ()
{
  if (m_elts_free == m_elts_allocated)
    release ();
}

template <typename TBlockAllocator>
inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
{
  release ();
}

/* Allocates one element from the pool specified.  */
template <typename TBlockAllocator>
inline void*
base_pool_allocator <TBlockAllocator>::allocate ()
{
  if (!m_initialized)
    initialize ();

  allocation_pool_list *header;
#ifdef ENABLE_VALGRIND_ANNOTATIONS
  int size;
#endif

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.register_instance_overhead (m_elt_size, this);
    }

#ifdef ENABLE_VALGRIND_ANNOTATIONS
  size = m_elt_size - offsetof (allocation_object, u.data);
#endif

  /* If there are no more free elements, make some more.  */
  if (!m_returned_free_list)
    {
      char *block;
      if (!m_virgin_elts_remaining)
	{
	  allocation_pool_list *block_header;

	  /* Make the block.  */
	  block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
	  block_header = new (block) allocation_pool_list;
	  block += align_eight (sizeof (allocation_pool_list));

	  /* Throw it on the block list.  */
	  block_header->next = m_block_list;
	  m_block_list = block_header;

	  /* Make the block available for allocation.  */
	  m_virgin_free_list = block;
	  m_virgin_elts_remaining = m_elts_per_block;

	  /* Also update the number of elements we have free/allocated, and
	     increment the allocated block count.  */
	  m_elts_allocated += m_elts_per_block;
	  m_elts_free += m_elts_per_block;
	  m_blocks_allocated += 1;
	}

      /* We now know that we can take the first elt off the virgin list and
	 put it on the returned list.  */
      block = m_virgin_free_list;
      header = (allocation_pool_list*) allocation_object::get_data (block);
      header->next = NULL;

      /* Mark the element to be free.  */
#if CHECKING_P
      ((allocation_object*) block)->id = 0;
#endif
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header, size));
      m_returned_free_list = header;
      m_virgin_free_list += m_elt_size;
      m_virgin_elts_remaining--;
    }

  /* Pull the first free element from the free list, and return it.  */
  header = m_returned_free_list;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
  m_returned_free_list = header->next;
  m_elts_free--;

  /* Set the ID for element.  */
#if CHECKING_P
  allocation_object::get_instance (header)->id = m_id;
#endif
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));

  return (void *)(header);
}

/* Puts the memory of OBJECT back on this pool's free list.  */
template <typename TBlockAllocator>
inline void
base_pool_allocator <TBlockAllocator>::remove (void *object)
{
  int size = m_elt_size - offsetof (allocation_object, u.data);

  if (flag_checking)
    {
      gcc_assert (m_initialized);
      gcc_assert (object
		  /* Check if we free more than we allocated.  */
		  && m_elts_free < m_elts_allocated);
#if CHECKING_P
      /* Check whether OBJECT was allocated from this pool.  */
      gcc_assert (m_id == allocation_object::get_instance (object)->id);
#endif

      memset (object, 0xaf, size);
    }

#if CHECKING_P
  /* Mark the element to be free.  */
  allocation_object::get_instance (object)->id = 0;
#endif

  allocation_pool_list *header = new (object) allocation_pool_list;
  header->next = m_returned_free_list;
  m_returned_free_list = header;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
  m_elts_free++;

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead (this, m_elt_size);
    }
}

/* Number of elements currently active (not returned to pool).  Used for cheap
   consistency checks.  */
template <typename TBlockAllocator>
inline size_t
base_pool_allocator <TBlockAllocator>::num_elts_current ()
{
  return m_elts_allocated - m_elts_free;
}

/* Specialization of base_pool_allocator which should be used in most cases.
   Another specialization may be needed if the object size is greater than
   memory_block_pool::block_size (64 KB).  */
typedef base_pool_allocator <memory_block_pool> pool_allocator;

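/* A minimal usage sketch (illustrative only, not part of this interface).
   The pool name and element size below are made up for the example; any
   fixed-size raw memory can be managed this way:

     static pool_allocator chunk_pool ("example chunks", 16);

     void *p = chunk_pool.allocate ();   // grab one 16-byte element
     ...
     chunk_pool.remove (p);              // return it to the pool
     gcc_checking_assert (chunk_pool.num_elts_current () == 0);

   All objects handed out by one pool share the same element size, and the
   whole pool can be torn down at once with release ().  */
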
/* Type-based memory pool allocator.  */
template <typename T>
class object_allocator
{
public:
  /* Default constructor for pool allocator called NAME.  */
  object_allocator (const char *name CXX_MEM_STAT_INFO):
    m_allocator (name, sizeof (T) PASS_MEM_STAT) {}

  inline void
  release ()
  {
    m_allocator.release ();
  }

  inline void release_if_empty ()
  {
    m_allocator.release_if_empty ();
  }

  /* Allocate memory for an instance of type T and call its default
     constructor.  */

  inline T *
  allocate () ATTRIBUTE_MALLOC
  {
    return ::new (m_allocator.allocate ()) T;
  }

  /* Allocate memory for an instance of type T and return a void * that
     can be used in situations where a default constructor is not provided
     by the class T.  */

  inline void *
  allocate_raw () ATTRIBUTE_MALLOC
  {
    return m_allocator.allocate ();
  }

  inline void
  remove (T *object)
  {
    /* Call destructor.  */
    object->~T ();

    m_allocator.remove (object);
  }

  inline size_t
  num_elts_current ()
  {
    return m_allocator.num_elts_current ();
  }

private:
  pool_allocator m_allocator;
};
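
/* A minimal usage sketch (illustrative only).  The type and pool name are
   hypothetical; allocate () placement-constructs a default-initialized T,
   and remove () runs the destructor before recycling the memory:

     struct edge_note { int src, dst; edge_note (): src (0), dst (0) {} };
     static object_allocator<edge_note> edge_note_pool ("edge notes");

     edge_note *e = edge_note_pool.allocate ();
     ...
     edge_note_pool.remove (e);  */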

/* Store information about each particular alloc_pool.  Note that this
   will underestimate the amount of storage used by a small amount:
   1) The overhead in a pool is not accounted for.
   2) The unallocated elements in a block are not accounted for.  Note
      that this can at worst be one element smaller than the block
      size for that pool.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};

/* Helper for classes that do not provide default ctor.  */

template <typename T>
inline void *
operator new (size_t, object_allocator<T> &a)
{
  return a.allocate_raw ();
}
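
/* A minimal usage sketch (illustrative only; the type and its constructor
   arguments are hypothetical).  This placement-new form is how objects
   without a default constructor can be built out of an object_allocator:

     struct value_pair
     {
       int lo, hi;
       value_pair (int l, int h): lo (l), hi (h) {}
     };
     static object_allocator<value_pair> value_pair_pool ("value pairs");

     value_pair *p = new (value_pair_pool) value_pair (1, 10);
     ...
     value_pair_pool.remove (p);  */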

/* Hashtable mapping alloc_pool names to descriptors.  */
extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;


#endif