]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/alloc-pool.h
Remove old pool allocator.
[thirdparty/gcc.git] / gcc / alloc-pool.h
1 /* Functions to support a pool of allocatable objects
2 Copyright (C) 1997-2015 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dan@cgsoftware.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20 #ifndef ALLOC_POOL_H
21 #define ALLOC_POOL_H
22
23 #include "hash-map.h"
24
25 extern void dump_alloc_pool_statistics (void);
26
27 typedef unsigned long ALLOC_POOL_ID_TYPE;
28
/* Type based memory pool allocator: hands out fixed-size chunks of
   sizeof (T) (+ optional extra bytes) carved from larger malloc'd blocks.
   Freed elements are recycled through an internal free list.  */
template <typename T>
class pool_allocator
{
public:
  /* Default constructor for pool allocator called NAME.  Each block
     has NUM elements.  The allocator support EXTRA_SIZE and can
     potentially IGNORE_TYPE_SIZE.  */
  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
		  bool ignore_type_size = false);

  /* Destructor releases all blocks back to the system.  */
  ~pool_allocator ();

  /* Free every block owned by the pool and reset all counters.  */
  void release ();

  /* Release the pool's memory, but only if no element is live.  */
  void release_if_empty ();

  /* Return one element from the pool, growing it if necessary.  */
  T *allocate () ATTRIBUTE_MALLOC;

  /* Put OBJECT back on the pool's free list.  */
  void remove (T *object);

private:
  /* Singly-linked list node; overlaid both on whole blocks (block list)
     and on freed elements (free list).  */
  struct allocation_pool_list
  {
    allocation_pool_list *next;
  };

  /* Initialize a pool allocator.  Called lazily on first allocate ().  */
  void initialize ();

  /* Header prepended to every element: a pool ID used (when checking is
     enabled) to verify that an element is returned to the pool that
     allocated it.  */
  template <typename U>
  struct allocation_object
  {
    /* The ID of alloc pool which the object was allocated from.  */
    ALLOC_POOL_ID_TYPE id;

    union
      {
	/* The data of the object.  */
	char data[1];

	/* Because we want any type of data to be well aligned after the ID,
	   the following elements are here.  They are never accessed so
	   the allocated object may be even smaller than this structure.
	   We do not care about alignment for floating-point types.  */
	char *align_p;
	int64_t align_i;
      } u;

    /* Map a pointer to an element's data back to its header.  */
    static inline allocation_object<U> *get_instance (void *data_ptr)
    {
      return (allocation_object<U> *)(((char *)(data_ptr))
				      - offsetof (allocation_object<U>,
						  u.data));
    }

    /* Map a pointer to a header to its element's data.  */
    static inline U *get_data (void *instance_ptr)
    {
      return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
    }
  };

  /* Align X to 8.  */
  size_t align_eight (size_t x)
  {
    return (((x+7) >> 3) << 3);
  }

  /* Name of the pool, used for statistics and diagnostics.  */
  const char *m_name;
  /* Unique ID of this pool, assigned from last_id when checking is on.  */
  ALLOC_POOL_ID_TYPE m_id;
  /* Number of elements carved out of each freshly allocated block.  */
  size_t m_elts_per_block;

  /* These are the elements that have been allocated at least once
     and freed.  */
  allocation_pool_list *m_returned_free_list;

  /* These are the elements that have not yet been allocated out of
     the last block obtained from XNEWVEC.  */
  char* m_virgin_free_list;

  /* The number of elements in the virgin_free_list that can be
     allocated before needing another block.  */
  size_t m_virgin_elts_remaining;
  /* The number of elements that are allocated.  */
  size_t m_elts_allocated;
  /* The number of elements that are released.  */
  size_t m_elts_free;
  /* The number of allocated blocks.  */
  size_t m_blocks_allocated;
  /* List of blocks that are used to allocate new objects.  */
  allocation_pool_list *m_block_list;
  /* The number of elements in a block.  */
  size_t m_block_size;
  /* Size of a pool elements in bytes.  */
  size_t m_elt_size;
  /* Flag if we should ignore size of a type.  */
  bool m_ignore_type_size;
  /* Extra size in bytes that should be allocated for each element.  */
  size_t m_extra_size;
  /* Flag if a pool allocator is initialized.  */
  bool m_initialized;
};
126
127 /* Last used ID. */
128 extern ALLOC_POOL_ID_TYPE last_id;
129
/* Store information about each particular alloc_pool.  Note that this
   will underestimate the amount of storage used by a small amount:
   1) The overhead in a pool is not accounted for.
   2) The unallocated elements in a block are not accounted for.  Note
      that this can at worst case be one element smaller than the block
      size for that pool.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};
149
150
151 /* Hashtable mapping alloc_pool names to descriptors. */
152 extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
153
154 /* For given name, return descriptor, create new if needed. */
155 alloc_pool_descriptor *
156 allocate_pool_descriptor (const char *name);
157
158 template <typename T>
159 inline
160 pool_allocator<T>::pool_allocator (const char *name, size_t num,
161 size_t extra_size, bool ignore_type_size):
162 m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
163 m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
164 m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
165 m_ignore_type_size (ignore_type_size), m_extra_size (extra_size),
166 m_initialized (false) {}
167
/* Initialize a pool allocator: compute the per-element and per-block sizes
   and (when checking) assign the pool a unique ID.  Called lazily from
   allocate () so unused pools cost nothing.  */

template <typename T>
void
pool_allocator<T>::initialize ()
{
  gcc_checking_assert (!m_initialized);
  m_initialized = true;

  size_t header_size;
  /* Payload bytes per element; sizeof (T) is skipped on request.  */
  size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;

  gcc_checking_assert (m_name);

  /* Make size large enough to store the list header, since a freed
     element is overlaid with an allocation_pool_list node.  */
  if (size < sizeof (allocation_pool_list*))
    size = sizeof (allocation_pool_list*);

  /* Now align the size to a multiple of 8 (align_eight rounds up to 8,
     not 4).  */
  size = align_eight (size);

  /* Add the aligned size of ID.  */
  size += offsetof (allocation_object<T>, u.data);

  /* Um, we can't really allocate 0 elements per block.  */
  gcc_checking_assert (m_elts_per_block);

  m_elt_size = size;

  if (GATHER_STATISTICS)
    {
      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
      desc->elt_size = size;
      desc->created++;
    }

  /* List header size should be a multiple of 8.  */
  header_size = align_eight (sizeof (allocation_pool_list));

  m_block_size = (size * m_elts_per_block) + header_size;

#ifdef ENABLE_CHECKING
  /* Increase the last used ID and use it for this pool.
     ID == 0 is used for free elements of pool so skip it.  */
  last_id++;
  if (last_id == 0)
    last_id++;

  m_id = last_id;
#endif
}
220
221 /* Free all memory allocated for the given memory pool. */
222 template <typename T>
223 inline void
224 pool_allocator<T>::release ()
225 {
226 if (!m_initialized)
227 return;
228
229 allocation_pool_list *block, *next_block;
230
231 /* Free each block allocated to the pool. */
232 for (block = m_block_list; block != NULL; block = next_block)
233 {
234 next_block = block->next;
235 free (block);
236 }
237
238 if (GATHER_STATISTICS && false)
239 {
240 alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
241 desc->current -= (m_elts_allocated - m_elts_free) * m_elt_size;
242 }
243
244 m_returned_free_list = NULL;
245 m_virgin_free_list = NULL;
246 m_virgin_elts_remaining = 0;
247 m_elts_allocated = 0;
248 m_elts_free = 0;
249 m_blocks_allocated = 0;
250 m_block_list = NULL;
251 }
252
253 template <typename T>
254 void
255 inline pool_allocator<T>::release_if_empty ()
256 {
257 if (m_elts_free == m_elts_allocated)
258 release ();
259 }
260
261 template <typename T>
262 inline pool_allocator<T>::~pool_allocator ()
263 {
264 release ();
265 }
266
/* Allocates one element from the pool specified.  Grows the pool by one
   XNEWVEC'd block when both the returned free list and the virgin free
   list are exhausted.  */
template <typename T>
inline T *
pool_allocator<T>::allocate ()
{
  /* Lazy initialization: compute sizes and pool ID on first use.  */
  if (!m_initialized)
    initialize ();

  allocation_pool_list *header;
#ifdef ENABLE_VALGRIND_ANNOTATIONS
  int size;
#endif

  if (GATHER_STATISTICS)
    {
      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);

      desc->allocated += m_elt_size;
      desc->current += m_elt_size;
      if (desc->peak < desc->current)
	desc->peak = desc->current;
    }

#ifdef ENABLE_VALGRIND_ANNOTATIONS
  /* Payload size, excluding the allocation_object header.  */
  size = m_elt_size - offsetof (allocation_object<T>, u.data);
#endif

  /* If there are no more free elements, make some more!  */
  if (!m_returned_free_list)
    {
      char *block;
      if (!m_virgin_elts_remaining)
	{
	  allocation_pool_list *block_header;

	  /* Make the block.  */
	  block = XNEWVEC (char, m_block_size);
	  block_header = (allocation_pool_list*) block;
	  /* Skip past the block-list header to the first element.  */
	  block += align_eight (sizeof (allocation_pool_list));

	  /* Throw it on the block list.  */
	  block_header->next = m_block_list;
	  m_block_list = block_header;

	  /* Make the block available for allocation.  */
	  m_virgin_free_list = block;
	  m_virgin_elts_remaining = m_elts_per_block;

	  /* Also update the number of elements we have free/allocated, and
	     increment the allocated block count.  */
	  m_elts_allocated += m_elts_per_block;
	  m_elts_free += m_elts_per_block;
	  m_blocks_allocated += 1;
	}

      /* We now know that we can take the first elt off the virgin list and
	 put it on the returned list.  */
      block = m_virgin_free_list;
      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
      header->next = NULL;
#ifdef ENABLE_CHECKING
      /* Mark the element to be free.  */
      ((allocation_object<T> *) block)->id = 0;
#endif
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
      m_returned_free_list = header;
      m_virgin_free_list += m_elt_size;
      m_virgin_elts_remaining--;

    }

  /* Pull the first free element from the free list, and return it.  */
  header = m_returned_free_list;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
  m_returned_free_list = header->next;
  m_elts_free--;

#ifdef ENABLE_CHECKING
  /* Set the ID for element.  */
  allocation_object<T>::get_instance (header)->id = m_id;
#endif
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));

  /* NOTE(review): despite the historical comment here ("Call default
     constructor"), this is only a cast — no constructor of T actually
     runs, so T must effectively be trivially constructible.  Verify
     callers do not rely on construction.  */
  return (T *)(header);
}
353
/* Puts OBJECT back on the pool's free list.  No destructor is run; the
   element's storage is simply recycled.  */
template <typename T>
void
pool_allocator<T>::remove (T *object)
{
  gcc_checking_assert (m_initialized);

  allocation_pool_list *header;
  /* Payload size, excluding the allocation_object header.  */
  int size;
  size = m_elt_size - offsetof (allocation_object<T>, u.data);

#ifdef ENABLE_CHECKING
  gcc_assert (object
	      /* Check if we free more than we allocated, which is Bad (TM).  */
	      && m_elts_free < m_elts_allocated
	      /* Check whether the PTR was allocated from POOL.  */
	      && m_id == allocation_object<T>::get_instance (object)->id);

  /* Poison the payload so stale reads are noticed.  */
  memset (object, 0xaf, size);

  /* Mark the element to be free.  */
  allocation_object<T>::get_instance (object)->id = 0;
#endif

  /* Overlay a free-list node on the element and push it.  */
  header = (allocation_pool_list*) object;
  header->next = m_returned_free_list;
  m_returned_free_list = header;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
  m_elts_free++;

  if (GATHER_STATISTICS)
    {
      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
      desc->current -= m_elt_size;
    }
}
390
391 #endif