911ab6b9 | 1 | /* "Bag-of-pages" garbage collector for the GNU compiler. |
fbd26352 | 2 | Copyright (C) 1999-2019 Free Software Foundation, Inc. |
911ab6b9 | 3 | |
f12b58b3 | 4 | This file is part of GCC. |
911ab6b9 | 5 | |
f12b58b3 | 6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 8 | Software Foundation; either version 3, or (at your option) any later |
f12b58b3 | 9 | version. |
911ab6b9 | 10 | |
f12b58b3 | 11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
911ab6b9 | 15 | |
90856340 | 16 | You should have received a copy of the GNU General Public License |
8c4c00c1 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
911ab6b9 | 19 | |
911ab6b9 | 20 | #include "config.h" |
911ab6b9 | 21 | #include "system.h" |
805e22b2 | 22 | #include "coretypes.h" |
9ef16211 | 23 | #include "backend.h" |
b20a8bb4 | 24 | #include "alias.h" |
911ab6b9 | 25 | #include "tree.h" |
f01a3c5e | 26 | #include "rtl.h" |
ad7b10a2 | 27 | #include "memmodel.h" |
f8e15e8a | 28 | #include "tm_p.h" |
0b205f4c | 29 | #include "diagnostic-core.h" |
911ab6b9 | 30 | #include "flags.h" |
ba72912a | 31 | #include "ggc-internal.h" |
74d2af64 | 32 | #include "timevar.h" |
2a3edec5 | 33 | #include "params.h" |
073c1fd5 | 34 | #include "cgraph.h" |
3089b75c | 35 | #include "cfgloop.h" |
740cd0be | 36 | #include "plugin.h" |
f01a3c5e | 37 | |
901dfcc7 | 38 | /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a |
39 | file open. Prefer either to valloc. */ | |
40 | #ifdef HAVE_MMAP_ANON | |
41 | # undef HAVE_MMAP_DEV_ZERO | |
901dfcc7 | 42 | # define USING_MMAP |
71661611 | 43 | #endif |
911ab6b9 | 44 | |
901dfcc7 | 45 | #ifdef HAVE_MMAP_DEV_ZERO |
901dfcc7 | 46 | # define USING_MMAP |
80d3ceff | 47 | #endif |
48 | ||
80eb2355 | 49 | #ifndef USING_MMAP |
50 | #define USING_MALLOC_PAGE_GROUPS | |
d3b95fd7 | 51 | #endif |
911ab6b9 | 52 | |
f4245e06 | 53 | #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \ |
54 | && defined(USING_MMAP) | |
c5db973f | 55 | # define USING_MADVISE |
56 | #endif | |
57 | ||
442e3cb9 | 58 | /* Strategy: |
911ab6b9 | 59 | |
60 | This garbage-collecting allocator allocates objects on one of a set | |
61 | of pages. Each page can allocate objects of a single size only; | |
62 | available sizes are powers of two starting at four bytes. The size | |
63 | of an allocation request is rounded up to the next power of two | |
64 | (`order'), and satisfied from the appropriate page. | |
65 | ||
66 | Each page is recorded in a page-entry, which also maintains an | |
67 | in-use bitmap of object positions on the page. This allows the | |
68 | allocation state of a particular object to be flipped without | |
69 | touching the page itself. | |
70 | ||
71 | Each page-entry also has a context depth, which is used to track | |
72 | pushing and popping of allocation contexts. Only objects allocated | |
3cfec666 | 73 | in the current (highest-numbered) context may be collected. |
911ab6b9 | 74 | |
75 | Page entries are arranged in an array of singly-linked lists. The | |
76 | array is indexed by the allocation size, in bits, of the pages on | |
77 | it; i.e. all pages on a list allocate objects of the same size. | |
78 | Pages are ordered on the list such that all non-full pages precede | |
79 | all full pages, with non-full pages arranged in order of decreasing | |
80 | context depth. | |
81 | ||
82 | Empty pages (of all orders) are kept on a single page cache list, | |
83 | and are considered first when new pages are required; they are | |
84 | deallocated at the start of the next collection if they haven't | |
85 | been recycled by then. */ | |
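
/* Illustration (not part of the allocator): under the basic
   power-of-two scheme, a 20-byte request would be rounded up to 32
   bytes and served from an order-5 page.  The extra_order_size_table
   below adds tighter, non-power-of-two buckets -- e.g., assuming
   MAX_ALIGNMENT is 8, a 24-byte bucket -- so that common odd-sized
   tree and RTL structures waste less space.  */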

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
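
/* Worked example (illustrative): with 4K system pages, G.lg_pagesize
   is 12, so PAGE_L2_BITS is 32 - 8 - 12 == 12.  For a pointer P, the
   top eight of its low 32 bits select one of 256 level-1 slots, and
   the next twelve bits select one of 4096 level-2 slots, which holds
   the page_entry * for the page containing P.  */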

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
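
/* Worked example (illustrative, for a 32-bit host): for Z == 24 ==
   2^3 * 3, compute_inverse below picks S == 3 and M == 0xAAAAAAAB,
   the multiplicative inverse of 3 modulo 2^32 (3 * 0xAAAAAAAB == 1
   mod 2^32).  For any offset O == 24 * k, (O * M) mod 2^32 == 8 * k,
   and shifting right by 3 yields k == O / 24, with no divide
   instruction issued.  */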

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
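
/* The offsetof trick above works because the compiler must pad the
   lone char so that the union that follows is suitably aligned; the
   char plus its padding is exactly that alignment.  On a typical
   LP64 host both union members are eight bytes wide, so MAX_ALIGNMENT
   is 8 (an assumed, illustrative value).  */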

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
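
/* For instance, ROUND_UP_VALUE (5, 8) == 3, since 5 + 3 == 8, while
   ROUND_UP_VALUE (8, 8) == 0, so values already at a multiple of F
   are left alone.  */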

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page?  */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};
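
/* Sizing illustration (assuming 4K pages and 64-bit longs): an
   order-5 page holds 4096 / 32 == 128 objects, so alloc_page below
   sizes the entry with BITMAP_SIZE (128 + 1) == 24 bytes of in_use_p,
   the extra bit serving as the one-past-the-end sentinel consulted
   via next_bit_hint.  */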

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two-level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two-level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};
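
/* Usage sketch (illustrative): ggc_internal_alloc below records a
   finalizer for a single object (or a vec_finalizer covering N
   objects of size S) via add_finalizer; when a later collection finds
   the object unmarked, its finalizer is called before the storage is
   reclaimed.  */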

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index into by_depth at which the
     given depth starts.  It is indexed by the depth we are interested
     in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes up to 32, 64 and 128
       bytes.  These sizes are interesting because they are typical
       cache line sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
\f
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Traverse the page table and find the entry for a page.
   If the object was not allocated by the GC, return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
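
/* Note (illustrative): on 64-bit hosts the two variants above differ
   only in robustness.  lookup_page_table_entry assumes the high 32
   bits of P are already present in the chain and will dereference a
   null table pointer otherwise, which is why marking of
   possibly-foreign pointers in gt_ggc_m_S goes through
   safe_lookup_page_table_entry instead.  */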

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard
     the handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
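
/* For example (illustrative): if a group's allocation starts at A and
   G.lg_pagesize is 12, the page at A + 2 * 4096 has index 2, and its
   availability is tracked by bit 2 of group->in_use.  */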

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid a
     handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so that the garbage
	 collector is not triggered unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
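
/* Example (illustrative): a request for 600 bytes exceeds
   NUM_SIZE_LOOKUP, so the loop above starts at order 10 and stops
   immediately, since OBJECT_SIZE (10) is 1024 bytes for the
   power-of-two orders; the caller is handed a 1024-byte object.
   Requests under 512 bytes are resolved with a single size_lookup[]
   read instead.  */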

/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
		    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths; if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning the object by writing 0xaf, in an attempt to keep
     exactly the same semantics in the presence of memory bugs,
     regardless of ENABLE_VALGRIND_CHECKING.  We override this request
     below.  Drop the handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
dfecde36 | 1445 | /* Mark function for strings. */ |
1446 | ||
1447 | void | |
1448 | gt_ggc_m_S (const void *p) | |
1449 | { | |
1450 | page_entry *entry; | |
1451 | unsigned bit, word; | |
1452 | unsigned long mask; | |
1453 | unsigned long offset; | |
1454 | ||
8a55e97d | 1455 | if (!p) |
dfecde36 | 1456 | return; |
1457 | ||
8a55e97d | 1458 | /* Look up the page on which the object is allocated. If it was not
1459 | GC allocated, gracefully bail out. */ | |
1460 | entry = safe_lookup_page_table_entry (p); | |
1461 | if (!entry) | |
1462 | return; | |
dfecde36 | 1463 | |
1464 | /* Calculate the index of the object on the page; this is its bit | |
1465 | position in the in_use_p bitmap. Note that because a char* might | |
1466 | point to the middle of an object, we need special code here to | |
1467 | make sure P points to the start of an object. */ | |
1468 | offset = ((const char *) p - entry->page) % object_size_table[entry->order]; | |
1469 | if (offset) | |
1470 | { | |
1471 | /* Here we've seen a char* which does not point to the beginning | |
1472 | of an allocated object. We assume it points to the middle of | |
1473 | a STRING_CST. */ | |
1474 | gcc_assert (offset == offsetof (struct tree_string, str)); | |
1475 | p = ((const char *) p) - offset; | |
4077bf7a | 1476 | gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); |
dfecde36 | 1477 | return; |
1478 | } | |
1479 | ||
1480 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); | |
1481 | word = bit / HOST_BITS_PER_LONG; | |
1482 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
1483 | ||
1484 | /* If the bit was previously set, skip it. */ | |
1485 | if (entry->in_use_p[word] & mask) | |
1486 | return; | |
1487 | ||
1488 | /* Otherwise set it, and decrement the free object count. */ | |
1489 | entry->in_use_p[word] |= mask; | |
1490 | entry->num_free_objects -= 1; | |
1491 | ||
1492 | if (GGC_DEBUG_LEVEL >= 4) | |
1493 | fprintf (G.debug_file, "Marking %p\n", p); | |
1494 | ||
1495 | return; | |
1496 | } | |
1497 | ||
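
The interior-pointer recovery above is plain modular arithmetic on the object's page offset. A minimal standalone sketch, not GCC code, assuming a hypothetical 64-byte object size (the real routine divides by object_size_table[entry->order] and insists the residue equal offsetof (struct tree_string, str)):

#include <stdio.h>

int
main (void)
{
  char page[4096];
  size_t object_size = 64;        /* hypothetical OBJECT_SIZE (order) */
  char *object = page + 3 * object_size;    /* start of the 4th object */
  char *interior = object + 24;   /* a char * into the object's body */

  /* A nonzero residue means INTERIOR is not on an object boundary;
     subtracting it recovers the start of the enclosing object.  */
  size_t offset = (size_t) (interior - page) % object_size;
  if (offset)
    interior -= offset;

  printf ("recovered object start: %s\n", interior == object ? "yes" : "no");
  return 0;
}
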
2b15d2ba | 1498 | |
1499 | /* User-callable entry points for marking string X. */ | |
1500 | ||
1501 | void | |
1502 | gt_ggc_mx (const char *& x) | |
1503 | { | |
1504 | gt_ggc_m_S (x); | |
1505 | } | |
1506 | ||
1507 | void | |
1508 | gt_ggc_mx (unsigned char *& x) | |
1509 | { | |
1510 | gt_ggc_m_S (x); | |
1511 | } | |
1512 | ||
1513 | void | |
1514 | gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED) | |
1515 | { | |
1516 | } | |
1517 | ||
e3c4633e | 1518 | /* If P is not marked, mark it and return false. Otherwise return true.
911ab6b9 | 1519 | P must have been allocated by the GC allocator; it mustn't point to |
1520 | static objects, stack variables, or memory allocated with malloc. */ | |
e3c4633e | 1521 | |
71661611 | 1522 | int |
6ec1f4e0 | 1523 | ggc_set_mark (const void *p) |
911ab6b9 | 1524 | { |
1525 | page_entry *entry; | |
1526 | unsigned bit, word; | |
1527 | unsigned long mask; | |
1528 | ||
1529 | /* Look up the page on which the object is allocated. If the object
1530 | wasn't allocated by the collector, we'll probably die. */ | |
e3691812 | 1531 | entry = lookup_page_table_entry (p); |
0d59b19d | 1532 | gcc_assert (entry); |
911ab6b9 | 1533 | |
1534 | /* Calculate the index of the object on the page; this is its bit | |
1535 | position in the in_use_p bitmap. */ | |
40b9be5e | 1536 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
911ab6b9 | 1537 | word = bit / HOST_BITS_PER_LONG; |
1538 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
3cfec666 | 1539 | |
aa40f561 | 1540 | /* If the bit was previously set, skip it. */ |
911ab6b9 | 1541 | if (entry->in_use_p[word] & mask) |
1542 | return 1; | |
1543 | ||
1544 | /* Otherwise set it, and decrement the free object count. */ | |
1545 | entry->in_use_p[word] |= mask; | |
1546 | entry->num_free_objects -= 1; | |
1547 | ||
911ab6b9 | 1548 | if (GGC_DEBUG_LEVEL >= 4) |
1549 | fprintf (G.debug_file, "Marking %p\n", p); | |
1550 | ||
1551 | return 0; | |
1552 | } | |
1553 | ||
3cfec666 | 1554 | /* Return 1 if P has been marked, zero otherwise. |
15d769aa | 1555 | P must have been allocated by the GC allocator; it mustn't point to |
1556 | static objects, stack variables, or memory allocated with malloc. */ | |
1557 | ||
1558 | int | |
6ec1f4e0 | 1559 | ggc_marked_p (const void *p) |
15d769aa | 1560 | { |
1561 | page_entry *entry; | |
1562 | unsigned bit, word; | |
1563 | unsigned long mask; | |
1564 | ||
1565 | /* Look up the page on which the object is allocated. If the object
1566 | wasn't allocated by the collector, we'll probably die. */ | |
1567 | entry = lookup_page_table_entry (p); | |
0d59b19d | 1568 | gcc_assert (entry); |
15d769aa | 1569 | |
1570 | /* Calculate the index of the object on the page; this is its bit | |
1571 | position in the in_use_p bitmap. */ | |
40b9be5e | 1572 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
15d769aa | 1573 | word = bit / HOST_BITS_PER_LONG; |
1574 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
3cfec666 | 1575 | |
291e3f96 | 1576 | return (entry->in_use_p[word] & mask) != 0; |
15d769aa | 1577 | } |
1578 | ||
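
Both gt_ggc_m_S and the two routines above index the in_use_p bitmap the same way: the object's byte offset within its page becomes a bit number, which is split into a word index and a mask. A standalone sketch, not GCC code, with toy constants (a fixed 16-byte object size, so OFFSET_TO_BIT degenerates to a plain division):

#include <stdio.h>

#define TOY_BITS_PER_LONG (8 * sizeof (unsigned long))
#define TOY_OBJECT_SIZE 16

int
main (void)
{
  unsigned long in_use_p[256 / TOY_BITS_PER_LONG] = { 0 };
  size_t offset = 37 * TOY_OBJECT_SIZE;   /* 38th object on the page */

  size_t bit = offset / TOY_OBJECT_SIZE;  /* what OFFSET_TO_BIT yields */
  size_t word = bit / TOY_BITS_PER_LONG;
  unsigned long mask = (unsigned long) 1 << (bit % TOY_BITS_PER_LONG);

  if (!(in_use_p[word] & mask))           /* not marked yet: mark it */
    in_use_p[word] |= mask;

  printf ("bit %zu -> word %zu, mask %#lx\n", bit, word, mask);
  return 0;
}
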
e3c4633e | 1579 | /* Return the size of the gc-able object P. */ |
1580 | ||
4e00b6fd | 1581 | size_t |
6ec1f4e0 | 1582 | ggc_get_size (const void *p) |
4e00b6fd | 1583 | { |
1584 | page_entry *pe = lookup_page_table_entry (p); | |
2f6aecaf | 1585 | return OBJECT_SIZE (pe->order); |
4e00b6fd | 1586 | } |
c4e03242 | 1587 | |
1588 | /* Release the memory for object P. */ | |
1589 | ||
1590 | void | |
1591 | ggc_free (void *p) | |
1592 | { | |
8f359205 | 1593 | if (in_gc) |
1594 | return; | |
1595 | ||
c4e03242 | 1596 | page_entry *pe = lookup_page_table_entry (p); |
1597 | size_t order = pe->order; | |
1598 | size_t size = OBJECT_SIZE (order); | |
1599 | ||
ecd52ea9 | 1600 | if (GATHER_STATISTICS) |
1601 | ggc_free_overhead (p); | |
0ca9a7b6 | 1602 | |
c4e03242 | 1603 | if (GGC_DEBUG_LEVEL >= 3) |
1604 | fprintf (G.debug_file, | |
1605 | "Freeing object, actual size=%lu, at %p on %p\n", | |
1606 | (unsigned long) size, p, (void *) pe); | |
1607 | ||
1608 | #ifdef ENABLE_GC_CHECKING | |
1609 | /* Poison the data, to indicate the data is garbage. */ | |
a7779e75 | 1610 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size)); |
c4e03242 | 1611 | memset (p, 0xa5, size); |
1612 | #endif | |
1613 | /* Let valgrind know the object is free. */ | |
a7779e75 | 1614 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size)); |
c4e03242 | 1615 | |
1616 | #ifdef ENABLE_GC_ALWAYS_COLLECT | |
1617 | /* In the completely-anal-checking mode, we do *not* immediately free | |
48e1416a | 1618 | the data, but instead verify that the data is *actually* not |
c4e03242 | 1619 | reachable the next time we collect. */ |
1620 | { | |
4c36ffe6 | 1621 | struct free_object *fo = XNEW (struct free_object); |
c4e03242 | 1622 | fo->object = p; |
1623 | fo->next = G.free_object_list; | |
1624 | G.free_object_list = fo; | |
1625 | } | |
1626 | #else | |
1627 | { | |
1628 | unsigned int bit_offset, word, bit; | |
1629 | ||
1630 | G.allocated -= size; | |
1631 | ||
1632 | /* Mark the object not-in-use. */ | |
1633 | bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order); | |
1634 | word = bit_offset / HOST_BITS_PER_LONG; | |
1635 | bit = bit_offset % HOST_BITS_PER_LONG; | |
1636 | pe->in_use_p[word] &= ~(1UL << bit); | |
1637 | ||
1638 | if (pe->num_free_objects++ == 0) | |
1639 | { | |
4a755ae7 | 1640 | page_entry *p, *q; |
1641 | ||
c4e03242 | 1642 | /* If the page is completely full, then it's supposed to |
1643 | be after all pages that aren't. Since we've freed one | |
1644 | object from a page that was full, we need to move the | |
48e1416a | 1645 | page to the head of the list. |
c4e03242 | 1646 | |
4a755ae7 | 1647 | PE is the node we want to move. Q is the previous node |
1648 | and P is the next node in the list. */ | |
1649 | q = pe->prev; | |
c4e03242 | 1650 | if (q && q->num_free_objects == 0) |
1651 | { | |
1652 | p = pe->next; | |
4a755ae7 | 1653 | |
c4e03242 | 1654 | q->next = p; |
4a755ae7 | 1655 | |
1656 | /* If PE was at the end of the list, then Q becomes the | |
1657 | new end of the list. If PE was not the end of the | |
1658 | list, then we need to update the PREV field for P. */ | |
c4e03242 | 1659 | if (!p) |
1660 | G.page_tails[order] = q; | |
4a755ae7 | 1661 | else |
1662 | p->prev = q; | |
1663 | ||
1664 | /* Move PE to the head of the list. */ | |
c4e03242 | 1665 | pe->next = G.pages[order]; |
4a755ae7 | 1666 | pe->prev = NULL; |
1667 | G.pages[order]->prev = pe; | |
c4e03242 | 1668 | G.pages[order] = pe; |
1669 | } | |
1670 | ||
1671 | /* Reset the hint bit to point to the only free object. */ | |
1672 | pe->next_bit_hint = bit_offset; | |
1673 | } | |
1674 | } | |
1675 | #endif | |
1676 | } | |
911ab6b9 | 1677 | \f |
40b9be5e | 1678 | /* Subroutine of init_ggc which computes the pair of numbers used to |
1679 | perform division by OBJECT_SIZE (order) and fills in inverse_table[]. | |
1680 | ||
1681 | This algorithm is taken from Granlund and Montgomery's paper | |
1682 | "Division by Invariant Integers using Multiplication" | |
1683 | (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by | |
1684 | constants). */ | |
1685 | ||
1686 | static void | |
6ec1f4e0 | 1687 | compute_inverse (unsigned order) |
40b9be5e | 1688 | { |
48e1416a | 1689 | size_t size, inv; |
2c06e494 | 1690 | unsigned int e; |
ff34fd94 | 1691 | |
40b9be5e | 1692 | size = OBJECT_SIZE (order); |
1693 | e = 0; | |
1694 | while (size % 2 == 0) | |
1695 | { | |
1696 | e++; | |
1697 | size >>= 1; | |
1698 | } | |
e3c4633e | 1699 | |
40b9be5e | 1700 | inv = size; |
1701 | while (inv * size != 1) | |
1702 | inv = inv * (2 - inv*size); | |
1703 | ||
1704 | DIV_MULT (order) = inv; | |
1705 | DIV_SHIFT (order) = e; | |
1706 | } | |
1707 | ||
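
compute_inverse is the Granlund/Montgomery exact-division trick: write SIZE as ODD << E; dividing a byte offset that is an exact multiple of SIZE then needs only a shift by E and a multiply by the inverse of ODD modulo 2^WIDTH. The Newton step inv = inv * (2 - inv * size) doubles the number of correct low bits each iteration, starting from the three bits that inv = size already gets right for any odd size. A standalone sketch, not GCC code, with a hypothetical 24-byte object size:

#include <stdio.h>

int
main (void)
{
  size_t size = 24;             /* hypothetical OBJECT_SIZE: 3 << 3 */
  unsigned e = 0;
  size_t odd = size, inv;

  while (odd % 2 == 0)          /* strip factors of two, as above */
    {
      e++;
      odd >>= 1;
    }

  inv = odd;                    /* Newton iteration for odd^-1 mod 2^N */
  while (inv * odd != 1)
    inv = inv * (2 - inv * odd);

  /* Any exact multiple of SIZE now divides without a hardware divide:
     this is what DIV_MULT and DIV_SHIFT feed into OFFSET_TO_BIT.  */
  size_t offset = 7 * size;
  printf ("%zu / %zu = %zu\n", offset, size, (offset >> e) * inv);
  return 0;
}
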
1708 | /* Initialize the ggc-mmap allocator. */ | |
911ab6b9 | 1709 | void |
6ec1f4e0 | 1710 | init_ggc (void) |
911ab6b9 | 1711 | { |
415309e2 | 1712 | static bool init_p = false; |
2f6aecaf | 1713 | unsigned order; |
1714 | ||
415309e2 | 1715 | if (init_p) |
1716 | return; | |
1717 | init_p = true; | |
1718 | ||
9af5ce0c | 1719 | G.pagesize = getpagesize (); |
911ab6b9 | 1720 | G.lg_pagesize = exact_log2 (G.pagesize); |
1721 | ||
901dfcc7 | 1722 | #ifdef HAVE_MMAP_DEV_ZERO |
911ab6b9 | 1723 | G.dev_zero_fd = open ("/dev/zero", O_RDONLY); |
1724 | if (G.dev_zero_fd == -1) | |
3a2aee0e | 1725 | internal_error ("open /dev/zero: %m"); |
911ab6b9 | 1726 | #endif |
1727 | ||
1728 | #if 0 | |
1729 | G.debug_file = fopen ("ggc-mmap.debug", "w"); | |
1730 | #else | |
1731 | G.debug_file = stdout; | |
1732 | #endif | |
1733 | ||
901dfcc7 | 1734 | #ifdef USING_MMAP |
42f8e268 | 1735 | /* StunOS has an amazing off-by-one error for the first mmap allocation |
1736 | after fiddling with RLIMIT_STACK. The result, as hard as it is to | |
1737 | believe, is an unaligned page allocation, which would cause us to | |
1738 | hork badly if we tried to use it. */ | |
1739 | { | |
4a2f812e | 1740 | char *p = alloc_anon (NULL, G.pagesize, true); |
901dfcc7 | 1741 | struct page_entry *e; |
337c992b | 1742 | if ((uintptr_t)p & (G.pagesize - 1)) |
42f8e268 | 1743 | { |
1744 | /* How losing. Discard this one and try another. If we still | |
1745 | can't get something useful, give up. */ | |
1746 | ||
4a2f812e | 1747 | p = alloc_anon (NULL, G.pagesize, true); |
337c992b | 1748 | gcc_assert (!((uintptr_t)p & (G.pagesize - 1))); |
42f8e268 | 1749 | } |
901dfcc7 | 1750 | |
aa40f561 | 1751 | /* We have a good page, might as well hold onto it... */ |
4c36ffe6 | 1752 | e = XCNEW (struct page_entry); |
901dfcc7 | 1753 | e->bytes = G.pagesize; |
1754 | e->page = p; | |
1755 | e->next = G.free_pages; | |
1756 | G.free_pages = e; | |
42f8e268 | 1757 | } |
1758 | #endif | |
2f6aecaf | 1759 | |
1760 | /* Initialize the object size table. */ | |
1761 | for (order = 0; order < HOST_BITS_PER_PTR; ++order) | |
1762 | object_size_table[order] = (size_t) 1 << order; | |
1763 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) | |
918edee0 | 1764 | { |
1765 | size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR]; | |
5da2078a | 1766 | |
1767 | /* If S is not a multiple of the MAX_ALIGNMENT, then round it up | |
1768 | so that we're sure of getting aligned memory. */ | |
1769 | s = ROUND_UP (s, MAX_ALIGNMENT); | |
918edee0 | 1770 | object_size_table[order] = s; |
1771 | } | |
2f6aecaf | 1772 | |
40b9be5e | 1773 | /* Initialize the objects-per-page and inverse tables. */ |
2f6aecaf | 1774 | for (order = 0; order < NUM_ORDERS; ++order) |
1775 | { | |
1776 | objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order); | |
1777 | if (objects_per_page_table[order] == 0) | |
1778 | objects_per_page_table[order] = 1; | |
40b9be5e | 1779 | compute_inverse (order); |
2f6aecaf | 1780 | } |
1781 | ||
1782 | /* Reset the size_lookup array to put appropriately sized objects in | |
1783 | the special orders. All objects bigger than the previous power | |
1784 | of two, but no greater than the special size, should go in the | |
5da2078a | 1785 | new order. */ |
2f6aecaf | 1786 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) |
1787 | { | |
5da2078a | 1788 | int o; |
1789 | int i; | |
76e1b933 | 1790 | |
f68513d3 | 1791 | i = OBJECT_SIZE (order); |
1792 | if (i >= NUM_SIZE_LOOKUP) | |
1793 | continue; | |
1794 | ||
1795 | for (o = size_lookup[i]; o == size_lookup [i]; --i) | |
5da2078a | 1796 | size_lookup[i] = order; |
1797 | } | |
8c14f57e | 1798 | |
76e1b933 | 1799 | G.depth_in_use = 0; |
1800 | G.depth_max = 10; | |
4c36ffe6 | 1801 | G.depth = XNEWVEC (unsigned int, G.depth_max); |
76e1b933 | 1802 | |
1803 | G.by_depth_in_use = 0; | |
1804 | G.by_depth_max = INITIAL_PTE_COUNT; | |
4c36ffe6 | 1805 | G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
1806 | G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); | |
5517ecc9 | 1807 | |
1808 | /* Allocate space for the depth 0 finalizers. */ | |
1809 | G.finalizers.safe_push (vNULL); | |
1810 | G.vec_finalizers.safe_push (vNULL); | |
1811 | gcc_assert (G.finalizers.length() == 1); | |
911ab6b9 | 1812 | } |
1813 | ||
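
The size_lookup backfill in init_ggc is easy to misread: I starts at the special size and walks downward, rewriting entries only while they still name the old next-power-of-two order, so exactly the interval (previous power of two, special size] is claimed. A standalone sketch, not GCC code, with a hypothetical extra order of size 96 squeezed between 64 and 128:

#include <stdio.h>

int
main (void)
{
  /* size_lookup[s] = order whose object size is the smallest one >= s;
     initialized here for pure powers of two (orders 0..7, sizes 1..128). */
  unsigned char size_lookup[129];
  for (int s = 1, order = 0; s <= 128; s++)
    {
      while ((1 << order) < s)
        order++;
      size_lookup[s] = order;
    }

  /* Hypothetical extra order 8 of size 96 claims the range (64, 96].  */
  int order = 8, i = 96;
  for (int o = size_lookup[i]; o == size_lookup[i]; --i)
    size_lookup[i] = order;

  printf ("size 65 -> %d, size 96 -> %d, size 97 -> %d\n",
          size_lookup[65], size_lookup[96], size_lookup[97]);
  /* Prints: size 65 -> 8, size 96 -> 8, size 97 -> 7.  */
  return 0;
}
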
c10b9b1c | 1814 | /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P |
1815 | reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ | |
1816 | ||
1817 | static void | |
6ec1f4e0 | 1818 | ggc_recalculate_in_use_p (page_entry *p) |
c10b9b1c | 1819 | { |
1820 | unsigned int i; | |
1821 | size_t num_objects; | |
1822 | ||
3cfec666 | 1823 | /* Because the past-the-end bit in in_use_p is always set, we |
c10b9b1c | 1824 | pretend there is one additional object. */ |
573aba85 | 1825 | num_objects = OBJECTS_IN_PAGE (p) + 1; |
c10b9b1c | 1826 | |
1827 | /* Reset the free object count. */ | |
1828 | p->num_free_objects = num_objects; | |
1829 | ||
1830 | /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */ | |
3cfec666 | 1831 | for (i = 0; |
2f6aecaf | 1832 | i < CEIL (BITMAP_SIZE (num_objects), |
1833 | sizeof (*p->in_use_p)); | |
c10b9b1c | 1834 | ++i) |
1835 | { | |
1836 | unsigned long j; | |
1837 | ||
1838 | /* Something is in use if it is marked, or if it was in use in a | |
1839 | context further down the context stack. */ | |
76e1b933 | 1840 | p->in_use_p[i] |= save_in_use_p (p)[i]; |
c10b9b1c | 1841 | |
1842 | /* Decrement the free object count for every object allocated. */ | |
1843 | for (j = p->in_use_p[i]; j; j >>= 1) | |
1844 | p->num_free_objects -= (j & 1); | |
1845 | } | |
1846 | ||
0d59b19d | 1847 | gcc_assert (p->num_free_objects < num_objects); |
c10b9b1c | 1848 | } |
911ab6b9 | 1849 | \f |
e3c4633e | 1850 | /* Unmark all objects. */ |
1851 | ||
c4e03242 | 1852 | static void |
6ec1f4e0 | 1853 | clear_marks (void) |
911ab6b9 | 1854 | { |
1855 | unsigned order; | |
1856 | ||
2f6aecaf | 1857 | for (order = 2; order < NUM_ORDERS; order++) |
911ab6b9 | 1858 | { |
911ab6b9 | 1859 | page_entry *p; |
1860 | ||
1861 | for (p = G.pages[order]; p != NULL; p = p->next) | |
1862 | { | |
573aba85 | 1863 | size_t num_objects = OBJECTS_IN_PAGE (p); |
1864 | size_t bitmap_size = BITMAP_SIZE (num_objects + 1); | |
1865 | ||
911ab6b9 | 1866 | /* The data should be page-aligned. */ |
337c992b | 1867 | gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1))); |
911ab6b9 | 1868 | |
1869 | /* Pages that aren't in the topmost context are not collected; | |
1870 | nevertheless, we need their in-use bit vectors to store GC | |
1871 | marks. So, back them up first. */ | |
c10b9b1c | 1872 | if (p->context_depth < G.context_depth) |
911ab6b9 | 1873 | { |
76e1b933 | 1874 | if (! save_in_use_p (p)) |
4077bf7a | 1875 | save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size); |
76e1b933 | 1876 | memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); |
911ab6b9 | 1877 | } |
1878 | ||
1879 | /* Reset the number of free objects and clear the
1880 | in-use bits. These will be adjusted by mark_obj. */ | |
1881 | p->num_free_objects = num_objects; | |
1882 | memset (p->in_use_p, 0, bitmap_size); | |
1883 | ||
1884 | /* Make sure the one-past-the-end bit is always set. */ | |
3cfec666 | 1885 | p->in_use_p[num_objects / HOST_BITS_PER_LONG] |
911ab6b9 | 1886 | = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); |
1887 | } | |
1888 | } | |
1889 | } | |
1890 | ||
92960204 | 1891 | /* Check if any blocks with a registered finalizer have become unmarked.
1892 | If so, run the finalizer and unregister it, because the block is about to
1893 | be freed. Note that no guarantee is made about the order in which
1894 | finalizers run, so touching other objects in GC memory is extremely unwise. */
1895 | ||
92f06184 | 1896 | static void |
1897 | ggc_handle_finalizers () | |
1898 | { | |
5517ecc9 | 1899 | unsigned dlen = G.finalizers.length(); |
1900 | for (unsigned d = G.context_depth; d < dlen; ++d) | |
92f06184 | 1901 | { |
5517ecc9 | 1902 | vec<finalizer> &v = G.finalizers[d]; |
1903 | unsigned length = v.length (); | |
1904 | for (unsigned int i = 0; i < length;) | |
92f06184 | 1905 | { |
5517ecc9 | 1906 | finalizer &f = v[i]; |
1907 | if (!ggc_marked_p (f.addr ())) | |
1908 | { | |
1909 | f.call (); | |
1910 | v.unordered_remove (i); | |
1911 | length--; | |
1912 | } | |
1913 | else | |
1914 | i++; | |
92f06184 | 1915 | } |
92f06184 | 1916 | } |
1917 | ||
5517ecc9 | 1918 | gcc_assert (dlen == G.vec_finalizers.length()); |
1919 | for (unsigned d = G.context_depth; d < dlen; ++d) | |
92f06184 | 1920 | { |
5517ecc9 | 1921 | vec<vec_finalizer> &vv = G.vec_finalizers[d]; |
1922 | unsigned length = vv.length (); | |
1923 | for (unsigned int i = 0; i < length;) | |
92f06184 | 1924 | { |
5517ecc9 | 1925 | vec_finalizer &f = vv[i]; |
1926 | if (!ggc_marked_p (f.addr ())) | |
1927 | { | |
1928 | f.call (); | |
1929 | vv.unordered_remove (i); | |
1930 | length--; | |
1931 | } | |
1932 | else | |
1933 | i++; | |
92f06184 | 1934 | } |
92f06184 | 1935 | } |
1936 | } | |
1937 | ||
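
The loops above deliberately do not advance I after a removal: vec::unordered_remove fills the vacated slot with the last element, so slot I holds a not-yet-examined entry and must be retested. A minimal sketch of the same pattern, not GCC code, on a plain array:

#include <stdio.h>

int
main (void)
{
  int v[] = { 1, 2, 3, 4, 5 };
  unsigned length = 5;

  for (unsigned i = 0; i < length;)
    if (v[i] % 2 == 0)          /* stand-in for "object is unmarked" */
      {
        v[i] = v[length - 1];   /* what unordered_remove does */
        length--;               /* slot i is new: do not advance i */
      }
    else
      i++;

  for (unsigned i = 0; i < length; i++)
    printf ("%d ", v[i]);       /* prints "1 5 3": order not preserved */
  printf ("\n");
  return 0;
}
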
e3c4633e | 1938 | /* Free all empty pages. Partially empty pages need no attention |
1939 | because the `mark' bit doubles as an `unused' bit. */ | |
1940 | ||
c4e03242 | 1941 | static void |
6ec1f4e0 | 1942 | sweep_pages (void) |
911ab6b9 | 1943 | { |
1944 | unsigned order; | |
1945 | ||
2f6aecaf | 1946 | for (order = 2; order < NUM_ORDERS; order++) |
911ab6b9 | 1947 | { |
1948 | /* The last page-entry to consider, regardless of entries | |
1949 | placed at the end of the list. */ | |
1950 | page_entry * const last = G.page_tails[order]; | |
1951 | ||
573aba85 | 1952 | size_t num_objects; |
9a2e8b0a | 1953 | size_t live_objects; |
911ab6b9 | 1954 | page_entry *p, *previous; |
1955 | int done; | |
3cfec666 | 1956 | |
911ab6b9 | 1957 | p = G.pages[order]; |
1958 | if (p == NULL) | |
1959 | continue; | |
1960 | ||
1961 | previous = NULL; | |
1962 | do | |
1963 | { | |
1964 | page_entry *next = p->next; | |
1965 | ||
1966 | /* Loop until all entries have been examined. */ | |
1967 | done = (p == last); | |
6ec1f4e0 | 1968 | |
573aba85 | 1969 | num_objects = OBJECTS_IN_PAGE (p); |
911ab6b9 | 1970 | |
9a2e8b0a | 1971 | /* Add all live objects on this page to the count of |
1972 | allocated memory. */ | |
1973 | live_objects = num_objects - p->num_free_objects; | |
1974 | ||
2f6aecaf | 1975 | G.allocated += OBJECT_SIZE (order) * live_objects; |
9a2e8b0a | 1976 | |
911ab6b9 | 1977 | /* Only objects on pages in the topmost context should get |
1978 | collected. */ | |
1979 | if (p->context_depth < G.context_depth) | |
1980 | ; | |
1981 | ||
1982 | /* Remove the page if it's empty. */ | |
9a2e8b0a | 1983 | else if (live_objects == 0) |
911ab6b9 | 1984 | { |
4a755ae7 | 1985 | /* If P was the first page in the list, then NEXT |
1986 | becomes the new first page in the list, otherwise | |
1987 | splice P out of the forward pointers. */ | |
911ab6b9 | 1988 | if (! previous) |
1989 | G.pages[order] = next; | |
1990 | else | |
1991 | previous->next = next; | |
48e1416a | 1992 | |
4a755ae7 | 1993 | /* Splice P out of the back pointers too. */ |
1994 | if (next) | |
1995 | next->prev = previous; | |
911ab6b9 | 1996 | |
1997 | /* Are we removing the last element? */ | |
1998 | if (p == G.page_tails[order]) | |
1999 | G.page_tails[order] = previous; | |
2000 | free_page (p); | |
2001 | p = previous; | |
2002 | } | |
2003 | ||
2004 | /* If the page is full, move it to the end. */ | |
2005 | else if (p->num_free_objects == 0) | |
2006 | { | |
2007 | /* Don't move it if it's already at the end. */ | |
2008 | if (p != G.page_tails[order]) | |
2009 | { | |
2010 | /* Move p to the end of the list. */ | |
2011 | p->next = NULL; | |
4a755ae7 | 2012 | p->prev = G.page_tails[order]; |
911ab6b9 | 2013 | G.page_tails[order]->next = p; |
2014 | ||
2015 | /* Update the tail pointer... */ | |
2016 | G.page_tails[order] = p; | |
2017 | ||
2018 | /* ... and the head pointer, if necessary. */ | |
2019 | if (! previous) | |
2020 | G.pages[order] = next; | |
2021 | else | |
2022 | previous->next = next; | |
4a755ae7 | 2023 | |
2024 | /* And update the backpointer in NEXT if necessary. */ | |
2025 | if (next) | |
2026 | next->prev = previous; | |
2027 | ||
911ab6b9 | 2028 | p = previous; |
2029 | } | |
2030 | } | |
2031 | ||
2032 | /* If we've fallen through to here, it's a page in the | |
2033 | topmost context that is neither full nor empty. Such a | |
2034 | page must precede pages at lesser context depth in the | |
2035 | list, so move it to the head. */ | |
2036 | else if (p != G.pages[order]) | |
2037 | { | |
2038 | previous->next = p->next; | |
4a755ae7 | 2039 | |
2040 | /* Update the backchain in the next node if it exists. */ | |
2041 | if (p->next) | |
2042 | p->next->prev = previous; | |
2043 | ||
2044 | /* Move P to the head of the list. */ | |
911ab6b9 | 2045 | p->next = G.pages[order]; |
4a755ae7 | 2046 | p->prev = NULL; |
2047 | G.pages[order]->prev = p; | |
2048 | ||
2049 | /* Update the head pointer. */ | |
911ab6b9 | 2050 | G.pages[order] = p; |
4a755ae7 | 2051 | |
911ab6b9 | 2052 | /* Are we moving the last element? */ |
2053 | if (G.page_tails[order] == p) | |
2054 | G.page_tails[order] = previous; | |
2055 | p = previous; | |
2056 | } | |
2057 | ||
2058 | previous = p; | |
2059 | p = next; | |
3cfec666 | 2060 | } |
911ab6b9 | 2061 | while (! done); |
c10b9b1c | 2062 | |
2063 | /* Now, restore the in_use_p vectors for any pages from contexts | |
2064 | other than the current one. */ | |
2065 | for (p = G.pages[order]; p; p = p->next) | |
2066 | if (p->context_depth != G.context_depth) | |
2067 | ggc_recalculate_in_use_p (p); | |
911ab6b9 | 2068 | } |
2069 | } | |
2070 | ||
2a3edec5 | 2071 | #ifdef ENABLE_GC_CHECKING |
e3c4633e | 2072 | /* Clobber all free objects. */ |
2073 | ||
c4e03242 | 2074 | static void |
6ec1f4e0 | 2075 | poison_pages (void) |
911ab6b9 | 2076 | { |
2077 | unsigned order; | |
2078 | ||
2f6aecaf | 2079 | for (order = 2; order < NUM_ORDERS; order++) |
911ab6b9 | 2080 | { |
2f6aecaf | 2081 | size_t size = OBJECT_SIZE (order); |
911ab6b9 | 2082 | page_entry *p; |
2083 | ||
2084 | for (p = G.pages[order]; p != NULL; p = p->next) | |
2085 | { | |
573aba85 | 2086 | size_t num_objects; |
911ab6b9 | 2087 | size_t i; |
2d517b2f | 2088 | |
2089 | if (p->context_depth != G.context_depth) | |
2090 | /* Since we don't do any collection for pages in pushed | |
2091 | contexts, there's no need to do any poisoning. And | |
2092 | besides, the IN_USE_P array isn't valid until we pop | |
2093 | contexts. */ | |
2094 | continue; | |
2095 | ||
573aba85 | 2096 | num_objects = OBJECTS_IN_PAGE (p); |
911ab6b9 | 2097 | for (i = 0; i < num_objects; i++) |
2098 | { | |
2099 | size_t word, bit; | |
2100 | word = i / HOST_BITS_PER_LONG; | |
2101 | bit = i % HOST_BITS_PER_LONG; | |
2102 | if (((p->in_use_p[word] >> bit) & 1) == 0) | |
dd359afe | 2103 | { |
2104 | char *object = p->page + i * size; | |
2105 | ||
2106 | /* Keep poison-by-write when we expect to use Valgrind, | |
2107 | so the exact same memory semantics are kept, in case
2108 | there are memory errors. We override this request | |
2109 | below. */ | |
a7779e75 | 2110 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object, |
2111 | size)); | |
dd359afe | 2112 | memset (object, 0xa5, size); |
2113 | ||
2114 | /* Drop the handle to avoid handle leak. */ | |
a7779e75 | 2115 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size)); |
dd359afe | 2116 | } |
911ab6b9 | 2117 | } |
2118 | } | |
2119 | } | |
2120 | } | |
c4e03242 | 2121 | #else |
2122 | #define poison_pages() | |
2123 | #endif | |
2124 | ||
2125 | #ifdef ENABLE_GC_ALWAYS_COLLECT | |
2126 | /* Validate that the reportedly free objects actually are. */ | |
2127 | ||
2128 | static void | |
2129 | validate_free_objects (void) | |
2130 | { | |
2131 | struct free_object *f, *next, *still_free = NULL; | |
2132 | ||
2133 | for (f = G.free_object_list; f ; f = next) | |
2134 | { | |
2135 | page_entry *pe = lookup_page_table_entry (f->object); | |
2136 | size_t bit, word; | |
2137 | ||
2138 | bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order); | |
2139 | word = bit / HOST_BITS_PER_LONG; | |
2140 | bit = bit % HOST_BITS_PER_LONG; | |
2141 | next = f->next; | |
2142 | ||
2143 | /* Make certain it isn't visible from any root. Notice that we | |
2144 | do this check before sweep_pages merges save_in_use_p. */ | |
0d59b19d | 2145 | gcc_assert (!(pe->in_use_p[word] & (1UL << bit))); |
c4e03242 | 2146 | |
2147 | /* If the object comes from an outer context, then retain the | |
2148 | free_object entry, so that we can verify that the address | |
2149 | isn't live on the stack in some outer context. */ | |
2150 | if (pe->context_depth != G.context_depth) | |
2151 | { | |
2152 | f->next = still_free; | |
2153 | still_free = f; | |
2154 | } | |
2155 | else | |
2156 | free (f); | |
2157 | } | |
2158 | ||
2159 | G.free_object_list = still_free; | |
2160 | } | |
2161 | #else | |
2162 | #define validate_free_objects() | |
911ab6b9 | 2163 | #endif |
2164 | ||
e3c4633e | 2165 | /* Top level mark-and-sweep routine. */ |
2166 | ||
911ab6b9 | 2167 | void |
6ec1f4e0 | 2168 | ggc_collect (void) |
911ab6b9 | 2169 | { |
911ab6b9 | 2170 | /* Avoid frequent unnecessary work by skipping collection if the |
2171 | total allocations haven't expanded much since the last | |
2172 | collection. */ | |
83142a4c | 2173 | float allocated_last_gc = |
2a3edec5 | 2174 | MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); |
2175 | ||
83142a4c | 2176 | float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; |
0ca9a7b6 | 2177 | if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect) |
911ab6b9 | 2178 | return; |
911ab6b9 | 2179 | |
74d2af64 | 2180 | timevar_push (TV_GC); |
911ab6b9 | 2181 | if (!quiet_flag) |
90856340 | 2182 | fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024); |
c4e03242 | 2183 | if (GGC_DEBUG_LEVEL >= 2) |
2184 | fprintf (G.debug_file, "BEGIN COLLECTING\n"); | |
911ab6b9 | 2185 | |
9a2e8b0a | 2186 | /* Zero the total allocated bytes. This will be recalculated in the |
2187 | sweep phase. */ | |
911ab6b9 | 2188 | G.allocated = 0; |
2189 | ||
3cfec666 | 2190 | /* Release the pages we freed the last time we collected, but didn't |
911ab6b9 | 2191 | reuse in the interim. */ |
2192 | release_pages (); | |
2193 | ||
598638e2 | 2194 | /* Indicate that we've seen collections at this context depth. */ |
2195 | G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; | |
2196 | ||
740cd0be | 2197 | invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); |
2198 | ||
8f359205 | 2199 | in_gc = true; |
911ab6b9 | 2200 | clear_marks (); |
2201 | ggc_mark_roots (); | |
92f06184 | 2202 | ggc_handle_finalizers (); |
ecd52ea9 | 2203 | |
2204 | if (GATHER_STATISTICS) | |
2205 | ggc_prune_overhead_list (); | |
2206 | ||
911ab6b9 | 2207 | poison_pages (); |
c4e03242 | 2208 | validate_free_objects (); |
e3c4633e | 2209 | sweep_pages (); |
2210 | ||
8f359205 | 2211 | in_gc = false; |
911ab6b9 | 2212 | G.allocated_last_gc = G.allocated; |
2213 | ||
740cd0be | 2214 | invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); |
2215 | ||
74d2af64 | 2216 | timevar_pop (TV_GC); |
911ab6b9 | 2217 | |
911ab6b9 | 2218 | if (!quiet_flag) |
74d2af64 | 2219 | fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); |
c4e03242 | 2220 | if (GGC_DEBUG_LEVEL >= 2) |
2221 | fprintf (G.debug_file, "END COLLECTING\n"); | |
911ab6b9 | 2222 | } |
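
The skip heuristic at the top of ggc_collect only collects once the heap has grown by ggc-min-expand percent over the larger of the post-collection size and ggc-min-heapsize (the latter given in kilobytes, hence the * 1024). A worked sketch, not GCC code, with hypothetical parameter values:

#include <stdio.h>

int
main (void)
{
  size_t allocated_last_gc = 100UL * 1024 * 1024; /* heap after last GC */
  size_t min_heapsize_kb = 65536;                 /* --param ggc-min-heapsize */
  int min_expand_percent = 30;                    /* --param ggc-min-expand */

  size_t floor_bytes = allocated_last_gc > min_heapsize_kb * 1024
                       ? allocated_last_gc : min_heapsize_kb * 1024;
  float min_expand = (float) floor_bytes * min_expand_percent / 100;

  size_t allocated = 120UL * 1024 * 1024;         /* current usage */
  if (allocated < floor_bytes + min_expand)
    printf ("skip: need more than %.0f bytes\n", floor_bytes + min_expand);
  else
    printf ("collect\n");
  return 0;
}
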
4e00b6fd | 2223 | |
4f78e0a8 | 2224 | /* Assume that all GGC memory is reachable and grow the limits for the next
2225 | collection. With checking, trigger GGC so -Q compilation outputs how much
2226 | memory really is reachable. */
2227 | ||
2228 | void | |
2229 | ggc_grow (void) | |
2230 | { | |
382ecba7 | 2231 | if (!flag_checking) |
2232 | G.allocated_last_gc = MAX (G.allocated_last_gc, | |
2233 | G.allocated); | |
2234 | else | |
2235 | ggc_collect (); | |
4f78e0a8 | 2236 | if (!quiet_flag) |
2237 | fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024); | |
2238 | } | |
2239 | ||
4e00b6fd | 2240 | void |
6ec1f4e0 | 2241 | ggc_print_statistics (void) |
4e00b6fd | 2242 | { |
2243 | struct ggc_statistics stats; | |
c10b9b1c | 2244 | unsigned int i; |
2a8997e8 | 2245 | size_t total_overhead = 0; |
4e00b6fd | 2246 | |
2247 | /* Clear the statistics. */ | |
3e9d8cee | 2248 | memset (&stats, 0, sizeof (stats)); |
3cfec666 | 2249 | |
4e00b6fd | 2250 | /* Make sure collection will really occur. */ |
2251 | G.allocated_last_gc = 0; | |
2252 | ||
2253 | /* Collect and print the statistics common across collectors. */ | |
2a8997e8 | 2254 | ggc_print_common_statistics (stderr, &stats); |
4e00b6fd | 2255 | |
c10b9b1c | 2256 | /* Release free pages so that we will not count the bytes allocated |
2257 | there as part of the total allocated memory. */ | |
2258 | release_pages (); | |
2259 | ||
3cfec666 | 2260 | /* Collect some information about the various sizes of |
4e00b6fd | 2261 | allocation. */ |
86736f9e | 2262 | fprintf (stderr, |
2263 | "Memory still allocated at the end of the compilation process\n"); | |
98cc198b | 2264 | fprintf (stderr, "%-8s %10s %10s %10s\n", |
f806fb68 | 2265 | "Size", "Allocated", "Used", "Overhead"); |
2f6aecaf | 2266 | for (i = 0; i < NUM_ORDERS; ++i) |
4e00b6fd | 2267 | { |
2268 | page_entry *p; | |
2269 | size_t allocated; | |
2270 | size_t in_use; | |
2a8997e8 | 2271 | size_t overhead; |
4e00b6fd | 2272 | |
2273 | /* Skip empty entries. */ | |
2274 | if (!G.pages[i]) | |
2275 | continue; | |
2276 | ||
2a8997e8 | 2277 | overhead = allocated = in_use = 0; |
4e00b6fd | 2278 | |
2279 | /* Figure out the total number of bytes allocated for objects of | |
2a8997e8 | 2280 | this size, and how many of them are actually in use. Also figure |
2281 | out how much memory the page table is using. */ | |
4e00b6fd | 2282 | for (p = G.pages[i]; p; p = p->next) |
2283 | { | |
2284 | allocated += p->bytes; | |
6ec1f4e0 | 2285 | in_use += |
573aba85 | 2286 | (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); |
2a8997e8 | 2287 | |
2288 | overhead += (sizeof (page_entry) - sizeof (long) | |
573aba85 | 2289 | + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); |
4e00b6fd | 2290 | } |
03fac02c | 2291 | fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " " |
2292 | PRsa (10) "\n", | |
2293 | (uint64_t)OBJECT_SIZE (i), | |
7a413494 | 2294 | SIZE_AMOUNT (allocated), |
2295 | SIZE_AMOUNT (in_use), | |
2296 | SIZE_AMOUNT (overhead)); | |
2a8997e8 | 2297 | total_overhead += overhead; |
4e00b6fd | 2298 | } |
03fac02c | 2299 | fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n", |
7a413494 | 2300 | "Total", |
2301 | SIZE_AMOUNT (G.bytes_mapped), | |
2302 | SIZE_AMOUNT (G.allocated), | |
2303 | SIZE_AMOUNT (total_overhead)); | |
b7257530 | 2304 | |
ecd52ea9 | 2305 | if (GATHER_STATISTICS) |
2306 | { | |
98cc198b | 2307 | fprintf (stderr, "\nTotal allocations and overheads during " |
2308 | "the compilation process\n"); | |
ecd52ea9 | 2309 | |
03fac02c | 2310 | fprintf (stderr, "Total Overhead: " |
2311 | PRsa (9) "\n", | |
7a413494 | 2312 | SIZE_AMOUNT (G.stats.total_overhead)); |
03fac02c | 2313 | fprintf (stderr, "Total Allocated: " |
2314 | PRsa (9) "\n", | |
7a413494 | 2315 | SIZE_AMOUNT (G.stats.total_allocated)); |
2316 | ||
03fac02c | 2317 | fprintf (stderr, "Total Overhead under 32B: " |
2318 | PRsa (9) "\n", | |
7a413494 | 2319 | SIZE_AMOUNT (G.stats.total_overhead_under32)); |
03fac02c | 2320 | fprintf (stderr, "Total Allocated under 32B: " |
2321 | PRsa (9) "\n", | |
7a413494 | 2322 | SIZE_AMOUNT (G.stats.total_allocated_under32)); |
03fac02c | 2323 | fprintf (stderr, "Total Overhead under 64B: " |
2324 | PRsa (9) "\n", | |
7a413494 | 2325 | SIZE_AMOUNT (G.stats.total_overhead_under64)); |
03fac02c | 2326 | fprintf (stderr, "Total Allocated under 64B: " |
2327 | PRsa (9) "\n", | |
7a413494 | 2328 | SIZE_AMOUNT (G.stats.total_allocated_under64)); |
03fac02c | 2329 | fprintf (stderr, "Total Overhead under 128B: " |
2330 | PRsa (9) "\n", | |
7a413494 | 2331 | SIZE_AMOUNT (G.stats.total_overhead_under128)); |
03fac02c | 2332 | fprintf (stderr, "Total Allocated under 128B: " |
2333 | PRsa (9) "\n", | |
7a413494 | 2334 | SIZE_AMOUNT (G.stats.total_allocated_under128)); |
ecd52ea9 | 2335 | |
2336 | for (i = 0; i < NUM_ORDERS; i++) | |
2337 | if (G.stats.total_allocated_per_order[i]) | |
2338 | { | |
03fac02c | 2339 | fprintf (stderr, "Total Overhead page size %9" PRIu64 ": " |
2340 | PRsa (9) "\n", | |
2341 | (uint64_t)OBJECT_SIZE (i), | |
7a413494 | 2342 | SIZE_AMOUNT (G.stats.total_overhead_per_order[i])); |
03fac02c | 2343 | fprintf (stderr, "Total Allocated page size %9" PRIu64 ": " |
2344 | PRsa (9) "\n", | |
2345 | (uint64_t)OBJECT_SIZE (i), | |
7a413494 | 2346 | SIZE_AMOUNT (G.stats.total_allocated_per_order[i])); |
ecd52ea9 | 2347 | } |
b7257530 | 2348 | } |
4e00b6fd | 2349 | } |
573aba85 | 2350 | \f |
0b09525f | 2351 | struct ggc_pch_ondisk |
2352 | { | |
2353 | unsigned totals[NUM_ORDERS]; | |
2354 | }; | |
2355 | ||
573aba85 | 2356 | struct ggc_pch_data |
2357 | { | |
0b09525f | 2358 | struct ggc_pch_ondisk d; |
337c992b | 2359 | uintptr_t base[NUM_ORDERS]; |
573aba85 | 2360 | size_t written[NUM_ORDERS]; |
2361 | }; | |
2362 | ||
2363 | struct ggc_pch_data * | |
6ec1f4e0 | 2364 | init_ggc_pch (void) |
573aba85 | 2365 | { |
4c36ffe6 | 2366 | return XCNEW (struct ggc_pch_data); |
573aba85 | 2367 | } |
2368 | ||
6ec1f4e0 | 2369 | void |
2370 | ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | |
5cc13354 | 2371 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
573aba85 | 2372 | { |
2373 | unsigned order; | |
2374 | ||
f68513d3 | 2375 | if (size < NUM_SIZE_LOOKUP) |
573aba85 | 2376 | order = size_lookup[size]; |
2377 | else | |
2378 | { | |
1c2a6a66 | 2379 | order = 10; |
573aba85 | 2380 | while (size > OBJECT_SIZE (order)) |
2381 | order++; | |
2382 | } | |
6ec1f4e0 | 2383 | |
573aba85 | 2384 | d->d.totals[order]++; |
2385 | } | |
6ec1f4e0 | 2386 | |
573aba85 | 2387 | size_t |
6ec1f4e0 | 2388 | ggc_pch_total_size (struct ggc_pch_data *d) |
573aba85 | 2389 | { |
2390 | size_t a = 0; | |
2391 | unsigned i; | |
2392 | ||
2393 | for (i = 0; i < NUM_ORDERS; i++) | |
25a28b44 | 2394 | a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
573aba85 | 2395 | return a; |
2396 | } | |
2397 | ||
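
ggc_pch_total_size mirrors the on-disk layout: each order's objects are written back to back and the run is padded to a page boundary, so the mapped region is the sum of page-aligned per-order extents (the same arithmetic ggc_pch_this_base uses to carve out the base addresses). A small arithmetic sketch, not GCC code, assuming a 4096-byte page and two hypothetical orders:

#include <stdio.h>

#define TOY_PAGESIZE 4096
#define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGESIZE - 1) & ~(size_t) (TOY_PAGESIZE - 1))

int
main (void)
{
  size_t totals[2] = { 1000, 10 };        /* d.totals: objects per order */
  size_t object_size[2] = { 16, 4096 };   /* OBJECT_SIZE per order */
  size_t a = 0;

  for (int i = 0; i < 2; i++)
    a += TOY_PAGE_ALIGN (totals[i] * object_size[i]);

  /* 1000 * 16 = 16000 rounds up to 16384; 10 * 4096 is already aligned,
     so the total mapped size is 16384 + 40960 = 57344.  */
  printf ("total: %zu\n", a);
  return 0;
}
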
2398 | void | |
6ec1f4e0 | 2399 | ggc_pch_this_base (struct ggc_pch_data *d, void *base) |
573aba85 | 2400 | { |
337c992b | 2401 | uintptr_t a = (uintptr_t) base; |
573aba85 | 2402 | unsigned i; |
6ec1f4e0 | 2403 | |
573aba85 | 2404 | for (i = 0; i < NUM_ORDERS; i++) |
2405 | { | |
2406 | d->base[i] = a; | |
25a28b44 | 2407 | a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
573aba85 | 2408 | } |
2409 | } | |
2410 | ||
2411 | ||
2412 | char * | |
6ec1f4e0 | 2413 | ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
5cc13354 | 2414 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
573aba85 | 2415 | { |
2416 | unsigned order; | |
2417 | char *result; | |
6ec1f4e0 | 2418 | |
f68513d3 | 2419 | if (size < NUM_SIZE_LOOKUP) |
573aba85 | 2420 | order = size_lookup[size]; |
2421 | else | |
2422 | { | |
1c2a6a66 | 2423 | order = 10; |
573aba85 | 2424 | while (size > OBJECT_SIZE (order)) |
2425 | order++; | |
2426 | } | |
2427 | ||
2428 | result = (char *) d->base[order]; | |
2429 | d->base[order] += OBJECT_SIZE (order); | |
2430 | return result; | |
2431 | } | |
2432 | ||
6ec1f4e0 | 2433 | void |
2434 | ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | |
2435 | FILE *f ATTRIBUTE_UNUSED) | |
573aba85 | 2436 | { |
2437 | /* Nothing to do. */ | |
2438 | } | |
2439 | ||
2440 | void | |
98044fac | 2441 | ggc_pch_write_object (struct ggc_pch_data *d, |
6ec1f4e0 | 2442 | FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, |
7d60cc60 | 2443 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
573aba85 | 2444 | { |
2445 | unsigned order; | |
1a7c0ccb | 2446 | static const char emptyBytes[256] = { 0 }; |
573aba85 | 2447 | |
f68513d3 | 2448 | if (size < NUM_SIZE_LOOKUP) |
573aba85 | 2449 | order = size_lookup[size]; |
2450 | else | |
2451 | { | |
1c2a6a66 | 2452 | order = 10; |
573aba85 | 2453 | while (size > OBJECT_SIZE (order)) |
2454 | order++; | |
2455 | } | |
6ec1f4e0 | 2456 | |
573aba85 | 2457 | if (fwrite (x, size, 1, f) != 1) |
85b9be9b | 2458 | fatal_error (input_location, "cannot write PCH file: %m"); |
573aba85 | 2459 | |
89e22a52 | 2460 | /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the |
e9efa031 | 2461 | object out to OBJECT_SIZE(order). This happens for strings. */ |
89e22a52 | 2462 | |
2463 | if (size != OBJECT_SIZE (order)) | |
2464 | { | |
9af5ce0c | 2465 | unsigned padding = OBJECT_SIZE (order) - size; |
89e22a52 | 2466 | |
2467 | /* To speed small writes, we use a nulled-out array that's larger | |
2468 | than most padding requests as the source for our null bytes. This | |
2469 | permits us to do the padding with fwrite() rather than fseek(), and | |
822e391f | 2470 | limits the chance the OS may try to flush any outstanding writes. */ |
9af5ce0c | 2471 | if (padding <= sizeof (emptyBytes)) |
89e22a52 | 2472 | { |
2473 | if (fwrite (emptyBytes, 1, padding, f) != padding) | |
85b9be9b | 2474 | fatal_error (input_location, "cannot write PCH file"); |
89e22a52 | 2475 | } |
2476 | else | |
2477 | { | |
e9efa031 | 2478 | /* Larger than our buffer? Just default to fseek. */ |
89e22a52 | 2479 | if (fseek (f, padding, SEEK_CUR) != 0) |
85b9be9b | 2480 | fatal_error (input_location, "cannot write PCH file"); |
89e22a52 | 2481 | } |
2482 | } | |
573aba85 | 2483 | |
2484 | d->written[order]++; | |
2485 | if (d->written[order] == d->d.totals[order] | |
2486 | && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), | |
2487 | G.pagesize), | |
2488 | SEEK_CUR) != 0) | |
85b9be9b | 2489 | fatal_error (input_location, "cannot write PCH file: %m"); |
573aba85 | 2490 | } |
2491 | ||
2492 | void | |
6ec1f4e0 | 2493 | ggc_pch_finish (struct ggc_pch_data *d, FILE *f) |
573aba85 | 2494 | { |
2495 | if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | |
85b9be9b | 2496 | fatal_error (input_location, "cannot write PCH file: %m"); |
573aba85 | 2497 | free (d); |
2498 | } | |
2499 | ||
76e1b933 | 2500 | /* Move the PCH PTE entries just added to the end of by_depth to the
2501 | front. */ | |
2502 | ||
2503 | static void | |
6ec1f4e0 | 2504 | move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) |
76e1b933 | 2505 | { |
76e1b933 | 2506 | /* First, we swap the new entries to the front of the varrays. */ |
2507 | page_entry **new_by_depth; | |
2508 | unsigned long **new_save_in_use; | |
2509 | ||
4c36ffe6 | 2510 | new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
2511 | new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); | |
76e1b933 | 2512 | |
2513 | memcpy (&new_by_depth[0], | |
2514 | &G.by_depth[count_old_page_tables], | |
2515 | count_new_page_tables * sizeof (void *)); | |
2516 | memcpy (&new_by_depth[count_new_page_tables], | |
2517 | &G.by_depth[0], | |
2518 | count_old_page_tables * sizeof (void *)); | |
2519 | memcpy (&new_save_in_use[0], | |
2520 | &G.save_in_use[count_old_page_tables], | |
2521 | count_new_page_tables * sizeof (void *)); | |
2522 | memcpy (&new_save_in_use[count_new_page_tables], | |
2523 | &G.save_in_use[0], | |
2524 | count_old_page_tables * sizeof (void *)); | |
2525 | ||
2526 | free (G.by_depth); | |
2527 | free (G.save_in_use); | |
6ec1f4e0 | 2528 | |
76e1b933 | 2529 | G.by_depth = new_by_depth; |
2530 | G.save_in_use = new_save_in_use; | |
2531 | ||
2532 | /* Now update all the index_by_depth fields. */ | |
3df92687 | 2533 | for (unsigned i = G.by_depth_in_use; i--;) |
76e1b933 | 2534 | { |
3df92687 | 2535 | page_entry *p = G.by_depth[i]; |
2536 | p->index_by_depth = i; | |
76e1b933 | 2537 | } |
2538 | ||
2539 | /* And last, we update the depth pointers in G.depth. The first | |
2540 | entry is already 0, and context 0 entries always start at index | |
2541 | 0, so there is nothing to update in the first slot. We need a | |
2542 | second slot only if we have old ptes, and if we do, they start
2543 | at index count_new_page_tables. */ | |
2544 | if (count_old_page_tables) | |
2545 | push_depth (count_new_page_tables); | |
2546 | } | |
2547 | ||
573aba85 | 2548 | void |
6ec1f4e0 | 2549 | ggc_pch_read (FILE *f, void *addr) |
573aba85 | 2550 | { |
2551 | struct ggc_pch_ondisk d; | |
2552 | unsigned i; | |
4077bf7a | 2553 | char *offs = (char *) addr; |
76e1b933 | 2554 | unsigned long count_old_page_tables; |
2555 | unsigned long count_new_page_tables; | |
2556 | ||
2557 | count_old_page_tables = G.by_depth_in_use; | |
2558 | ||
2559 | /* We've just read in a PCH file. So, every object that used to be | |
2560 | allocated is now free. */ | |
573aba85 | 2561 | clear_marks (); |
32bbbaac | 2562 | #ifdef ENABLE_GC_CHECKING |
573aba85 | 2563 | poison_pages (); |
2564 | #endif | |
3fff4d99 | 2565 | /* Since we free all the allocated objects, the free list becomes |
2566 | useless. Validate it now, which will also clear it. */ | |
9af5ce0c | 2567 | validate_free_objects (); |
573aba85 | 2568 | |
2569 | /* No object read from a PCH file should ever be freed. So, set the | |
2570 | context depth to 1, and set the depth of all the currently-allocated | |
2571 | pages to be 1 too. PCH pages will have depth 0. */ | |
0d59b19d | 2572 | gcc_assert (!G.context_depth); |
573aba85 | 2573 | G.context_depth = 1; |
5517ecc9 | 2574 | /* Allocate space for the depth 1 finalizers. */ |
2575 | G.finalizers.safe_push (vNULL); | |
2576 | G.vec_finalizers.safe_push (vNULL); | |
2577 | gcc_assert (G.finalizers.length() == 2); | |
573aba85 | 2578 | for (i = 0; i < NUM_ORDERS; i++) |
2579 | { | |
2580 | page_entry *p; | |
2581 | for (p = G.pages[i]; p != NULL; p = p->next) | |
2582 | p->context_depth = G.context_depth; | |
2583 | } | |
2584 | ||
2585 | /* Allocate the appropriate page-table entries for the pages read from | |
2586 | the PCH file. */ | |
2587 | if (fread (&d, sizeof (d), 1, f) != 1) | |
85b9be9b | 2588 | fatal_error (input_location, "cannot read PCH file: %m"); |
6ec1f4e0 | 2589 | |
573aba85 | 2590 | for (i = 0; i < NUM_ORDERS; i++) |
2591 | { | |
2592 | struct page_entry *entry; | |
2593 | char *pte; | |
2594 | size_t bytes; | |
2595 | size_t num_objs; | |
2596 | size_t j; | |
76e1b933 | 2597 | |
573aba85 | 2598 | if (d.totals[i] == 0) |
2599 | continue; | |
76e1b933 | 2600 | |
25a28b44 | 2601 | bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i)); |
573aba85 | 2602 | num_objs = bytes / OBJECT_SIZE (i); |
4077bf7a | 2603 | entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) |
2604 | - sizeof (long) | |
2605 | + BITMAP_SIZE (num_objs + 1))); | |
573aba85 | 2606 | entry->bytes = bytes; |
2607 | entry->page = offs; | |
2608 | entry->context_depth = 0; | |
2609 | offs += bytes; | |
2610 | entry->num_free_objects = 0; | |
2611 | entry->order = i; | |
2612 | ||
6ec1f4e0 | 2613 | for (j = 0; |
573aba85 | 2614 | j + HOST_BITS_PER_LONG <= num_objs + 1; |
2615 | j += HOST_BITS_PER_LONG) | |
2616 | entry->in_use_p[j / HOST_BITS_PER_LONG] = -1; | |
2617 | for (; j < num_objs + 1; j++) | |
6ec1f4e0 | 2618 | entry->in_use_p[j / HOST_BITS_PER_LONG] |
573aba85 | 2619 | |= 1L << (j % HOST_BITS_PER_LONG); |
2620 | ||
6ec1f4e0 | 2621 | for (pte = entry->page; |
2622 | pte < entry->page + entry->bytes; | |
573aba85 | 2623 | pte += G.pagesize) |
2624 | set_page_table_entry (pte, entry); | |
2625 | ||
2626 | if (G.page_tails[i] != NULL) | |
2627 | G.page_tails[i]->next = entry; | |
2628 | else | |
2629 | G.pages[i] = entry; | |
2630 | G.page_tails[i] = entry; | |
76e1b933 | 2631 | |
2632 | /* We start off by just adding all the new information to the | |
2633 | end of the varrays; later, we will move the new information
2634 | to the front of the varrays, as the PCH page tables are at | |
2635 | context 0. */ | |
2636 | push_by_depth (entry, 0); | |
573aba85 | 2637 | } |
2638 | ||
76e1b933 | 2639 | /* Now, we update the various data structures that speed page table |
2640 | handling. */ | |
2641 | count_new_page_tables = G.by_depth_in_use - count_old_page_tables; | |
2642 | ||
2643 | move_ptes_to_front (count_old_page_tables, count_new_page_tables); | |
2644 | ||
573aba85 | 2645 | /* Update the statistics. */ |
2646 | G.allocated = G.allocated_last_gc = offs - (char *)addr; | |
2647 | } |