/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alias.h"
#include "tree.h"
#include "rtl.h"
#include "memmodel.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32        |      |
       msb +----------------+----+------+------+ lsb
                            |      |
                         PAGE_L1_BITS
                            |
                            |      PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

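/* Worked example, assuming a host with 4K pages (G.lg_pagesize == 12,
   hence PAGE_L2_BITS == 12): for p == 0x12345678, LOOKUP_L1 (p)
   extracts the top eight bits, 0x12, and LOOKUP_L2 (p) the next
   twelve, (0x12345678 >> 12) & 0xfff == 0x345, so the page's entry is
   found at base[0x12][0x345].  */
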
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (class loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)

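/* Quick check of the rounding macros: ROUND_UP_VALUE (10, 8) == 6,
   since 10 + 6 == 16 is the next multiple of 8; on a host with 4K
   pages, PAGE_ALIGN (5000) == 8192.  */
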
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};

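/* As an example of the classes above: a vec_finalizer registered for
   n == 4 objects of s == 16 bytes starting at ADDR will, when its
   call () method runs, invoke the callback at ADDR, ADDR + 16,
   ADDR + 32 and ADDR + 48.  */
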
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

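/* E.g. with 64-bit longs, BITMAP_SIZE (100) == CEIL (100, 64)
   * sizeof (long) == 2 * 8 == 16 bytes: one in-use bit per object,
   rounded up to whole longs.  */
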
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated in GC return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order),
	     (void *) page, (void *) (page + entry_size - 1));

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     (void *) entry->page, (void *) (entry->page + entry->bytes - 1));

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
  size_t n1 = 0;
  size_t n2 = 0;
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  n1 += len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
	 unnecessarily.  */
      G.bytes_mapped -= len;
      n2 += len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      n1 += len;
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	n1 += g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
  if (!quiet_flag && (n1 || n2))
    {
      fprintf (stderr, " {GC");
      if (n1)
	fprintf (stderr, " released %luk", (unsigned long)(n1 / 1024));
      if (n2)
	fprintf (stderr, " madv_dontneed %luk", (unsigned long)(n2 / 1024));
      fprintf (stderr, "}");
    }
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

2a304777 JM |
1254 | /* Push a finalizer onto the appropriate vec. */ |
1255 | ||
1256 | static void | |
1257 | add_finalizer (void *result, void (*f)(void *), size_t s, size_t n) | |
1258 | { | |
1259 | if (f == NULL) | |
1260 | /* No finalizer. */; | |
1261 | else if (n == 1) | |
1262 | { | |
1263 | finalizer fin (result, f); | |
1264 | G.finalizers[G.context_depth].safe_push (fin); | |
1265 | } | |
1266 | else | |
1267 | { | |
1268 | vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n); | |
1269 | G.vec_finalizers[G.context_depth].safe_push (fin); | |
1270 | } | |
1271 | } | |
1272 | ||
aa40083d | 1273 | /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ |
cb2ec151 | 1274 | |
005537df | 1275 | void * |
de49ce19 TS |
1276 | ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n |
1277 | MEM_STAT_DECL) | |
21341cfd | 1278 | { |
685fe032 | 1279 | size_t order, word, bit, object_offset, object_size; |
21341cfd AS |
1280 | struct page_entry *entry; |
1281 | void *result; | |
1282 | ||
b9bd6f74 | 1283 | ggc_round_alloc_size_1 (size, &order, &object_size); |
21341cfd AS |
1284 | |
1285 | /* If there are non-full pages for this size allocation, they are at | |
1286 | the head of the list. */ | |
1287 | entry = G.pages[order]; | |
1288 | ||
1289 | /* If there is no page for this object size, or all pages in this | |
1290 | context are full, allocate a new page. */ | |
4934cc53 | 1291 | if (entry == NULL || entry->num_free_objects == 0) |
21341cfd AS |
1292 | { |
1293 | struct page_entry *new_entry; | |
1294 | new_entry = alloc_page (order); | |
589005ff | 1295 | |
c4775f82 MS |
1296 | new_entry->index_by_depth = G.by_depth_in_use; |
1297 | push_by_depth (new_entry, 0); | |
1298 | ||
1299 | /* We can skip context depths, if we do, make sure we go all the | |
1300 | way to the new depth. */ | |
1301 | while (new_entry->context_depth >= G.depth_in_use) | |
1302 | push_depth (G.by_depth_in_use-1); | |
1303 | ||
9bf793f9 JL |
1304 | /* If this is the only entry, it's also the tail. If it is not |
1305 | the only entry, then we must update the PREV pointer of the | |
1306 | ENTRY (G.pages[order]) to point to our new page entry. */ | |
21341cfd AS |
1307 | if (entry == NULL) |
1308 | G.page_tails[order] = new_entry; | |
9bf793f9 JL |
1309 | else |
1310 | entry->prev = new_entry; | |
589005ff | 1311 | |
9bf793f9 JL |
1312 | /* Put new pages at the head of the page list. By definition the |
1313 | entry at the head of the list always has a NULL pointer. */ | |
21341cfd | 1314 | new_entry->next = entry; |
9bf793f9 | 1315 | new_entry->prev = NULL; |
21341cfd AS |
1316 | entry = new_entry; |
1317 | G.pages[order] = new_entry; | |
1318 | ||
1319 | /* For a new page, we know the word and bit positions (in the | |
1320 | in_use bitmap) of the first available object -- they're zero. */ | |
1321 | new_entry->next_bit_hint = 1; | |
1322 | word = 0; | |
1323 | bit = 0; | |
1324 | object_offset = 0; | |
1325 | } | |
1326 | else | |
1327 | { | |
1328 | /* First try to use the hint left from the previous allocation | |
1329 | to locate a clear bit in the in-use bitmap. We've made sure | |
1330 | that the one-past-the-end bit is always set, so if the hint | |
1331 | has run over, this test will fail. */ | |
1332 | unsigned hint = entry->next_bit_hint; | |
1333 | word = hint / HOST_BITS_PER_LONG; | |
1334 | bit = hint % HOST_BITS_PER_LONG; | |
589005ff | 1335 | |
21341cfd AS |
1336 | /* If the hint didn't work, scan the bitmap from the beginning. */ |
1337 | if ((entry->in_use_p[word] >> bit) & 1) | |
1338 | { | |
1339 | word = bit = 0; | |
1340 | while (~entry->in_use_p[word] == 0) | |
1341 | ++word; | |
6f0947e4 SB |
1342 | |
1343 | #if GCC_VERSION >= 3004 | |
1344 | bit = __builtin_ctzl (~entry->in_use_p[word]); | |
1345 | #else | |
21341cfd AS |
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                         result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
        {
          G.stats.total_overhead_under32 += overhead;
          G.stats.total_allocated_under32 += object_size;
        }
      if (size <= 64)
        {
          G.stats.total_overhead_under64 += overhead;
          G.stats.total_allocated_under64 += object_size;
        }
      if (size <= 128)
        {
          G.stats.total_overhead_under128 += overhead;
          G.stats.total_allocated_under128 += object_size;
        }
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}
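
/* A compact illustration of the bookkeeping above (hypothetical
   numbers, 64-bit longs): on an order-5 page (32-byte objects) whose
   first free slot is object 70, the hint search ends with word == 1
   and bit == 6, so hint == 70, object_offset == 70 * 32 == 2240, and
   the in-use bit is set by in_use_p[1] |= 1UL << 6.  */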
1457 | ||
dae4174e TT |
1458 | /* Mark function for strings. */ |
1459 | ||
1460 | void | |
1461 | gt_ggc_m_S (const void *p) | |
1462 | { | |
1463 | page_entry *entry; | |
1464 | unsigned bit, word; | |
1465 | unsigned long mask; | |
1466 | unsigned long offset; | |
1467 | ||
acb14262 | 1468 | if (!p) |
dae4174e TT |
1469 | return; |
1470 | ||
acb14262 RB |
1471 | /* Look up the page on which the object is alloced. If it was not |
1472 | GC allocated, gracefully bail out. */ | |
1473 | entry = safe_lookup_page_table_entry (p); | |
1474 | if (!entry) | |
1475 | return; | |
dae4174e TT |
1476 | |
1477 | /* Calculate the index of the object on the page; this is its bit | |
1478 | position in the in_use_p bitmap. Note that because a char* might | |
1479 | point to the middle of an object, we need special code here to | |
1480 | make sure P points to the start of an object. */ | |
1481 | offset = ((const char *) p - entry->page) % object_size_table[entry->order]; | |
1482 | if (offset) | |
1483 | { | |
1484 | /* Here we've seen a char* which does not point to the beginning | |
1485 | of an allocated object. We assume it points to the middle of | |
1486 | a STRING_CST. */ | |
1487 | gcc_assert (offset == offsetof (struct tree_string, str)); | |
1488 | p = ((const char *) p) - offset; | |
d3bfe4de | 1489 | gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); |
dae4174e TT |
1490 | return; |
1491 | } | |
1492 | ||
1493 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); | |
1494 | word = bit / HOST_BITS_PER_LONG; | |
1495 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
1496 | ||
1497 | /* If the bit was previously set, skip it. */ | |
1498 | if (entry->in_use_p[word] & mask) | |
1499 | return; | |
1500 | ||
1501 | /* Otherwise set it, and decrement the free object count. */ | |
1502 | entry->in_use_p[word] |= mask; | |
1503 | entry->num_free_objects -= 1; | |
1504 | ||
1505 | if (GGC_DEBUG_LEVEL >= 4) | |
1506 | fprintf (G.debug_file, "Marking %p\n", p); | |
1507 | ||
1508 | return; | |
1509 | } | |
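
/* Offset-check example (hypothetical layout): if STRING_CST nodes live
   in a 64-byte order and the str[] payload begins 20 bytes into struct
   tree_string, then a char* to the string's first character yields
   offset == 20 == offsetof (struct tree_string, str); the code backs P
   up by those 20 bytes and marks the whole tree node instead.  */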
1510 | ||
0823efed DN |
1511 | |
1512 | /* User-callable entry points for marking string X. */ | |
1513 | ||
1514 | void | |
1515 | gt_ggc_mx (const char *& x) | |
1516 | { | |
1517 | gt_ggc_m_S (x); | |
1518 | } | |
1519 | ||
1520 | void | |
1521 | gt_ggc_mx (unsigned char *& x) | |
1522 | { | |
1523 | gt_ggc_m_S (x); | |
1524 | } | |
1525 | ||
1526 | void | |
1527 | gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED) | |
1528 | { | |
1529 | } | |
1530 | ||
cb2ec151 | 1531 | /* If P is not marked, marks it and return false. Otherwise return true. |
21341cfd AS |
1532 | P must have been allocated by the GC allocator; it mustn't point to |
1533 | static objects, stack variables, or memory allocated with malloc. */ | |
cb2ec151 | 1534 | |
005537df | 1535 | int |
20c1dc5e | 1536 | ggc_set_mark (const void *p) |
21341cfd AS |
1537 | { |
1538 | page_entry *entry; | |
1539 | unsigned bit, word; | |
1540 | unsigned long mask; | |
1541 | ||
1542 | /* Look up the page on which the object is alloced. If the object | |
1543 | wasn't allocated by the collector, we'll probably die. */ | |
74c937ca | 1544 | entry = lookup_page_table_entry (p); |
282899df | 1545 | gcc_assert (entry); |
21341cfd AS |
1546 | |
1547 | /* Calculate the index of the object on the page; this is its bit | |
1548 | position in the in_use_p bitmap. */ | |
8537ed68 | 1549 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
21341cfd AS |
1550 | word = bit / HOST_BITS_PER_LONG; |
1551 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
589005ff | 1552 | |
dc297297 | 1553 | /* If the bit was previously set, skip it. */ |
21341cfd AS |
1554 | if (entry->in_use_p[word] & mask) |
1555 | return 1; | |
1556 | ||
1557 | /* Otherwise set it, and decrement the free object count. */ | |
1558 | entry->in_use_p[word] |= mask; | |
1559 | entry->num_free_objects -= 1; | |
1560 | ||
21341cfd AS |
1561 | if (GGC_DEBUG_LEVEL >= 4) |
1562 | fprintf (G.debug_file, "Marking %p\n", p); | |
1563 | ||
1564 | return 0; | |
1565 | } | |
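
/* A sketch of the convention GTY-generated markers follow (assumed
   shape with a hypothetical struct my_node, not literal generated
   code): the nonzero return on an already-marked object is what
   terminates cycles in the object graph.

     static void
     mark_my_node (struct my_node *n)
     {
       if (n && !ggc_set_mark (n))
         {
           gt_ggc_m_S (n->name);      /- strings have their own marker -/
           mark_my_node (n->next);    /- recurse into pointer fields -/
         }
     }
*/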
1566 | ||
589005ff | 1567 | /* Return 1 if P has been marked, zero otherwise. |
4c160717 RK |
1568 | P must have been allocated by the GC allocator; it mustn't point to |
1569 | static objects, stack variables, or memory allocated with malloc. */ | |
1570 | ||
1571 | int | |
20c1dc5e | 1572 | ggc_marked_p (const void *p) |
4c160717 RK |
1573 | { |
1574 | page_entry *entry; | |
1575 | unsigned bit, word; | |
1576 | unsigned long mask; | |
1577 | ||
1578 | /* Look up the page on which the object is alloced. If the object | |
1579 | wasn't allocated by the collector, we'll probably die. */ | |
1580 | entry = lookup_page_table_entry (p); | |
282899df | 1581 | gcc_assert (entry); |
4c160717 RK |
1582 | |
1583 | /* Calculate the index of the object on the page; this is its bit | |
1584 | position in the in_use_p bitmap. */ | |
8537ed68 | 1585 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); |
4c160717 RK |
1586 | word = bit / HOST_BITS_PER_LONG; |
1587 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
589005ff | 1588 | |
a4b5b2ae | 1589 | return (entry->in_use_p[word] & mask) != 0; |
4c160717 RK |
1590 | } |
1591 | ||
cb2ec151 RH |
1592 | /* Return the size of the gc-able object P. */ |
1593 | ||
3277221c | 1594 | size_t |
20c1dc5e | 1595 | ggc_get_size (const void *p) |
3277221c MM |
1596 | { |
1597 | page_entry *pe = lookup_page_table_entry (p); | |
2be510b8 | 1598 | return OBJECT_SIZE (pe->order); |
3277221c | 1599 | } |

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
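
/* A minimal usage sketch (hypothetical size): explicit freeing is an
   optimization only -- dropping the last reference and letting
   ggc_collect () reclaim the object is always correct too.

     void *buf = ggc_alloc_atomic (128);
     ... use buf ...
     ggc_free (buf);   /- returns the slot to its page's free set -/
*/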
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
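
/* A worked instance of the exact-division trick, assuming a 32-bit
   size_t for brevity: OBJECT_SIZE == 24 gives e == 3 (24 == 8 * 3) and
   inv == 0xAAAAAAAB, the inverse of the odd part 3 modulo 2^32, since
   3 * 0xAAAAAAAB == 2 * 2^32 + 1.  An object at byte offset 120 then
   maps to index (120 >> 3) * 0xAAAAAAAB mod 2^32 == 15 * 0xAAAAAAAB
   mod 2^32 == 5 == 120 / 24, with no division instruction executed.  */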
1720 | ||
1721 | /* Initialize the ggc-mmap allocator. */ | |
21341cfd | 1722 | void |
20c1dc5e | 1723 | init_ggc (void) |
21341cfd | 1724 | { |
3edf64aa | 1725 | static bool init_p = false; |
2be510b8 MM |
1726 | unsigned order; |
1727 | ||
3edf64aa DM |
1728 | if (init_p) |
1729 | return; | |
1730 | init_p = true; | |
1731 | ||
c3284718 | 1732 | G.pagesize = getpagesize (); |
21341cfd AS |
1733 | G.lg_pagesize = exact_log2 (G.pagesize); |
1734 | ||
825b6926 | 1735 | #ifdef HAVE_MMAP_DEV_ZERO |
21341cfd AS |
1736 | G.dev_zero_fd = open ("/dev/zero", O_RDONLY); |
1737 | if (G.dev_zero_fd == -1) | |
c770ac2b | 1738 | internal_error ("open /dev/zero: %m"); |
21341cfd AS |
1739 | #endif |
1740 | ||
1741 | #if 0 | |
1742 | G.debug_file = fopen ("ggc-mmap.debug", "w"); | |
1743 | #else | |
1744 | G.debug_file = stdout; | |
1745 | #endif | |
1746 | ||
825b6926 | 1747 | #ifdef USING_MMAP |
1b3e1423 RH |
1748 | /* StunOS has an amazing off-by-one error for the first mmap allocation |
1749 | after fiddling with RLIMIT_STACK. The result, as hard as it is to | |
1750 | believe, is an unaligned page allocation, which would cause us to | |
1751 | hork badly if we tried to use it. */ | |
1752 | { | |
25f0ea81 | 1753 | char *p = alloc_anon (NULL, G.pagesize, true); |
825b6926 | 1754 | struct page_entry *e; |
2a6e6fea | 1755 | if ((uintptr_t)p & (G.pagesize - 1)) |
1b3e1423 RH |
1756 | { |
1757 | /* How losing. Discard this one and try another. If we still | |
1758 | can't get something useful, give up. */ | |
1759 | ||
25f0ea81 | 1760 | p = alloc_anon (NULL, G.pagesize, true); |
2a6e6fea | 1761 | gcc_assert (!((uintptr_t)p & (G.pagesize - 1))); |
1b3e1423 | 1762 | } |
825b6926 | 1763 | |
dc297297 | 1764 | /* We have a good page, might as well hold onto it... */ |
5ed6ace5 | 1765 | e = XCNEW (struct page_entry); |
825b6926 ZW |
1766 | e->bytes = G.pagesize; |
1767 | e->page = p; | |
1768 | e->next = G.free_pages; | |
1769 | G.free_pages = e; | |
1b3e1423 RH |
1770 | } |
1771 | #endif | |
2be510b8 MM |
1772 | |
1773 | /* Initialize the object size table. */ | |
1774 | for (order = 0; order < HOST_BITS_PER_PTR; ++order) | |
1775 | object_size_table[order] = (size_t) 1 << order; | |
1776 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) | |
b1095f9c MM |
1777 | { |
1778 | size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR]; | |
a469a4f2 RG |
1779 | |
1780 | /* If S is not a multiple of the MAX_ALIGNMENT, then round it up | |
1781 | so that we're sure of getting aligned memory. */ | |
1782 | s = ROUND_UP (s, MAX_ALIGNMENT); | |
b1095f9c MM |
1783 | object_size_table[order] = s; |
1784 | } | |
2be510b8 | 1785 | |
8537ed68 | 1786 | /* Initialize the objects-per-page and inverse tables. */ |
2be510b8 MM |
1787 | for (order = 0; order < NUM_ORDERS; ++order) |
1788 | { | |
1789 | objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order); | |
1790 | if (objects_per_page_table[order] == 0) | |
1791 | objects_per_page_table[order] = 1; | |
8537ed68 | 1792 | compute_inverse (order); |
2be510b8 MM |
1793 | } |
1794 | ||
1795 | /* Reset the size_lookup array to put appropriately sized objects in | |
1796 | the special orders. All objects bigger than the previous power | |
1797 | of two, but no greater than the special size, should go in the | |
a469a4f2 | 1798 | new order. */ |
2be510b8 MM |
1799 | for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) |
1800 | { | |
a469a4f2 RG |
1801 | int o; |
1802 | int i; | |
c4775f82 | 1803 | |
6583cf15 NC |
1804 | i = OBJECT_SIZE (order); |
1805 | if (i >= NUM_SIZE_LOOKUP) | |
1806 | continue; | |
1807 | ||
1808 | for (o = size_lookup[i]; o == size_lookup [i]; --i) | |
a469a4f2 RG |
1809 | size_lookup[i] = order; |
1810 | } | |
ecf7b86f | 1811 | |
c4775f82 MS |
1812 | G.depth_in_use = 0; |
1813 | G.depth_max = 10; | |
5ed6ace5 | 1814 | G.depth = XNEWVEC (unsigned int, G.depth_max); |
c4775f82 MS |
1815 | |
1816 | G.by_depth_in_use = 0; | |
1817 | G.by_depth_max = INITIAL_PTE_COUNT; | |
5ed6ace5 MD |
1818 | G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
1819 | G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); | |
2a304777 JM |
1820 | |
1821 | /* Allocate space for the depth 0 finalizers. */ | |
1822 | G.finalizers.safe_push (vNULL); | |
1823 | G.vec_finalizers.safe_push (vNULL); | |
1824 | gcc_assert (G.finalizers.length() == 1); | |
21341cfd AS |
1825 | } |
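
/* Effect of the size_lookup rewrite above, for a hypothetical extra
   order of 56 bytes: requests of 33..56 bytes, which previously mapped
   to the 64-byte power-of-two order, now map to the 56-byte order,
   while 57..64 still fall through to 64 -- trimming per-object padding
   for the sizes the extra order was created for.  */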
1826 | ||
4934cc53 MM |
1827 | /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P |
1828 | reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ | |
1829 | ||
1830 | static void | |
20c1dc5e | 1831 | ggc_recalculate_in_use_p (page_entry *p) |
4934cc53 MM |
1832 | { |
1833 | unsigned int i; | |
1834 | size_t num_objects; | |
1835 | ||
589005ff | 1836 | /* Because the past-the-end bit in in_use_p is always set, we |
4934cc53 | 1837 | pretend there is one additional object. */ |
17211ab5 | 1838 | num_objects = OBJECTS_IN_PAGE (p) + 1; |
4934cc53 MM |
1839 | |
1840 | /* Reset the free object count. */ | |
1841 | p->num_free_objects = num_objects; | |
1842 | ||
1843 | /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */ | |
589005ff | 1844 | for (i = 0; |
2be510b8 MM |
1845 | i < CEIL (BITMAP_SIZE (num_objects), |
1846 | sizeof (*p->in_use_p)); | |
4934cc53 MM |
1847 | ++i) |
1848 | { | |
1849 | unsigned long j; | |
1850 | ||
1851 | /* Something is in use if it is marked, or if it was in use in a | |
1852 | context further down the context stack. */ | |
c4775f82 | 1853 | p->in_use_p[i] |= save_in_use_p (p)[i]; |
4934cc53 MM |
1854 | |
1855 | /* Decrement the free object count for every object allocated. */ | |
1856 | for (j = p->in_use_p[i]; j; j >>= 1) | |
1857 | p->num_free_objects -= (j & 1); | |
1858 | } | |
1859 | ||
282899df | 1860 | gcc_assert (p->num_free_objects < num_objects); |
4934cc53 | 1861 | } |
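
/* One hypothetical bitmap word as an illustration of the merge:
     p->in_use_p[i]       == 0b0101  (marks from this collection)
     save_in_use_p (p)[i] == 0b0011  (live in an outer context)
   After the OR, in_use_p[i] == 0b0111 and the popcount loop subtracts
   three from num_free_objects.  */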
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
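
/* Sentinel example (hypothetical page, 64-bit longs): a page holding
   96 objects keeps bit 96 -- one past the end -- permanently set,
   i.e. bit 32 of in_use_p[1].  Scans over the bitmap therefore always
   find a set bit and can stop without a separate bounds check.  */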

/* Check if any blocks with a registered finalizer have become unmarked.
   If so, run the finalizer and unregister it, because the block is about
   to be freed.  Note that no guarantee is made about the order in which
   finalizers run, so touching other objects in GC memory is extremely
   unwise.  */

static void
ggc_handle_finalizers ()
{
  unsigned dlen = G.finalizers.length ();
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<finalizer> &v = G.finalizers[d];
      unsigned length = v.length ();
      for (unsigned int i = 0; i < length;)
        {
          finalizer &f = v[i];
          if (!ggc_marked_p (f.addr ()))
            {
              f.call ();
              v.unordered_remove (i);
              length--;
            }
          else
            i++;
        }
    }

  gcc_assert (dlen == G.vec_finalizers.length ());
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<vec_finalizer> &vv = G.vec_finalizers[d];
      unsigned length = vv.length ();
      for (unsigned int i = 0; i < length;)
        {
          vec_finalizer &f = vv[i];
          if (!ggc_marked_p (f.addr ()))
            {
              f.call ();
              vv.unordered_remove (i);
              length--;
            }
          else
            i++;
        }
    }
}
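
/* Note on the removal pattern above: vec::unordered_remove (i) moves
   the vector's last element into slot I rather than shifting, so the
   loops deliberately re-examine index I after a removal instead of
   incrementing -- the swapped-in finalizer still needs its mark
   checked.  */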
1950 | ||
cb2ec151 RH |
1951 | /* Free all empty pages. Partially empty pages need no attention |
1952 | because the `mark' bit doubles as an `unused' bit. */ | |
1953 | ||
685fe032 | 1954 | static void |
20c1dc5e | 1955 | sweep_pages (void) |
21341cfd AS |
1956 | { |
1957 | unsigned order; | |
1958 | ||
2be510b8 | 1959 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd AS |
1960 | { |
1961 | /* The last page-entry to consider, regardless of entries | |
1962 | placed at the end of the list. */ | |
1963 | page_entry * const last = G.page_tails[order]; | |
1964 | ||
17211ab5 | 1965 | size_t num_objects; |
054f5e69 | 1966 | size_t live_objects; |
21341cfd AS |
1967 | page_entry *p, *previous; |
1968 | int done; | |
589005ff | 1969 | |
21341cfd AS |
1970 | p = G.pages[order]; |
1971 | if (p == NULL) | |
1972 | continue; | |
1973 | ||
1974 | previous = NULL; | |
1975 | do | |
1976 | { | |
1977 | page_entry *next = p->next; | |
1978 | ||
1979 | /* Loop until all entries have been examined. */ | |
1980 | done = (p == last); | |
20c1dc5e | 1981 | |
17211ab5 | 1982 | num_objects = OBJECTS_IN_PAGE (p); |
21341cfd | 1983 | |
054f5e69 ZW |
1984 | /* Add all live objects on this page to the count of |
1985 | allocated memory. */ | |
1986 | live_objects = num_objects - p->num_free_objects; | |
1987 | ||
2be510b8 | 1988 | G.allocated += OBJECT_SIZE (order) * live_objects; |
054f5e69 | 1989 | |
21341cfd AS |
1990 | /* Only objects on pages in the topmost context should get |
1991 | collected. */ | |
1992 | if (p->context_depth < G.context_depth) | |
1993 | ; | |
1994 | ||
1995 | /* Remove the page if it's empty. */ | |
054f5e69 | 1996 | else if (live_objects == 0) |
21341cfd | 1997 | { |
9bf793f9 JL |
1998 | /* If P was the first page in the list, then NEXT |
1999 | becomes the new first page in the list, otherwise | |
2000 | splice P out of the forward pointers. */ | |
21341cfd AS |
2001 | if (! previous) |
2002 | G.pages[order] = next; | |
2003 | else | |
2004 | previous->next = next; | |
b8698a0f | 2005 | |
9bf793f9 JL |
2006 | /* Splice P out of the back pointers too. */ |
2007 | if (next) | |
2008 | next->prev = previous; | |
21341cfd AS |
2009 | |
2010 | /* Are we removing the last element? */ | |
2011 | if (p == G.page_tails[order]) | |
2012 | G.page_tails[order] = previous; | |
2013 | free_page (p); | |
2014 | p = previous; | |
2015 | } | |
2016 | ||
2017 | /* If the page is full, move it to the end. */ | |
2018 | else if (p->num_free_objects == 0) | |
2019 | { | |
2020 | /* Don't move it if it's already at the end. */ | |
2021 | if (p != G.page_tails[order]) | |
2022 | { | |
2023 | /* Move p to the end of the list. */ | |
2024 | p->next = NULL; | |
9bf793f9 | 2025 | p->prev = G.page_tails[order]; |
21341cfd AS |
2026 | G.page_tails[order]->next = p; |
2027 | ||
2028 | /* Update the tail pointer... */ | |
2029 | G.page_tails[order] = p; | |
2030 | ||
2031 | /* ... and the head pointer, if necessary. */ | |
2032 | if (! previous) | |
2033 | G.pages[order] = next; | |
2034 | else | |
2035 | previous->next = next; | |
9bf793f9 JL |
2036 | |
2037 | /* And update the backpointer in NEXT if necessary. */ | |
2038 | if (next) | |
2039 | next->prev = previous; | |
2040 | ||
21341cfd AS |
2041 | p = previous; |
2042 | } | |
2043 | } | |
2044 | ||
2045 | /* If we've fallen through to here, it's a page in the | |
2046 | topmost context that is neither full nor empty. Such a | |
2047 | page must precede pages at lesser context depth in the | |
2048 | list, so move it to the head. */ | |
2049 | else if (p != G.pages[order]) | |
2050 | { | |
2051 | previous->next = p->next; | |
9bf793f9 JL |
2052 | |
2053 | /* Update the backchain in the next node if it exists. */ | |
2054 | if (p->next) | |
2055 | p->next->prev = previous; | |
2056 | ||
2057 | /* Move P to the head of the list. */ | |
21341cfd | 2058 | p->next = G.pages[order]; |
9bf793f9 JL |
2059 | p->prev = NULL; |
2060 | G.pages[order]->prev = p; | |
2061 | ||
2062 | /* Update the head pointer. */ | |
21341cfd | 2063 | G.pages[order] = p; |
9bf793f9 | 2064 | |
21341cfd AS |
2065 | /* Are we moving the last element? */ |
2066 | if (G.page_tails[order] == p) | |
2067 | G.page_tails[order] = previous; | |
2068 | p = previous; | |
2069 | } | |
2070 | ||
2071 | previous = p; | |
2072 | p = next; | |
589005ff | 2073 | } |
21341cfd | 2074 | while (! done); |
4934cc53 MM |
2075 | |
2076 | /* Now, restore the in_use_p vectors for any pages from contexts | |
2077 | other than the current one. */ | |
2078 | for (p = G.pages[order]; p; p = p->next) | |
2079 | if (p->context_depth != G.context_depth) | |
2080 | ggc_recalculate_in_use_p (p); | |
21341cfd AS |
2081 | } |
2082 | } | |
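
/* After a sweep, each G.pages[order] list is therefore ordered with
   partially filled pages of the current context toward the head and
   completely full pages at the tail -- the invariant the allocator's
   fast path relies on when it takes the first page of the list.  */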
2083 | ||
3788cc17 | 2084 | #ifdef ENABLE_GC_CHECKING |
cb2ec151 RH |
2085 | /* Clobber all free objects. */ |
2086 | ||
685fe032 | 2087 | static void |
20c1dc5e | 2088 | poison_pages (void) |
21341cfd AS |
2089 | { |
2090 | unsigned order; | |
2091 | ||
2be510b8 | 2092 | for (order = 2; order < NUM_ORDERS; order++) |
21341cfd | 2093 | { |
2be510b8 | 2094 | size_t size = OBJECT_SIZE (order); |
21341cfd AS |
2095 | page_entry *p; |
2096 | ||
2097 | for (p = G.pages[order]; p != NULL; p = p->next) | |
2098 | { | |
17211ab5 | 2099 | size_t num_objects; |
21341cfd | 2100 | size_t i; |
c831fdea MM |
2101 | |
2102 | if (p->context_depth != G.context_depth) | |
2103 | /* Since we don't do any collection for pages in pushed | |
2104 | contexts, there's no need to do any poisoning. And | |
2105 | besides, the IN_USE_P array isn't valid until we pop | |
2106 | contexts. */ | |
2107 | continue; | |
2108 | ||
17211ab5 | 2109 | num_objects = OBJECTS_IN_PAGE (p); |
21341cfd AS |
2110 | for (i = 0; i < num_objects; i++) |
2111 | { | |
2112 | size_t word, bit; | |
2113 | word = i / HOST_BITS_PER_LONG; | |
2114 | bit = i % HOST_BITS_PER_LONG; | |
2115 | if (((p->in_use_p[word] >> bit) & 1) == 0) | |
9a0a7d5d HPN |
2116 | { |
2117 | char *object = p->page + i * size; | |
2118 | ||
2119 | /* Keep poison-by-write when we expect to use Valgrind, | |
2120 | so the exact same memory semantics is kept, in case | |
2121 | there are memory errors. We override this request | |
2122 | below. */ | |
35dee980 HPN |
2123 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object, |
2124 | size)); | |
9a0a7d5d HPN |
2125 | memset (object, 0xa5, size); |
2126 | ||
2127 | /* Drop the handle to avoid handle leak. */ | |
35dee980 | 2128 | VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size)); |
9a0a7d5d | 2129 | } |
21341cfd AS |
2130 | } |
2131 | } | |
2132 | } | |
2133 | } | |
685fe032 RH |
2134 | #else |
2135 | #define poison_pages() | |
2136 | #endif | |
2137 | ||
2138 | #ifdef ENABLE_GC_ALWAYS_COLLECT | |
2139 | /* Validate that the reportedly free objects actually are. */ | |
2140 | ||
2141 | static void | |
2142 | validate_free_objects (void) | |
2143 | { | |
2144 | struct free_object *f, *next, *still_free = NULL; | |
2145 | ||
2146 | for (f = G.free_object_list; f ; f = next) | |
2147 | { | |
2148 | page_entry *pe = lookup_page_table_entry (f->object); | |
2149 | size_t bit, word; | |
2150 | ||
2151 | bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order); | |
2152 | word = bit / HOST_BITS_PER_LONG; | |
2153 | bit = bit % HOST_BITS_PER_LONG; | |
2154 | next = f->next; | |
2155 | ||
2156 | /* Make certain it isn't visible from any root. Notice that we | |
2157 | do this check before sweep_pages merges save_in_use_p. */ | |
282899df | 2158 | gcc_assert (!(pe->in_use_p[word] & (1UL << bit))); |
685fe032 RH |
2159 | |
2160 | /* If the object comes from an outer context, then retain the | |
2161 | free_object entry, so that we can verify that the address | |
2162 | isn't live on the stack in some outer context. */ | |
2163 | if (pe->context_depth != G.context_depth) | |
2164 | { | |
2165 | f->next = still_free; | |
2166 | still_free = f; | |
2167 | } | |
2168 | else | |
2169 | free (f); | |
2170 | } | |
2171 | ||
2172 | G.free_object_list = still_free; | |
2173 | } | |
2174 | #else | |
2175 | #define validate_free_objects() | |
21341cfd AS |
2176 | #endif |
2177 | ||
cb2ec151 RH |
2178 | /* Top level mark-and-sweep routine. */ |
2179 | ||
21341cfd | 2180 | void |
20c1dc5e | 2181 | ggc_collect (void) |
21341cfd | 2182 | { |
21341cfd AS |
2183 | /* Avoid frequent unnecessary work by skipping collection if the |
2184 | total allocations haven't expanded much since the last | |
2185 | collection. */ | |
19cc0dd4 | 2186 | float allocated_last_gc = |
028d4092 | 2187 | MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * 1024); |
3788cc17 | 2188 | |
9fd052e7 JH |
2189 | /* It is also good time to get memory block pool into limits. */ |
2190 | memory_block_pool::trim (); | |
2191 | ||
028d4092 | 2192 | float min_expand = allocated_last_gc * param_ggc_min_expand / 100; |
07724022 | 2193 | if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect) |
21341cfd | 2194 | return; |
21341cfd | 2195 | |
2a9a326b | 2196 | timevar_push (TV_GC); |
685fe032 RH |
2197 | if (GGC_DEBUG_LEVEL >= 2) |
2198 | fprintf (G.debug_file, "BEGIN COLLECTING\n"); | |
21341cfd | 2199 | |
054f5e69 ZW |
2200 | /* Zero the total allocated bytes. This will be recalculated in the |
2201 | sweep phase. */ | |
e5207f1a | 2202 | size_t allocated = G.allocated; |
21341cfd AS |
2203 | G.allocated = 0; |
2204 | ||
589005ff | 2205 | /* Release the pages we freed the last time we collected, but didn't |
21341cfd AS |
2206 | reuse in the interim. */ |
2207 | release_pages (); | |
2208 | ||
e5207f1a JH |
2209 | /* Output this later so we do not interfere with release_pages. */ |
2210 | if (!quiet_flag) | |
2211 | fprintf (stderr, " {GC %luk -> ", (unsigned long) allocated / 1024); | |
2212 | ||
52895e1a RH |
2213 | /* Indicate that we've seen collections at this context depth. */ |
2214 | G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; | |
2215 | ||
ae2392a9 BS |
2216 | invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); |
2217 | ||
b086d530 | 2218 | in_gc = true; |
21341cfd AS |
2219 | clear_marks (); |
2220 | ggc_mark_roots (); | |
de49ce19 | 2221 | ggc_handle_finalizers (); |
7aa6d18a SB |
2222 | |
2223 | if (GATHER_STATISTICS) | |
2224 | ggc_prune_overhead_list (); | |
2225 | ||
21341cfd | 2226 | poison_pages (); |
685fe032 | 2227 | validate_free_objects (); |
cb2ec151 RH |
2228 | sweep_pages (); |
2229 | ||
b086d530 | 2230 | in_gc = false; |
21341cfd AS |
2231 | G.allocated_last_gc = G.allocated; |
2232 | ||
ae2392a9 BS |
2233 | invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); |
2234 | ||
2a9a326b | 2235 | timevar_pop (TV_GC); |
21341cfd | 2236 | |
21341cfd | 2237 | if (!quiet_flag) |
2a9a326b | 2238 | fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); |
685fe032 RH |
2239 | if (GGC_DEBUG_LEVEL >= 2) |
2240 | fprintf (G.debug_file, "END COLLECTING\n"); | |
21341cfd | 2241 | } |
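
/* Trigger arithmetic as a concrete example (hypothetical parameter
   values): with --param ggc-min-heapsize=4096 and
   --param ggc-min-expand=30, a run whose previous collection left 8 MB
   live will not collect again until G.allocated exceeds
   8 MB + 0.30 * 8 MB = 10.4 MB; until then ggc_collect () returns at
   the early check above.  */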

/* Return free pages to the system.  */

void
ggc_trim ()
{
  timevar_push (TV_GC);
  G.allocated = 0;
  sweep_pages ();
  release_pages ();
  if (!quiet_flag)
    fprintf (stderr, " {GC trimmed to %luk, %luk mapped}",
             (unsigned long) G.allocated / 1024,
             (unsigned long) G.bytes_mapped / 1024);
  timevar_pop (TV_GC);
}

/* Assume that all GGC memory is reachable and grow the limits for next
   collection.  With checking, trigger GGC so -Q compilation outputs how much
   of memory really is reachable.  */

void
ggc_grow (void)
{
  if (!flag_checking)
    G.allocated_last_gc = MAX (G.allocated_last_gc,
                               G.allocated);
  else
    ggc_collect ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk} ", (unsigned long) G.allocated / 1024);
}

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-8s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
               PRsa (10) "\n",
               (uint64_t)OBJECT_SIZE (i),
               SIZE_AMOUNT (allocated),
               SIZE_AMOUNT (in_use),
               SIZE_AMOUNT (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
           "Total",
           SIZE_AMOUNT (G.bytes_mapped),
           SIZE_AMOUNT (G.allocated),
           SIZE_AMOUNT (total_overhead));

  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during "
               "the compilation process\n");

      fprintf (stderr, "Total Overhead: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead));
      fprintf (stderr, "Total Allocated: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated));

      fprintf (stderr, "Total Overhead under 32B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under32));
      fprintf (stderr, "Total Allocated under 32B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under32));
      fprintf (stderr, "Total Overhead under 64B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under64));
      fprintf (stderr, "Total Allocated under 64B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under64));
      fprintf (stderr, "Total Overhead under 128B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under128));
      fprintf (stderr, "Total Allocated under 128B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under128));

      for (i = 0; i < NUM_ORDERS; i++)
        if (G.stats.total_allocated_per_order[i])
          {
            fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
                     PRsa (9) "\n",
                     (uint64_t)OBJECT_SIZE (i),
                     SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
            fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
                     PRsa (9) "\n",
                     (uint64_t)OBJECT_SIZE (i),
                     SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
          }
    }
}
\f
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
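
/* Sizing example (hypothetical counts, 4 KB pages): 1000 objects in a
   32-byte order occupy 32000 bytes, which PAGE_ALIGN rounds up to
   32768, so each order's region begins on a page boundary within the
   PCH image.  */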

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}


char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error (input_location, "cannot write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error (input_location, "cannot write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error (input_location, "cannot write PCH file: %m");
}
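
/* Padding example (hypothetical sizes): a 10-byte string assigned to a
   16-byte order writes its 10 payload bytes followed by 6 bytes taken
   from emptyBytes, so the on-disk image keeps the OBJECT_SIZE grid
   that ggc_pch_read rebuilds its page tables against.  */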

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (unsigned i = G.by_depth_in_use; i--;)
    {
      page_entry *p = G.by_depth[i];
      p->index_by_depth = i;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  /* Allocate space for the depth 1 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length () == 2);
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
                                            - sizeof (long)
                                            + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);
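
      /* Bitmap-fill example (hypothetical counts, 64-bit longs): for
         num_objs == 70 the first loop stores -1 into in_use_p[0]
         (objects 0..63); the second sets overall bits 64..70, i.e.
         bits 0..6 of in_use_p[1] -- every PCH object plus the
         one-past-the-end sentinel is marked in use.  */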

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays, later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}