21341cfd | 1 | /* "Bag-of-pages" garbage collector for the GNU compiler. |
a9429e29 | 2 | Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, |
87fb500b | 3 | 2010, 2011 Free Software Foundation, Inc. |
21341cfd | 4 | |
1322177d | 5 | This file is part of GCC. |
21341cfd | 6 | |
1322177d LB |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free | |
9dcd6f09 | 9 | Software Foundation; either version 3, or (at your option) any later |
1322177d | 10 | version. |
21341cfd | 11 | |
1322177d LB |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
21341cfd | 16 | |
b9bfacf0 | 17 | You should have received a copy of the GNU General Public License |
9dcd6f09 NC |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
21341cfd | 20 | |
21341cfd | 21 | #include "config.h" |
21341cfd | 22 | #include "system.h" |
4977bab6 ZW |
23 | #include "coretypes.h" |
24 | #include "tm.h" | |
21341cfd | 25 | #include "tree.h" |
e5ecd4ea | 26 | #include "rtl.h" |
1b42a6a9 | 27 | #include "tm_p.h" |
718f9c0f | 28 | #include "diagnostic-core.h" |
21341cfd | 29 | #include "flags.h" |
e5ecd4ea | 30 | #include "ggc.h" |
a9429e29 | 31 | #include "ggc-internal.h" |
2a9a326b | 32 | #include "timevar.h" |
3788cc17 | 33 | #include "params.h" |
07724022 | 34 | #include "tree-flow.h" |
b78cd885 | 35 | #include "cfgloop.h" |
ae2392a9 | 36 | #include "plugin.h" |
e5ecd4ea | 37 | |
825b6926 ZW |
38 | /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a |
39 | file open. Prefer either to valloc. */ | |
40 | #ifdef HAVE_MMAP_ANON | |
41 | # undef HAVE_MMAP_DEV_ZERO | |
825b6926 | 42 | # define USING_MMAP |
005537df | 43 | #endif |
21341cfd | 44 | |
825b6926 | 45 | #ifdef HAVE_MMAP_DEV_ZERO |
825b6926 | 46 | # define USING_MMAP |
8342b467 RH |
47 | #endif |
48 | ||
130fadbb RH |
49 | #ifndef USING_MMAP |
50 | #define USING_MALLOC_PAGE_GROUPS | |
5b918807 | 51 | #endif |
21341cfd | 52 | |
87fb500b RO |
53 | #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \ |
54 | && defined(USING_MMAP) | |
711a3d82 AK |
55 | # define USING_MADVISE |
56 | #endif | |
57 | ||
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
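
/* A worked example of the rounding above (illustrative only): a
   20-byte request is rounded up to 32 bytes, the next power of two,
   and is therefore served from a page of order 5, since 2^5 == 32.
   The extra, non-power-of-two orders defined below can later provide
   a tighter fit for common structure sizes.  */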

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
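
/* Example of the bit extraction (illustrative, assuming 4K pages, so
   G.lg_pagesize == 12): PAGE_L2_BITS is 32 - 8 - 12 == 12.  For
   p == 0x12345678, LOOKUP_L1 (p) == 0x12 (the top 8 bits) and
   LOOKUP_L2 (p) == 0x345 (the next 12 bits above the page offset).  */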

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
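
/* Illustration of the inverse trick (values are an example, not the
   output of compute_inverse below): for Z == 24 with 32-bit wrapping
   arithmetic, M == 0xaaaaaaab (the inverse of 3 mod 2^32) and S == 3
   work; e.g. O == 48 gives ((48 * M) mod 2^32) >> 3 == 16 >> 3
   == 2 == 48 / 24.  */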

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
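
/* For instance, on a typical LP64 host where HOST_WIDEST_INT and
   void * are both 8 bytes with 8-byte alignment, the compiler places
   u at offset 8, so MAX_ALIGNMENT evaluates to 8.  (Illustrative; the
   struct exists precisely so that the target ABI supplies the real
   value.)  */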


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
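
/* Worked examples: ROUND_UP_VALUE (20, 8) == 4 and ROUND_UP (20, 8)
   == 24, the smallest multiple of 8 that is >= 20.  Assuming 4K
   pages, PAGE_ALIGN (5000) == 8192.  */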

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

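/* For example (illustrative arithmetic only): with 64-bit longs, a
   page holding 512 objects needs BITMAP_SIZE (512 + 1)
   == CEIL (513, 64) * 8 == 72 bytes, the extra bit leaving room for
   the one-past-the-end sentinel that alloc_page sets below.  */
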
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
\f
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
	 unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

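/* Example: size_lookup[24] == 5, so a 24-byte request initially maps
   to order 5 (32-byte objects).  Initialization code outside this
   excerpt may re-route such slots to a tighter-fitting extra order
   (e.g. MAX_ALIGNMENT * 3 == 24 on a host with 8-byte alignment).  */
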
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

dae4174e TT |
1393 | /* Mark function for strings. */ |
1394 | ||
1395 | void | |
1396 | gt_ggc_m_S (const void *p) | |
1397 | { | |
1398 | page_entry *entry; | |
1399 | unsigned bit, word; | |
1400 | unsigned long mask; | |
1401 | unsigned long offset; | |
1402 | ||
1403 | if (!p || !ggc_allocated_p (p)) | |
1404 | return; | |
1405 | ||
1406 | /* Look up the page on which the object is alloced. . */ | |
1407 | entry = lookup_page_table_entry (p); | |
1408 | gcc_assert (entry); | |
1409 | ||
1410 | /* Calculate the index of the object on the page; this is its bit | |
1411 | position in the in_use_p bitmap. Note that because a char* might | |
1412 | point to the middle of an object, we need special code here to | |
1413 | make sure P points to the start of an object. */ | |
1414 | offset = ((const char *) p - entry->page) % object_size_table[entry->order]; | |
1415 | if (offset) | |
1416 | { | |
1417 | /* Here we've seen a char* which does not point to the beginning | |
1418 | of an allocated object. We assume it points to the middle of | |
1419 | a STRING_CST. */ | |
1420 | gcc_assert (offset == offsetof (struct tree_string, str)); | |
1421 | p = ((const char *) p) - offset; | |
d3bfe4de | 1422 | gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p)); |
dae4174e TT |
1423 | return; |
1424 | } | |
1425 | ||
1426 | bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); | |
1427 | word = bit / HOST_BITS_PER_LONG; | |
1428 | mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); | |
1429 | ||
1430 | /* If the bit was previously set, skip it. */ | |
1431 | if (entry->in_use_p[word] & mask) | |
1432 | return; | |
1433 | ||
1434 | /* Otherwise set it, and decrement the free object count. */ | |
1435 | entry->in_use_p[word] |= mask; | |
1436 | entry->num_free_objects -= 1; | |
1437 | ||
1438 | if (GGC_DEBUG_LEVEL >= 4) | |
1439 | fprintf (G.debug_file, "Marking %p\n", p); | |
1440 | ||
1441 | return; | |
1442 | } | |
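
/* Illustrative sketch (not part of the allocator): the interior-pointer
   rebasing above, in isolation.  The numbers are hypothetical; with
   16-byte objects, a pointer at page offset 37 is rebased to offset 32,
   the start of its object.  */
#if 0
static const char *
rebase_interior_pointer (const char *p, const char *page, size_t object_size)
{
  /* Distance into the object; zero when P already points at the start.  */
  size_t offset = ((size_t) (p - page)) % object_size;
  return p - offset;   /* page + 37 with 16-byte objects -> page + 32.  */
}
#endif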


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
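
/* Illustrative sketch (not part of the allocator): the word/mask
   arithmetic used by ggc_set_mark, with concrete numbers.  Assuming a
   host where HOST_BITS_PER_LONG is 64, object index 70 lands in word 1,
   bit 6 of the in-use bitmap.  */
#if 0
static int
test_and_set_mark_bit (unsigned long *bitmap, unsigned bit)
{
  unsigned word = bit / 64;                  /* 70 / 64 == 1 */
  unsigned long mask = 1UL << (bit % 64);    /* 70 % 64 == 6 */
  if (bitmap[word] & mask)
    return 1;                                /* Already marked.  */
  bitmap[word] |= mask;
  return 0;
}
#endif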

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
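
/* Illustrative sketch (not part of the allocator): the move-to-head
   splice performed by ggc_free, in isolation.  HEAD and TAIL stand in
   for G.pages[order] and G.page_tails[order]; like the code above, this
   assumes PE has a non-null, completely-full predecessor.  */
#if 0
struct node { struct node *prev, *next; };

static void
move_to_head (struct node **head, struct node **tail, struct node *pe)
{
  struct node *q = pe->prev;    /* Previous node; assumed non-null.  */
  struct node *p = pe->next;    /* Next node, possibly null.  */

  /* Unlink PE.  */
  q->next = p;
  if (!p)
    *tail = q;                  /* PE was the tail; Q becomes the tail.  */
  else
    p->prev = q;

  /* Push PE on the front.  */
  pe->next = *head;
  pe->prev = NULL;
  (*head)->prev = pe;
  *head = pe;
}
#endif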
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

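/* Illustrative sketch (not part of the allocator): the Newton iteration
   above, worked for a concrete odd size on a hypothetical 32-bit host.
   Each step doubles the number of correct low-order bits, so the loop
   converges very quickly.  For size == 3 it yields inv == 0xaaaaaaab,
   and n / 3 == n * 0xaaaaaaab (mod 2^32) for any n that is an exact
   multiple of 3.  */
#if 0
#include <stdint.h>

static uint32_t
exact_inverse (uint32_t size)   /* SIZE must be odd.  */
{
  uint32_t inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);
  return inv;                   /* exact_inverse (3) == 0xaaaaaaab.  */
}
#endif
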
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, true);
        gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
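
/* Illustrative sketch (not part of the allocator): how a request size
   is mapped to an order once the tables above are filled in.  The
   starting order for the linear search is a placeholder; the real
   allocator picks its own starting point.  */
#if 0
static unsigned
order_for_request (size_t size)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    return size_lookup[size];    /* O(1) for small requests.  */

  /* Linear search for large requests.  */
  order = HOST_BITS_PER_PTR;     /* Placeholder starting order.  */
  while (size > OBJECT_SIZE (order))
    order++;
  return order;
}
#endif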

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics are kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
                                                                 size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();

  if (GATHER_STATISTICS)
    ggc_prune_overhead_list ();

  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL(G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));

  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

      fprintf (stderr, "Total Overhead: %10lld\n",
               G.stats.total_overhead);
      fprintf (stderr, "Total Allocated: %10lld\n",
               G.stats.total_allocated);

      fprintf (stderr, "Total Overhead under 32B: %10lld\n",
               G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B: %10lld\n",
               G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B: %10lld\n",
               G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B: %10lld\n",
               G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B: %10lld\n",
               G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B: %10lld\n",
               G.stats.total_allocated_under128);

      for (i = 0; i < NUM_ORDERS; i++)
        if (G.stats.total_allocated_per_order[i])
          {
            fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
                     (unsigned long) OBJECT_SIZE (i),
                     G.stats.total_overhead_per_order[i]);
            fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
                     (unsigned long) OBJECT_SIZE (i),
                     G.stats.total_allocated_per_order[i]);
          }
    }
}
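
/* Illustrative sketch (not part of the collector): what SCALE and
   STAT_LABEL produce.  Values below 10K print raw, below 10M in
   kilobytes, and otherwise in megabytes.  */
#if 0
static void
print_scaled_example (void)
{
  /* 2500000 is below 10M, so it is divided by 1024: prints "2441k".  */
  fprintf (stderr, "%lu%c\n", SCALE (2500000), STAT_LABEL (2500000));
}
#endif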
\f
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}

2274 | ||
2275 | ||
2276 | char * | |
20c1dc5e | 2277 | ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
08cee789 DJ |
2278 | size_t size, bool is_string ATTRIBUTE_UNUSED, |
2279 | enum gt_types_enum type ATTRIBUTE_UNUSED) | |
17211ab5 GK |
2280 | { |
2281 | unsigned order; | |
2282 | char *result; | |
20c1dc5e | 2283 | |
6583cf15 | 2284 | if (size < NUM_SIZE_LOOKUP) |
17211ab5 GK |
2285 | order = size_lookup[size]; |
2286 | else | |
2287 | { | |
f5938dcd | 2288 | order = 10; |
17211ab5 GK |
2289 | while (size > OBJECT_SIZE (order)) |
2290 | order++; | |
2291 | } | |
2292 | ||
2293 | result = (char *) d->base[order]; | |
2294 | d->base[order] += OBJECT_SIZE (order); | |
2295 | return result; | |
2296 | } | |
2297 | ||
20c1dc5e AJ |
2298 | void |
2299 | ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | |
2300 | FILE *f ATTRIBUTE_UNUSED) | |
17211ab5 GK |
2301 | { |
2302 | /* Nothing to do. */ | |
2303 | } | |
2304 | ||
2305 | void | |
20c1dc5e AJ |
2306 | ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, |
2307 | FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, | |
b6f61163 | 2308 | size_t size, bool is_string ATTRIBUTE_UNUSED) |
17211ab5 GK |
2309 | { |
2310 | unsigned order; | |
642324bb | 2311 | static const char emptyBytes[256] = { 0 }; |
17211ab5 | 2312 | |
6583cf15 | 2313 | if (size < NUM_SIZE_LOOKUP) |
17211ab5 GK |
2314 | order = size_lookup[size]; |
2315 | else | |
2316 | { | |
f5938dcd | 2317 | order = 10; |
17211ab5 GK |
2318 | while (size > OBJECT_SIZE (order)) |
2319 | order++; | |
2320 | } | |
20c1dc5e | 2321 | |
17211ab5 | 2322 | if (fwrite (x, size, 1, f) != 1) |
d8a07487 | 2323 | fatal_error ("can%'t write PCH file: %m"); |
17211ab5 | 2324 | |
674c7ef1 | 2325 | /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the |
0ee55ad8 | 2326 | object out to OBJECT_SIZE(order). This happens for strings. */ |
674c7ef1 RB |
2327 | |
2328 | if (size != OBJECT_SIZE (order)) | |
2329 | { | |
2330 | unsigned padding = OBJECT_SIZE(order) - size; | |
2331 | ||
2332 | /* To speed small writes, we use a nulled-out array that's larger | |
2333 | than most padding requests as the source for our null bytes. This | |
2334 | permits us to do the padding with fwrite() rather than fseek(), and | |
3f117656 | 2335 | limits the chance the OS may try to flush any outstanding writes. */ |
674c7ef1 RB |
2336 | if (padding <= sizeof(emptyBytes)) |
2337 | { | |
2338 | if (fwrite (emptyBytes, 1, padding, f) != padding) | |
d8a07487 | 2339 | fatal_error ("can%'t write PCH file"); |
674c7ef1 RB |
2340 | } |
2341 | else | |
2342 | { | |
0ee55ad8 | 2343 | /* Larger than our buffer? Just default to fseek. */ |
674c7ef1 | 2344 | if (fseek (f, padding, SEEK_CUR) != 0) |
d8a07487 | 2345 | fatal_error ("can%'t write PCH file"); |
674c7ef1 RB |
2346 | } |
2347 | } | |
17211ab5 GK |
2348 | |
2349 | d->written[order]++; | |
2350 | if (d->written[order] == d->d.totals[order] | |
2351 | && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), | |
2352 | G.pagesize), | |
2353 | SEEK_CUR) != 0) | |
d8a07487 | 2354 | fatal_error ("can%'t write PCH file: %m"); |
17211ab5 GK |
2355 | } |
2356 | ||
2357 | void | |
20c1dc5e | 2358 | ggc_pch_finish (struct ggc_pch_data *d, FILE *f) |
17211ab5 GK |
2359 | { |
2360 | if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | |
d8a07487 | 2361 | fatal_error ("can%'t write PCH file: %m"); |
17211ab5 GK |
2362 | free (d); |
2363 | } | |

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
                                            - sizeof (long)
                                            + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays, later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
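
/* Illustrative sketch (not part of the collector): the two-step bitmap
   fill in ggc_pch_read, which marks every PCH object live.  For a
   hypothetical num_objs of 70 on a 64-bit host, 71 bits are set (the
   objects plus the one-past-the-end bit): word 0 becomes all-ones and
   bits 0..6 of word 1 are set individually.  BITMAP is assumed to be
   zero-initialized, as XCNEWVAR guarantees above.  */
#if 0
static void
set_first_n_bits (unsigned long *bitmap, size_t n)
{
  size_t j;

  for (j = 0; j + HOST_BITS_PER_LONG <= n; j += HOST_BITS_PER_LONG)
    bitmap[j / HOST_BITS_PER_LONG] = ~0UL;    /* Whole words at a time.  */
  for (; j < n; j++)
    bitmap[j / HOST_BITS_PER_LONG] |= 1UL << (j % HOST_BITS_PER_LONG);
}
#endif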

struct alloc_zone
{
  int dummy;
};

struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;