/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2010 Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
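
/* For illustration (not from the original design notes): assuming
   MAX_ALIGNMENT is 8 on the host, a 4096-byte small page carries one
   allocation bit per 8 bytes, i.e. a 512-bit bitmap.  Allocating a
   24-byte object sets the bit at its start; the object's size is
   recovered later by scanning forward to the next set bit, which
   marks the start of the following chunk.  */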

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE 4096
#define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT 12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE G.pagesize
#define GGC_PAGE_MASK G.page_mask
#define GGC_PAGE_SHIFT G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS 64
#define FREE_BIN_DELTA MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)

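/* Worked example, assuming MAX_ALIGNMENT is 8 so FREE_BIN_DELTA is 8:
   SIZE_BIN_DOWN maps a 24-byte chunk to bin 3 and a 512-byte chunk to
   bin 64, the last fixed-size bin.  Anything larger is placed in the
   "other" bin 0 by free_chunk, with its size stored in the chunk
   header.  */
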
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
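
/* Worked example, assuming BYTES_PER_MARK_BIT is 8 and a 32-bit
   mark_type: BYTES_PER_MARK_WORD is 8 * 8 * 4 == 256, so each mark
   word covers 256 bytes and a 4096-byte page needs 16 mark words.
   These figures depend on MAX_ALIGNMENT on the host.  */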

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

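/* Note for illustration: the offsetof trick works because the
   compiler must pad between C and U until U is properly aligned, so
   the offset of U equals the union's alignment requirement.  On a
   typical host where HOST_WIDEST_INT and double are both 8-byte
   aligned, MAX_ALIGNMENT comes out to 8.  */
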
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                HOST_PAGE_SIZE_BITS
                        32           |      |
    msb +----------------+----+------+------+ lsb
                         |    |
                      PAGE_L1_BITS   |
                              |      |
                            PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))

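#if 0
/* Illustrative sketch (never compiled): how a 32-bit address
   decomposes under the settings above (PAGE_L1_BITS == 8,
   GGC_PAGE_SHIFT == 12, hence PAGE_L2_BITS == 12).  For
   p == (void *) 0x12345678, LOOKUP_L1 yields 0x12 (top 8 bits),
   LOOKUP_L2 yields 0x345 (next 12 bits), and the low 12 bits (0x678)
   are the offset within the page.  The function name is made up for
   this example.  */
static void
example_decompose (const void *p)
{
  size_t L1 = LOOKUP_L1 (p);
  size_t L2 = LOOKUP_L2 (p);
  size_t offset = (size_t) p & GGC_PAGE_MASK;
  fprintf (stderr, "L1=%#lx L2=%#lx offset=%#lx\n",
           (unsigned long) L1, (unsigned long) L2, (unsigned long) offset);
}
#endif
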
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

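/* Worked example, assuming BYTES_PER_ALLOC_BIT is 8 and a 32-bit
   alloc_type: each alloc word then covers 8 * 4 * 8 == 256 bytes of
   the page, so an object at page offset 792 lives in alloc word
   792 / 256 == 3, at bit (792 / 8) % 32 == 3.  */
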
/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}

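/* Worked example: for value == 0x14 (binary 10100), the shift-or
   cascade smears the top bit downward to give 11111; XORing with its
   own right shift leaves only the top bit, 10000; and ffs of that
   returns 5, the 1-based position of the highest set bit.  */
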
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
      gcc_checking_assert (n == zone->n_small_pages);
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

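/* Note on the arithmetic above, for illustration: with GGC_PAGE_SIZE
   of 4096 and BYTES_PER_MARK_WORD of 256 (assuming MAX_ALIGNMENT is 8
   and a 32-bit mark_type), mark_words_per_page is CEIL (4096, 256),
   i.e. 16, so a zone with 1000 small pages allocates 16000 mark
   words.  */
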
/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

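/* Note, for illustration: with 4096-byte system pages and the default
   G.quire_size of 16 set in init_ggc, the slow path above performs a
   single 64K mmap and seeds the free list with 15 extra pages, so the
   next 15 small-page requests avoid the system allocator entirely.  */
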
/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + size - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct
                                                             alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct
                                                              alloc_chunk),
                                                    size
                                                    - sizeof (struct
                                                              alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct
                                                             alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct
                                                              alloc_chunk *),
                                                    size
                                                    - sizeof (struct
                                                              alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}

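/* Usage sketch, assuming FREE_BIN_DELTA is 8: freeing a 40-byte chunk
   links it into free_chunks[5]; freeing a 1000-byte chunk takes the
   first branch above, which records the size in the chunk header and
   links it into the "other" bin 0.  */
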
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                              MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}

#define ggc_internal_alloc_zone_pass_stat(s,z) \
  ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
                                      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}


/* Allocate a chunk of memory of SIZE bytes for an object of type GTE,
   in an appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  do { \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE))); \
    memset ((PTR), 0xa5, (SIZE)); \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE))); \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      {
        munmap (p, G.pagesize);
      }
  }
#endif
}

1708/* Start a new GGC zone. */
1709
08cee789
DJ
1710static void
1711new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
47aeffac 1712{
47aeffac
SB
1713 new_zone->name = name;
1714 new_zone->next_zone = G.zones->next_zone;
1715 G.zones->next_zone = new_zone;
08cee789
DJ
1716}

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all-or-none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
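          /* One illustrative pass: if alloc_word == 0x24 (binary
             100100), alloc_ffs returns 3, so we skip past two
             positions with no object start, land on the object whose
             alloc bit is set, and the low bit of mark_word then tells
             us whether that object survives or its storage joins the
             current free run.  */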
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                                     object
                                                                     - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                         object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}

/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* Debugging aid: dump the population of each free bin.  */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
        struct alloc_chunk *chunk;
        int n, tot;

        n = 0;
        tot = 0;
        chunk = zone->free_chunks[i];
        while (chunk)
          {
            n++;
            tot += chunk->size;
            chunk = chunk->next_free;
          }
        fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
                 i, n, tot);
      }
  }
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
             zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}

#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival / count;
}
#endif

/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }
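
  /* A hypothetical run of the heuristic above: with
     --param ggc-min-heapsize=65536 and --param ggc-min-expand=30,
     zones whose last collection ended with 80 MB live keep
     allocating until total allocation passes 80 MB + 30% == 104 MB
     before we collect again.  */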

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
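
/* For example (integer division truncates): SCALE/LABEL render 5000
   as "5000" with a blank label, 5000000 as "4882k", and 50000000 as
   "47M".  */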

void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }
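
      /* Illustration of the accounting: a zone holding three small
         pages with 1 KB total sitting on its free lists reports
         allocated == 3 * GGC_PAGE_SIZE and in_use == allocated - 1024.  */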

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS (gt_types_enum_last + 3)

#define OTHER_BUCKET (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
#define STRING_BUCKET (gt_types_enum_last + 2)

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
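
/* So, for instance, an IDENTIFIER_NODE tree is routed to
   IDENTIFIER_BUCKET, a string to STRING_BUCKET, and any other typed
   GC object to the bucket matching its gt_types_enum value.  */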

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  int i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
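
/* Illustrative numbers (assuming BYTES_PER_ALLOC_BIT == 8, not
   necessarily the configured value): if the rounded bucket totals sum
   to 6 MB, the bitmap needs 6 MB / 64 == 96 KB, so roughly 6 MB +
   96 KB is reserved for the PCH objects plus their bitmap.  */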

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;
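
  /* This mirrors the lookup in ggc_get_size.  Illustratively, with a
     32-bit alloc_type and, say, BYTES_PER_ALLOC_BIT == 8, an object
     placed 4096 bytes past orig_base sets bit (4096 / 8) % 32 == 0 of
     word 4096 / (8 * 32) == 16.  */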

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}
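
/* Note the offset arithmetic above: each object's file position is
   its displacement from orig_base plus start_offset, so e.g. an
   object assigned orig_base + 0x2000 by ggc_pch_alloc_object is
   written at file offset start_offset + 0x2000.  */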

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
#ifdef GATHER_STATISTICS
  zone_allocate_marks ();
  ggc_prune_overhead_list ();
  zone_free_marks ();
#endif

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}