/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
   Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
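
/* For instance (an illustrative sketch, not part of the original
   source, assuming 8-byte allocation units): a small page holding a
   24-byte allocated object followed by a 16-byte free chunk has
   allocation bits

     page offset:  0   8  16  24  32  40 ...
     alloc bit:    1   0   0   1   0   1 ...

   where each set bit marks the first allocation unit of a chunk,
   whether that chunk is allocated or free.  */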

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE   4096
#define GGC_PAGE_MASK   (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT  12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE   G.pagesize
#define GGC_PAGE_MASK   G.page_mask
#define GGC_PAGE_SHIFT  G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS           64
#define FREE_BIN_DELTA          MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)     ((SIZE) / FREE_BIN_DELTA)
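
/* Worked example (illustrative only, assuming MAX_ALIGNMENT is 8 on
   this host): a 24-byte chunk maps to bin SIZE_BIN_DOWN (24) == 3 and
   lives on a fixed-size free list, while a 520-byte chunk maps to
   bin 65 > NUM_FREE_BINS and therefore lands in the variable-sized
   "other" list in slot 0, with its size stored in the chunk.  */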

/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT     MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT      BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD     (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
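
/* For example (illustrative, host-dependent): on a typical 64-bit host
   where HOST_WIDEST_INT and double both require 8-byte alignment, the
   union u is placed at offset 8 after the leading char, so
   MAX_ALIGNMENT == offsetof (struct max_alignment, u) == 8.  */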

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                HOST_PAGE_SIZE_BITS
                        32          |         |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                            | PAGE_L2_BITS
                            |
                        PAGE_L1_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS    (8)
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE    ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE    ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
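
/* Worked example (illustrative, for the 32-bit settings above, where
   GGC_PAGE_SHIFT == 12, PAGE_L1_BITS == 8 and thus PAGE_L2_BITS == 12):

     LOOKUP_L1 (0xa0b1c2d3) == 0xa0     -- top 8 bits
     LOOKUP_L2 (0xa0b1c2d3) == 0xb1c    -- next 12 bits

   The remaining low 12 bits (0x2d3) are the offset within the page.  */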

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}
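
/* Worked example (illustrative, assuming BYTES_PER_ALLOC_BIT == 8 and
   a 32-bit alloc_type): an object at offset 0x150 (336 bytes) into its
   page corresponds to allocation bit 336 / 8 == 42, so

     zone_get_object_alloc_word (object) == 42 / 32 == 1
     zone_get_object_alloc_bit (object)  == 42 % 32 == 10  */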

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
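
/* Worked example (illustrative, BYTES_PER_ALLOC_BIT == 8): with
   alloc_bits[0] == 0x41, i.e. objects starting at bits 0 and 6, the
   size of the first object is queried with START_BIT == 1 (the bit
   after the object's start); alloc_ffs (0x41 >> 1) returns 6, giving
   6 * BYTES_PER_ALLOC_BIT == 48 bytes.  */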

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
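
/* Worked example (illustrative): for value == 0x14 (binary 10100),
   the shift-or cascade smears the highest set bit downwards,
   10100 -> 11111; `value ^ (value >> 1)' then isolates that bit,
   11111 ^ 01111 == 10000, and alloc_ffs (0x10) returns 5, the 1-based
   index of the highest set bit.  */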

/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
#ifdef ENABLE_CHECKING
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}
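
/* For example (illustrative, with the defaults above): GGC_PAGE_SIZE
   is 4096 and, assuming 8-byte mark units and a 32-bit mark_type,
   BYTES_PER_MARK_WORD is 8 * 8 * 4 == 256, so each small page needs
   4096 / 256 == 16 mark words.  */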

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}
991
992static inline void
993free_large_page (struct large_page_entry *entry)
994{
995 if (GGC_DEBUG_LEVEL >= 2)
996 fprintf (G.debug_file,
997 "Deallocating %s page at %p, data %p-%p\n",
998 entry->common.zone->name, (PTR) entry,
999 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
1000
1001 gcc_assert (entry->common.large_p);
1002
1003 set_page_table_entry (entry->common.page, NULL);
1004 free (entry);
b6f61163
DB
1005}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct
                                                             alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct
                                                              alloc_chunk),
                                                    size
                                                    - sizeof (struct
                                                              alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct
                                                             alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
                                                    + sizeof (struct
                                                              alloc_chunk *),
                                                    size
                                                    - sizeof (struct
                                                              alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
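
/* Usage sketch (illustrative, assuming MAX_ALIGNMENT == 8): freeing a
   48-byte chunk computes bin == 6, pushes the chunk onto
   zone->free_chunks[6] and raises zone->high_free_bin to at least 6.
   Freeing a 1024-byte chunk computes bin == 128 > NUM_FREE_BINS, so
   the size is recorded in the chunk itself and it goes onto the
   variable-sized list in slot 0.  */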

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
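
  /* For example (illustrative, with MAX_ALIGNMENT == 8): a request for
     21 bytes becomes (21 + 7) & -8 == 24, the next multiple of the
     alignment; already-aligned requests are unchanged.  */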

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}

/* Allocate a chunk of memory of SIZE bytes whose type is GTE, into an
   appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  memset ((PTR), 0xa5, (SIZE))
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
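
  /* For example (illustrative): with 4096-byte system pages and
     GGC_PAGE_SIZE == 4096, quire_size is 16, so each mmap call grabs
     64kB and the fifteen extra pages go onto the free list.  */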

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      {
        munmap (p, G.pagesize);
      }
  }
#endif
}

/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
  new_ggc_zone_1 (new_zone, name);
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */
    continue;

  /* We should have found the zone in the list.  Anything else is fatal.  */
  gcc_assert (z);

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}
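
/* Note that a zone retired here is only flagged; it is actually
   unchained, its pages released and its memory freed at the end of the
   next ggc_collect, once the sweep has verified that it is empty.  */
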
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all-or-none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
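
      /* As an illustration, using an 8-bit word for brevity (the real
         alloc_type is wider): if alloc_word is 0x13 (binary 00010011),
         objects start at chunk offsets 0, 1 and 4, and alloc_ffs
         returns 1, 1 and 3 on successive passes through the loop
         below.  An object is live iff the mark bit lined up with its
         alloc bit is set; runs of dead chunks between live objects
         are coalesced and returned to the free lists.  */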

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                                        (last_free, object - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                         object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}

/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* Debugging code: dump the per-bin free chunk counts and sizes.  */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
        struct alloc_chunk *chunk;
        int n, tot;

        n = 0;
        tot = 0;
        chunk = zone->free_chunks[i];
        while (chunk)
          {
            n++;
            tot += chunk->size;
            chunk = chunk->next_free;
          }
        fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
                 i, n, tot);
      }
  }
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
             zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}

#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival / count;
}
#endif

/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }
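
  /* As an example, with hypothetical values --param ggc-min-heapsize=65536
     (KB) and --param ggc-min-expand=30 (the real defaults are tuned to
     the host's memory), we would return early above until total
     allocation since the last collection exceeds 64MB plus 30%,
     i.e. roughly 83MB.  */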

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
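
/* For example, SCALE (5000) is 5000 and LABEL (5000) is ' ', since
   values under 10K print in bytes; SCALE (2000000) is 1953 and LABEL
   (2000000) is 'k', since values under 10M print in kilobytes; larger
   values print in megabytes with an 'M' suffix.  */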

void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }
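
      /* For instance, a zone whose three small pages sum to
         3 * GGC_PAGE_SIZE bytes, with 1K of chunks on its free lists,
         reports allocated == 3 * GGC_PAGE_SIZE and in_use 1K less
         than that (hypothetical numbers, purely to show the
         accounting).  */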

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS (gt_types_enum_last + 3)

#define OTHER_BUCKET (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
#define STRING_BUCKET (gt_types_enum_last + 2)

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
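
/* So, for example, an IDENTIFIER_NODE lands in IDENTIFIER_BUCKET even
   though its tree type is known; a string (IS_STRING set) lands in
   STRING_BUCKET; any other object of known type is bucketed by that
   type; and objects of unknown, non-string type fall into
   OTHER_BUCKET.  */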

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  enum gt_types_enum i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
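
/* As a worked example, assuming GGC_PAGE_SIZE of 4K and
   BYTES_PER_ALLOC_BIT of 8: a bucket holding 5000 bytes of objects is
   rounded up to 8192 bytes, and the allocation bitmap adds one bit
   per 8 bytes of data, i.e. CEIL (d->d.total, 64) bytes rounded up to
   MAX_ALIGNMENT.  */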

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}
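
/* For instance, for an object at byte offset 4096 from the PCH base,
   assuming BYTES_PER_ALLOC_BIT of 8 and a 32-bit alloc_type: the
   offset spans 4096 / 8 == 512 alloc bits, so alloc_word is
   512 / 32 == 16 and alloc_bit is 512 % 32 == 0.  */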

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}

/* Write the allocation bitmap and the footer to file F, and free D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}
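
/* The on-disk layout produced above is thus: the objects themselves,
   starting at start_offset; the allocation bitmap at start_offset +
   d.total; and the ggc_pch_ondisk footer immediately after the
   bitmap, which ggc_pch_read reads back below.  */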

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}