1/* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
4
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 2, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING. If not, write to the Free
20Software Foundation, 59 Temple Place - Suite 330, Boston, MA
2102111-1307, USA. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

/* NOTES:
     If we track inter-zone pointers, we can mark single zones at a
     time.
     If we have a zone where we guarantee no inter-zone pointers, we
     could mark that zone separately.
     The garbage zone should not be marked, and we should return 1 in
     ggc_set_mark for any object in the garbage zone, which cuts off
     marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif

/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:15;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
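
/* Illustrative note (not from the original sources): because the
   struct is __attribute__ ((packed)) and the four bit-fields above
   total exactly 32 bits, CHUNK_OVERHEAD works out to 4 bytes on a
   host with 32-bit ints when COOKIE_CHECKING is off, and 8 bytes with
   the magic cookie enabled.  The authoritative value is whatever
   offsetof reports for the host.  */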

/* We maintain several bins of free lists for chunks, for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to
   allocate.  Similarly, increasing max_free_bin_size without increasing
   num_free_bins slows allocation down.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 (667 MHz) and a Pentium 4 (2.8 GHz),
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
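
/* Worked example of the bin arithmetic (illustrative): with the
   values above, FREE_BIN_DELTA is 256 / 64 = 4.  For a chunk of 10
   bytes, SIZE_BIN_UP (10) = (10 + 3) / 4 = 3 -- rounding up when
   allocating guarantees the chunk found is big enough -- while
   SIZE_BIN_DOWN (10) = 2 -- rounding down when freeing guarantees a
   bin never promises more space than the chunk really has.  (Real
   requests are first padded to MAX_ALIGNMENT in ggc_alloc_zone_1.)  */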

/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
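
/* Sanity check of the rounding macros (illustrative):
   ROUND_UP_VALUE (10, 8) = 7 - ((7 + 10) % 8) = 7 - 1 = 6, and
   10 + 6 = 16 is indeed the next multiple of 8;
   ROUND_UP (10, 8) = CEIL (10, 8) * 8 = 2 * 8 = 16 agrees.  */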

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32           |      |
       msb +----------------+----+------+------+ lsb
			    |    |
			 PAGE_L1_BITS    |
				 |       |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
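
/* Worked decomposition (illustrative, assuming 4K pages, i.e.
   G.lg_pagesize == 12 and hence PAGE_L2_BITS == 32 - 8 - 12 == 12):
   for p == 0xdeadbeef, LOOKUP_L1 (p) == 0xde (the top 8 bits) and
   LOOKUP_L2 (p) == (0xdeadbeef >> 12) & 0xfff == 0xadb, so the page
   entry is found at lookup[0xde][0xadb]; the low 12 bits (0xeef) are
   an offset within the page and are ignored.  */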

/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Number of bytes on the page unallocated.  Only used during
     collection, and even then large pages merely set this nonzero.  */
  size_t bytes_free;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The page lookup table.  A single page can only belong to one
     zone.  This means free pages are zone-specific ATM.  */
  page_table lookup;
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  A chunk in slot I has size at
     least I * FREE_BIN_DELTA but less than (I + 1) * FREE_BIN_DELTA,
     for 1 <= I <= NUM_FREE_BINS; all other chunks are kept in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
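
/* Illustrative note: with one bit per page, a group's in_use mask
   fits in an unsigned int as long as GGC_QUIRE_SIZE does not exceed
   HOST_BITS_PER_INT; page_group_index simply turns the byte offset of
   PAGE within the group's allocation into that bit position.  */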

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = entry->group;
#endif
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      /* This path only ever allocates a single small page, so the
	 entry size is always exactly one system page.  */
      size_t entry_size = G.pagesize;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);
      VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *) page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *) enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = zone->page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      zone->page_groups = group;
      zone->bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = zone->free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  zone->free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->bytes_free = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = NULL;
#endif
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

#ifdef USING_MALLOC_PAGE_GROUPS
      clear_page_group_in_use (entry->group, entry->page);
#endif

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &(zone->free_pages);
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &(zone->page_groups);
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	zone->bytes_mapped -= g->alloc_size;
	free (g->allocation);
	VALGRIND_FREELIKE_BLOCK (g->allocation, 0);
      }
    else
      gp = &g->next;
#endif
}

/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}

/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
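
  /* Illustrative example (assuming MAX_ALIGNMENT is 8 on this host):
     a 10-byte request is already at least FREE_BIN_DELTA, and the
     mask above then rounds it up to 16, so every size handed out is
     a multiple of the strictest alignment the host requires.  */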

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2 * CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = LARGE_OBJECT_SIZE;

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *) (chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *) chunk, (unsigned long) size, result);

  return result;
}

/* Allocate a chunk of memory of SIZE bytes, of type GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif
  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

#ifndef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
#endif

  /* Large pages are either completely full or completely empty.  So if
     they are marked, they are completely full.  */
  if (entry->large_p)
    entry->bytes_free = 0;
  else
    entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  {
    page_entry *entry = lookup_page_table_entry (p);
    if (entry == NULL)
      abort ();
  }
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;
  struct page_entry *entry;

#ifdef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->size == LARGE_OBJECT_SIZE)
    {
#ifndef ENABLE_CHECKING
      entry = lookup_page_table_entry (p);
#endif
      return entry->bytes;
    }

  return chunk->size;
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t) p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long) 1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;
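
  /* Worked example (illustrative): popping from depth 3 back to
     depth 2 gives omask == 1 << 3.  If bit 3 of
     context_depth_allocations was set, the >> 1 above folds it down
     into bit 2, and masking with omask - 1 (== 0x7) then clears bit 3
     and everything above it in both bitmaps, so the popped context
     leaves no stale records behind.  */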

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}

/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p; p = next)
    {
      next = p->next;

      /* For empty pages, just free the page.  */
      if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}

      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.
	 Thus, if the above didn't catch it, we need not do anything
	 except remove the mark and reset the bytes_free.

	 XXX: Should we bother to increment allocated?  */
      else if (p->large_p)
	{
	  p->bytes_free = p->bytes;
	  ((struct alloc_chunk *) p->page)->mark = 0;
	  continue;
	}
      pp = &p->next;

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *) p->page;
      end = (struct alloc_chunk *) (p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;

      do
	{
	  prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		{
		  allocated += chunk->size + CHUNK_OVERHEAD;
		  p->bytes_free += chunk->size + CHUNK_OVERHEAD;
		}
	      chunk->mark = 0;
#ifdef ENABLE_CHECKING
	      if (p->bytes_free > p->bytes)
		abort ();
#endif
	    }
	  else
	    {
	      if (last_free)
		{
		  last_free_size += CHUNK_OVERHEAD + chunk->size;
		}
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
    }

  zone->allocated = allocated;
}

/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!zone->dead)
    {
      /* Avoid frequent unnecessary work by skipping collection if the
	 total allocations haven't expanded much since the last
	 collection.  */
      float allocated_last_gc =
	MAX (zone->allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

      float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (zone->allocated < allocated_last_gc + min_expand)
	return false;
    }

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long) 1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}

/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}

/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}

/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			{
			  if (chunk->mark)
			    p->bytes_free += chunk->size + CHUNK_OVERHEAD;
#ifdef ENABLE_CHECKING
			  if (p->bytes_free > p->bytes)
			    abort ();
#endif
			  chunk->mark = 0;
			}
		      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		{
		  p->bytes_free = p->bytes;
		  ((struct alloc_chunk *) p->page)->mark = 0;
		}
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      if (chunk->size == LARGE_OBJECT_SIZE)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
  if (d->written == d->d.total
      && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

/* Finish writing the PCH data: write out the on-disk totals and
   release the bookkeeping structure D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Read in the PCH data from file F, registering the pre-allocated
   region at ADDR in the main zone's page table.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  char *pte;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  entry->zone = &main_zone;
  for (pte = entry->page;
       pte < entry->page + entry->bytes;
       pte += G.pagesize)
    set_page_table_entry (pte, entry);
}