21341cfd 1/* "Bag-of-pages" garbage collector for the GNU compiler.
c4f2c499 2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
21341cfd 3
1322177d 4This file is part of GCC.
21341cfd 5
1322177d
LB
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 2, or (at your option) any later
9version.
21341cfd 10
1322177d
LB
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
21341cfd 15
b9bfacf0 16You should have received a copy of the GNU General Public License
1322177d
LB
17along with GCC; see the file COPYING. If not, write to the Free
18Software Foundation, 59 Temple Place - Suite 330, Boston, MA
1902111-1307, USA. */
21341cfd 20
21341cfd 21#include "config.h"
21341cfd
AS
22#include "system.h"
23#include "tree.h"
e5ecd4ea 24#include "rtl.h"
1b42a6a9 25#include "tm_p.h"
b9bfacf0 26#include "toplev.h"
21341cfd
AS
27#include "varray.h"
28#include "flags.h"
e5ecd4ea 29#include "ggc.h"
2a9a326b 30#include "timevar.h"
e5ecd4ea 31
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS   |
                                 |      |
                              PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

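/* For illustration only (not used by the collector): on a host with 4K
   pages, G.lg_pagesize == 12, so PAGE_L2_BITS == 32 - 8 - 12 == 12.  A
   pointer such as p == (void *) 0x0804a6c0 then decomposes as

       LOOKUP_L1 (p) == 0x08     -- top 8 bits
       LOOKUP_L2 (p) == 0x04a    -- next 12 bits

   and its page entry lives at G.lookup[0x08][0x04a], with the low 12
   bits giving the offset within the page.  */
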
2be510b8
MM
154/* The number of objects per allocation page, for objects on a page of
155 the indicated ORDER. */
156#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
157
158/* The size of an object on a page of the indicated ORDER. */
159#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160
8537ed68
ZW
161/* For speed, we avoid doing a general integer divide to locate the
162 offset in the allocation bitmap, by precalculating numbers M, S
163 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
164 within the page which is evenly divisible by the object size Z. */
165#define DIV_MULT(ORDER) inverse_table[ORDER].mult
166#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
167#define OFFSET_TO_BIT(OFFSET, ORDER) \
168 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
169
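/* A worked example (illustrative, assuming 32-bit unsigned arithmetic):
   for an order with OBJECT_SIZE == 24 == 3 * 2^3 (say, a small extra
   order on a 32-bit host), compute_inverse below yields DIV_SHIFT == 3
   and DIV_MULT == 0xaaaaaaab, the inverse of 3 modulo 2^32 (since
   3 * 0xaaaaaaab == 0x200000001 == 1 mod 2^32).  Then for the in-page
   offset 48,

       OFFSET_TO_BIT (48, order)
         == (48 * 0xaaaaaaab mod 2^32) >> 3
         == 16 >> 3
         == 2,

   which is indeed 48 / 24, without a hardware divide.  */
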
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  RTL_SIZE (2),			/* REG, MEM, PLUS, etc.  */
  RTL_SIZE (10),		/* INSN, CALL_INSN, JUMP_INSN */
};

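/* A hypothetical example (not in the real table): to give 9-operand
   RTXes their own bucket, one would merely append

       RTL_SIZE (9),

   above; init_ggc then sizes the new order, rounds it up to
   MAX_ALIGNMENT, and (for sizes up to 256 bytes) redirects the
   size_lookup fast path to it automatically.  */
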
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  unsigned int mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two-level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two-level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

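/* For example (assuming a 32-bit host, HOST_BITS_PER_LONG == 32): a 4K
   page of 8-byte objects holds 512 of them, and alloc_page asks for
   BITMAP_SIZE (512 + 1) == CEIL (513, 32) * 4 == 17 * 4 == 68 bytes,
   the extra bit being the one-past-the-end sentinel.  */
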
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)

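/* An illustrative calculation: right after a collection that left 10MB
   live, the next collection is not attempted until G.allocated reaches
   1.3 * 10MB == 13MB.  And since allocated_last_gc is clamped below by
   GGC_MIN_LAST_ALLOCATED, a freshly started compiler never collects
   before roughly 1.3 * 4MB == 5.2MB has been allocated.  */
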
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
\f
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index PARAMS ((char *, char *));
static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
static void compute_inverse PARAMS ((unsigned));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
\f
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = (struct page_entry *) xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};

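/* An illustrative trace (not part of the allocator): a request for 10
   bytes takes the fast path below, since 10 <= 256: size_lookup[10]
   == 4, so the object comes from an order-4 page and actually occupies
   OBJECT_SIZE (4) == 16 bytes.  A 300-byte request instead scans
   upward from order 9 (512 bytes) until OBJECT_SIZE (order) fits.  */
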
/* Allocate a chunk of memory of SIZE bytes.  Its contents are
   undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
	     (PTR) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (order)
     unsigned order;
{
  unsigned size, inv, e;

  /* There can be only one object per "page" in a bucket for sizes
     larger than half a machine page; it will always have offset zero.  */
  if (OBJECT_SIZE (order) > G.pagesize/2)
    {
      if (OBJECTS_PER_PAGE (order) != 1)
	abort ();

      DIV_MULT (order) = 1;
      DIV_SHIFT (order) = 0;
      return;
    }

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
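
/* As an illustration: with size == 3 (e.g. a 24-byte order after the
   2^3 factor is stripped), the Newton iteration above starts at
   inv == 3, which is already correct modulo 2^3 (every odd number is
   its own inverse mod 8).  Each pass roughly doubles the number of
   correct low-order bits, and the loop stops at 0xaaaaaaab, since
   3 * 0xaaaaaaab == 0x200000001 == 1 (mod 2^32).  */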

/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      ggc_recalculate_in_use_p (p);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
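
/* A sketch of how the context machinery is typically used (illustrative
   only; `scratch' is a hypothetical variable):

       ggc_push_context ();
       scratch = ggc_alloc (sizeof (struct tree_list));
       ... build temporary trees; an intervening ggc_collect () may
           free unmarked objects from this context only ...
       ggc_pop_context ();   -- survivors migrate to the outer context

   Objects allocated before the push are never collected while the
   inner context is live, which is why their roots need not be
   re-registered.  */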
\f
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! p->save_in_use_p)
		p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}

#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL (G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}