/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"

#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

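/* For example, assuming a 4096-byte system page: a 24-byte allocation
   request is rounded up to 32 bytes (order 5) and satisfied from a page
   holding 4096 / 32 = 128 such objects, each tracked by one bit in that
   page's in-use bitmap.  */
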
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* The "" allocated string.  */
char *empty_string;
\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

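/* A worked example, assuming 4096-byte pages (G.lg_pagesize == 12), so
   PAGE_L2_BITS is 32 - 8 - 12 = 12: the address 0x0804a6c0 splits into
   L1 index 0x08 (bits 31-24), L2 index 0x04a (bits 23-12), and a 12-bit
   in-page offset that the lookup ignores.  */
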
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;


#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;


/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

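/* A worked example, assuming 4096-byte pages and 32-bit longs:
   OBJECTS_PER_PAGE (5) is 4096 / 32 = 128, and the bitmap for those
   objects plus the one-past-the-end sentry bit needs
   BITMAP_SIZE (129) = DIV_ROUND_UP (129, 32) * 4 = 5 * 4 = 20 bytes.  */
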
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)

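/* With these defaults, the first collection happens only after more than
   1.3 * 4MB (roughly 5.2MB) of GC-able memory has been allocated; after
   that, each collection waits for a further 30% growth over whatever
   survived the previous one, never measured against less than the 4MB
   floor.  */
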
\f
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
\f
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
  else
    {
      /* Actually allocate the memory.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

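  /* For instance, with 128 objects on the page and 32-bit longs, the
     sentry lands in in_use_p[128 / 32], i.e. bit 0 of the fifth bitmap
     word, just past the last real object's bit.  */
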
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n", entry,
             1 << order, page, page + entry_size - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef HAVE_MMAP_ANYWHERE
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p)
    {
      next = p->next;
      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
        len += p->bytes;
      else
        {
          munmap (start, len);
          G.bytes_mapped -= len;
          start = p->page;
          len = p->bytes;
        }
      free (p);
      p = next;
    }

  munmap (start, len);
  G.bytes_mapped -= len;
#else
#ifdef HAVE_VALLOC
  page_entry *p, *next;

  for (p = G.free_pages; p ; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  G.free_pages = NULL;
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};

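/* So, for example, size_lookup[5] == 3: a 5-byte request is served from an
   8-byte (order 3) object.  Requests larger than 256 bytes bypass the table
   and compute the order with the loop in ggc_alloc_obj below.  */
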
/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */

void *
ggc_alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested area,
     so that bytes beyond the end, if any, will not necessarily be zero.  */
  memset (result, 0xaf, 1 << order);
#endif

  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

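  /* For example, on a page of order-5 (32-byte) objects with 32-bit longs,
     an object 224 bytes into the page is object number 224 >> 5 == 7, so
     its mark lives in in_use_p[0] under the mask 1UL << 7.  */
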
  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return 1 << pe->order;
}
\f
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP_ANYWHERE
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        if ((size_t)p & (G.pagesize - 1))
          abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
                         sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            p->context_depth = depth;

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              ggc_recalculate_in_use_p (p);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
            }
        }
    }
}
\f
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! p->save_in_use_p)
                p->save_in_use_p = xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (p->num_free_objects == num_objects)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}

#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                memset (p->page + i * size, 0xa5, size);
            }
        }
    }
}
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  long time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  if (!quiet_flag)
    {
      fprintf (stderr, "%luk in %.3f}",
               (unsigned long) G.allocated / 1024, time * 1e-6);
    }
}

/* Print allocation statistics.  */

void
ggc_page_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-4s%-16s%-16s\n", "Log", "Allocated", "Used");
  for (i = 0; i < HOST_BITS_PER_PTR; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
        }
      fprintf (stderr, "%-3d %-15lu %-15lu\n", i,
               (unsigned long) allocated, (unsigned long) in_use);
    }

  /* Print out some global information.  */
  fprintf (stderr, "\nTotal bytes marked: %lu\n",
           (unsigned long) G.allocated);
  fprintf (stderr, "Total bytes mapped: %lu\n",
           (unsigned long) G.bytes_mapped);
}