21341cfd 1/* "Bag-of-pages" garbage collector for the GNU compiler.
ad616de1 2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
283334f0 3 Free Software Foundation, Inc.
21341cfd 4
1322177d 5This file is part of GCC.
21341cfd 6
1322177d
LB
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 2, or (at your option) any later
10version.
21341cfd 11
1322177d
LB
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
21341cfd 16
b9bfacf0 17You should have received a copy of the GNU General Public License
1322177d
LB
18along with GCC; see the file COPYING. If not, write to the Free
19Software Foundation, 59 Temple Place - Suite 330, Boston, MA
2002111-1307, USA. */
21341cfd 21
21341cfd 22#include "config.h"
21341cfd 23#include "system.h"
4977bab6
ZW
24#include "coretypes.h"
25#include "tm.h"
21341cfd 26#include "tree.h"
e5ecd4ea 27#include "rtl.h"
1b42a6a9 28#include "tm_p.h"
b9bfacf0 29#include "toplev.h"
21341cfd 30#include "flags.h"
e5ecd4ea 31#include "ggc.h"
2a9a326b 32#include "timevar.h"
3788cc17 33#include "params.h"
07724022 34#include "tree-flow.h"
9a0a7d5d 35#ifdef ENABLE_VALGRIND_CHECKING
a207b594
HPN
36# ifdef HAVE_VALGRIND_MEMCHECK_H
37# include <valgrind/memcheck.h>
38# elif defined HAVE_MEMCHECK_H
39# include <memcheck.h>
14011ca4 40# else
a207b594 41# include <valgrind.h>
14011ca4 42# endif
9a0a7d5d
HPN
43#else
44/* Avoid #ifdef:s when we can help it. */
45#define VALGRIND_DISCARD(x)
46#endif
e5ecd4ea 47
825b6926
ZW
48/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
49 file open. Prefer either to valloc. */
50#ifdef HAVE_MMAP_ANON
51# undef HAVE_MMAP_DEV_ZERO
825b6926
ZW
52
53# include <sys/mman.h>
54# ifndef MAP_FAILED
55# define MAP_FAILED -1
56# endif
57# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
58# define MAP_ANONYMOUS MAP_ANON
59# endif
60# define USING_MMAP
61
005537df 62#endif
21341cfd 63
825b6926 64#ifdef HAVE_MMAP_DEV_ZERO
825b6926
ZW
65
66# include <sys/mman.h>
67# ifndef MAP_FAILED
68# define MAP_FAILED -1
69# endif
70# define USING_MMAP
71
8342b467
RH
72#endif
73
130fadbb
RH
74#ifndef USING_MMAP
75#define USING_MALLOC_PAGE_GROUPS
5b918807 76#endif
21341cfd 77
589005ff 78/* Stategy:
21341cfd
AS
79
80 This garbage-collecting allocator allocates objects on one of a set
81 of pages. Each page can allocate objects of a single size only;
82 available sizes are powers of two starting at four bytes. The size
83 of an allocation request is rounded up to the next power of two
84 (`order'), and satisfied from the appropriate page.
85
86 Each page is recorded in a page-entry, which also maintains an
87 in-use bitmap of object positions on the page. This allows the
88 allocation state of a particular object to be flipped without
89 touching the page itself.
90
91 Each page-entry also has a context depth, which is used to track
92 pushing and popping of allocation contexts. Only objects allocated
589005ff 93 in the current (highest-numbered) context may be collected.
21341cfd
AS
94
95 Page entries are arranged in an array of singly-linked lists. The
96 array is indexed by the allocation size, in bits, of the pages on
97 it; i.e. all pages on a list allocate objects of the same size.
98 Pages are ordered on the list such that all non-full pages precede
99 all full pages, with non-full pages arranged in order of decreasing
100 context depth.
101
102 Empty pages (of all orders) are kept on a single page cache list,
103 and are considered first when new pages are required; they are
104 deallocated at the start of the next collection if they haven't
105 been recycled by then. */
106
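
/* For illustration, assuming a 4096-byte page size and ignoring the
   extra orders defined below: a 10-byte request is rounded up to the
   16-byte order, so it is satisfied from a page holding 4096/16 ==
   256 objects, whose in-use bitmap covers 256 bits plus a
   one-past-the-end sentinel bit.  */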

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |      |
                         PAGE_L1_BITS
                                   |
                                 PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
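
/* A worked example, assuming 4K pages (G.lg_pagesize == 12), so that
   PAGE_L2_BITS == 32 - 8 - 12 == 12.  For the 32-bit pointer
   0xdeadbeef:

     LOOKUP_L1 (p) == (0xdeadbeef >> 24) & 0xff  == 0xde
     LOOKUP_L2 (p) == (0xdeadbeef >> 12) & 0xfff == 0xadb

   so its page-entry lives at G.lookup[0xde][0xadb]; the ignored low
   12 bits (0xeef) are the object's offset within the page.  */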

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
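
/* For example, take a 24-byte object size, working modulo 2^32 as in
   the comment above: the odd factor of 24 is 3, whose inverse modulo
   2^32 is 0xaaaaaaab, so compute_inverse below stores DIV_MULT ==
   0xaaaaaaab and DIV_SHIFT == 3.  For the in-page offset 48,
   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */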

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct stmt_ann_d),
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),		/* MEM, PLUS, etc.  */
  RTL_SIZE (9),		/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
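
/* For example, if MAX_ALIGNMENT is 8, then ROUND_UP_VALUE (20, 8) == 4
   and ROUND_UP (20, 8) == 24, so a 20-byte extra-order object would be
   padded out to 24 bytes by init_ggc below.  */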

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
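
/* For example, on a host with 64-bit longs, a page holding 128 objects
   needs BITMAP_SIZE (128 + 1) == CEIL (129, 64) * sizeof (long)
   == 3 * 8 == 24 bytes of bitmap; the extra bit is the
   one-past-the-end sentinel.  */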

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = xrealloc (G.by_depth,
                             G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = xrealloc (G.save_in_use,
                                G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
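
/* For example, with 4K pages, a page starting 3 pages into its
   group's allocation has page_group_index == 0x3000 >> 12 == 3, so
   set_page_group_in_use sets bit 3 of group->in_use; the group's
   storage can be returned to malloc only once every such bit has been
   cleared again.  */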

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = xcalloc (1, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = xcalloc (1, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
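
/* For example, size_lookup[24] == 5 as initialized above, so a
   24-byte request would round up to 32 bytes; init_ggc below remaps
   such entries onto any extra order whose object size fits more
   tightly.  */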

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 9;
      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL PREV pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
                                            object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = xmalloc (sizeof (struct free_object));
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
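
/* An illustrative trace, assuming a 32-bit size_t: for OBJECT_SIZE
   == 24 == 3 * 2^3, the first loop strips e == 3 factors of two, and
   the Newton iteration on the odd part 3 doubles the number of
   correct low-order bits each step:

     inv: 3 -> 0xffffffeb -> 0xfffffaab -> 0xffaaaaab -> 0xaaaaaaab

   Indeed 3 * 0xaaaaaaab == 2 * 2^32 + 1 == 1 (mod 2^32), so DIV_MULT
   == 0xaaaaaaab and DIV_SHIFT == 3, as used by OFFSET_TO_BIT.  */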
1429
1430/* Initialize the ggc-mmap allocator. */
21341cfd 1431void
20c1dc5e 1432init_ggc (void)
21341cfd 1433{
2be510b8
MM
1434 unsigned order;
1435
21341cfd
AS
1436 G.pagesize = getpagesize();
1437 G.lg_pagesize = exact_log2 (G.pagesize);
1438
825b6926 1439#ifdef HAVE_MMAP_DEV_ZERO
21341cfd
AS
1440 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1441 if (G.dev_zero_fd == -1)
c770ac2b 1442 internal_error ("open /dev/zero: %m");
21341cfd
AS
1443#endif
1444
1445#if 0
1446 G.debug_file = fopen ("ggc-mmap.debug", "w");
1447#else
1448 G.debug_file = stdout;
1449#endif
1450
825b6926 1451#ifdef USING_MMAP
1b3e1423
RH
1452 /* StunOS has an amazing off-by-one error for the first mmap allocation
1453 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1454 believe, is an unaligned page allocation, which would cause us to
1455 hork badly if we tried to use it. */
1456 {
1457 char *p = alloc_anon (NULL, G.pagesize);
825b6926 1458 struct page_entry *e;
1b3e1423
RH
1459 if ((size_t)p & (G.pagesize - 1))
1460 {
1461 /* How losing. Discard this one and try another. If we still
1462 can't get something useful, give up. */
1463
1464 p = alloc_anon (NULL, G.pagesize);
282899df 1465 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1b3e1423 1466 }
825b6926 1467
dc297297 1468 /* We have a good page, might as well hold onto it... */
703ad42b 1469 e = xcalloc (1, sizeof (struct page_entry));
825b6926
ZW
1470 e->bytes = G.pagesize;
1471 e->page = p;
1472 e->next = G.free_pages;
1473 G.free_pages = e;
1b3e1423
RH
1474 }
1475#endif
2be510b8
MM
1476
1477 /* Initialize the object size table. */
1478 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1479 object_size_table[order] = (size_t) 1 << order;
1480 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
b1095f9c
MM
1481 {
1482 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1483
1484 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1485 so that we're sure of getting aligned memory. */
17211ab5 1486 s = ROUND_UP (s, MAX_ALIGNMENT);
b1095f9c
MM
1487 object_size_table[order] = s;
1488 }
2be510b8 1489
8537ed68 1490 /* Initialize the objects-per-page and inverse tables. */
2be510b8
MM
1491 for (order = 0; order < NUM_ORDERS; ++order)
1492 {
1493 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1494 if (objects_per_page_table[order] == 0)
1495 objects_per_page_table[order] = 1;
8537ed68 1496 compute_inverse (order);
2be510b8
MM
1497 }
1498
1499 /* Reset the size_lookup array to put appropriately sized objects in
1500 the special orders. All objects bigger than the previous power
1501 of two, but no greater than the special size, should go in the
1502 new order. */
1503 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1504 {
1505 int o;
1506 int i;
1507
1508 o = size_lookup[OBJECT_SIZE (order)];
1509 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1510 size_lookup[i] = order;
1511 }
c4775f82
MS
1512
1513 G.depth_in_use = 0;
1514 G.depth_max = 10;
703ad42b 1515 G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
c4775f82
MS
1516
1517 G.by_depth_in_use = 0;
1518 G.by_depth_max = INITIAL_PTE_COUNT;
703ad42b
KG
1519 G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1520 G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
21341cfd
AS
1521}
1522
47aeffac
SB
1523/* Start a new GGC zone. */
1524
1525struct alloc_zone *
1526new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1527{
1528 return NULL;
1529}
1530
1531/* Destroy a GGC zone. */
1532void
1533destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1534{
1535}
1536
cb2ec151
RH
1537/* Increment the `GC context'. Objects allocated in an outer context
1538 are never freed, eliminating the need to register their roots. */
21341cfd
AS
1539
1540void
20c1dc5e 1541ggc_push_context (void)
21341cfd
AS
1542{
1543 ++G.context_depth;
1544
1545 /* Die on wrap. */
282899df 1546 gcc_assert (G.context_depth < HOST_BITS_PER_LONG);
21341cfd
AS
1547}
1548
4934cc53
MM
1549/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1550 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1551
1552static void
20c1dc5e 1553ggc_recalculate_in_use_p (page_entry *p)
4934cc53
MM
1554{
1555 unsigned int i;
1556 size_t num_objects;
1557
589005ff 1558 /* Because the past-the-end bit in in_use_p is always set, we
4934cc53 1559 pretend there is one additional object. */
17211ab5 1560 num_objects = OBJECTS_IN_PAGE (p) + 1;
4934cc53
MM
1561
1562 /* Reset the free object count. */
1563 p->num_free_objects = num_objects;
1564
1565 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
589005ff 1566 for (i = 0;
2be510b8
MM
1567 i < CEIL (BITMAP_SIZE (num_objects),
1568 sizeof (*p->in_use_p));
4934cc53
MM
1569 ++i)
1570 {
1571 unsigned long j;
1572
1573 /* Something is in use if it is marked, or if it was in use in a
1574 context further down the context stack. */
c4775f82 1575 p->in_use_p[i] |= save_in_use_p (p)[i];
4934cc53
MM
1576
1577 /* Decrement the free object count for every object allocated. */
1578 for (j = p->in_use_p[i]; j; j >>= 1)
1579 p->num_free_objects -= (j & 1);
1580 }
1581
282899df 1582 gcc_assert (p->num_free_objects < num_objects);
4934cc53
MM
1583}
1584
589005ff 1585/* Decrement the `GC context'. All objects allocated since the
cb2ec151 1586 previous ggc_push_context are migrated to the outer context. */
21341cfd
AS
1587
1588void
20c1dc5e 1589ggc_pop_context (void)
21341cfd 1590{
52895e1a 1591 unsigned long omask;
c4775f82
MS
1592 unsigned int depth, i, e;
1593#ifdef ENABLE_CHECKING
1594 unsigned int order;
1595#endif
21341cfd
AS
1596
1597 depth = --G.context_depth;
52895e1a
RH
1598 omask = (unsigned long)1 << (depth + 1);
1599
1600 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1601 return;
1602
1603 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1604 G.context_depth_allocations &= omask - 1;
1605 G.context_depth_collections &= omask - 1;
21341cfd 1606
a98ebe2e 1607 /* The G.depth array is shortened so that the last index is the
c4775f82
MS
1608 context_depth of the top element of by_depth. */
1609 if (depth+1 < G.depth_in_use)
1610 e = G.depth[depth+1];
1611 else
1612 e = G.by_depth_in_use;
1613
1614 /* We might not have any PTEs of depth depth. */
1615 if (depth < G.depth_in_use)
20c1dc5e 1616 {
c4775f82
MS
1617
1618 /* First we go through all the pages at depth depth to
1619 recalculate the in use bits. */
1620 for (i = G.depth[depth]; i < e; ++i)
1621 {
282899df 1622 page_entry *p = G.by_depth[i];
c4775f82
MS
1623
1624 /* Check that all of the pages really are at the depth that
1625 we expect. */
282899df
NS
1626 gcc_assert (p->context_depth == depth);
1627 gcc_assert (p->index_by_depth == i);
c4775f82
MS
1628
1629 prefetch (&save_in_use_p_i (i+8));
1630 prefetch (&save_in_use_p_i (i+16));
1631 if (save_in_use_p_i (i))
1632 {
1633 p = G.by_depth[i];
1634 ggc_recalculate_in_use_p (p);
1635 free (save_in_use_p_i (i));
1636 save_in_use_p_i (i) = 0;
1637 }
1638 }
1639 }
1640
1641 /* Then, we reset all page_entries with a depth greater than depth
1642 to be at depth. */
1643 for (i = e; i < G.by_depth_in_use; ++i)
1644 {
1645 page_entry *p = G.by_depth[i];
1646
1647 /* Check that all of the pages really are at the depth we
1648 expect. */
282899df
NS
1649 gcc_assert (p->context_depth > depth);
1650 gcc_assert (p->index_by_depth == i);
c4775f82
MS
1651 p->context_depth = depth;
1652 }
1653
1654 adjust_depth ();
1655
1656#ifdef ENABLE_CHECKING
2be510b8 1657 for (order = 2; order < NUM_ORDERS; order++)
21341cfd 1658 {
21341cfd
AS
1659 page_entry *p;
1660
1661 for (p = G.pages[order]; p != NULL; p = p->next)
282899df
NS
1662 gcc_assert (p->context_depth < depth ||
1663 (p->context_depth == depth && !save_in_use_p (p)));
21341cfd 1664 }
c4775f82 1665#endif
21341cfd 1666}
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = xmalloc (bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the in-use
             bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
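          /* Editor's worked example: with 63 objects on a host where
             HOST_BITS_PER_LONG == 64, bits 0..62 cover the objects and
             bit 63 is the sentinel, so in_use_p[0] = 1UL << 63.  The
             sentinel guarantees that scans over the bitmap always
             terminate at the end of the page.  */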
        }
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
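
/* Editor's note on the resulting list shape: after sweep_pages runs,
   each G.pages[order] list reads, from head to tail, roughly

     [partial pages, topmost context] ... [outer-context pages] ... [full pages]

   which keeps the cheapest allocation candidates at the front.  */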

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
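              /* Editor's worked example: with 64-bit longs, object
                 i == 70 maps to word 1, bit 6 of in_use_p.  */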
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics are kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid a handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *) f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
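  /* Editor's worked example: with the documented defaults of
     --param ggc-min-heapsize=4096 (KB) and --param ggc-min-expand=30
     (both tuned to available RAM by init_ggc_heuristics),
     allocated_last_gc is at least 4 MB, min_expand is 1.2 MB, and the
     collector does not run until G.allocated exceeds about 5.2 MB.  */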

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
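
/* Editor's worked example: SCALE/STAT_LABEL print a quantity in the
   widest unit that keeps at least two significant digits:
   SCALE (5000) == 5000 with label ' ', SCALE (20480) == 20 with
   label 'k', and SCALE (31457280) == 30 with label 'M'.  */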

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
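          /* Editor's note: the `- sizeof (long)' term accounts for the
             one-long in_use_p[1] placeholder at the end of page_entry;
             the real bitmap (one bit per object, plus the sentinel bit)
             replaces that final long, as in the xcalloc in
             ggc_pch_read below.  */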
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL (G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}
\f
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
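
/* Editor's note: a sketch of the call sequence the PCH writer
   (gt_pch_save in ggc-common.c) is expected to follow, under the
   interface declared in ggc.h:

     d = init_ggc_pch ();
     for each object: ggc_pch_count_object (d, x, size, ...);
     total = ggc_pch_total_size (d);
     ggc_pch_this_base (d, mapped_base);
     for each object: newx = ggc_pch_alloc_object (d, x, size, ...);
     ggc_pch_prepare_write (d, f);
     for each object: ggc_pch_write_object (d, f, x, newx, size, ...);
     ggc_pch_finish (d, f);

   The counting pass sizes a page-aligned region per order; the
   allocation pass then hands out consecutive addresses within it.  */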

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }
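  /* Editor's worked example: a 40-byte object is looked up directly in
     size_lookup; a 300-byte object starts at order 9 and advances until
     OBJECT_SIZE (order) reaches at least 300.  The 256-byte cutoff and
     the starting order mirror the static size_lookup table defined
     earlier in this file.  */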

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
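
/* Editor's worked example for the two functions above: with 4K pages,
   100 objects of a 16-byte order and 3 objects of a 4096-byte order
   need ROUND_UP (1600, 4096) + ROUND_UP (12288, 4096) == 4096 + 12288
   == 16384 bytes in total; ggc_pch_this_base then carves that span
   into consecutive page-aligned regions, one per order.  */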

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
     the object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }
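  /* Editor's worked example: a 29-byte string destined for a 32-byte
     order writes 29 data bytes followed by 3 bytes from emptyBytes, so
     every on-disk object occupies exactly OBJECT_SIZE (order) bytes
     and the reader can map the region page-for-page.  */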

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old PTEs, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
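
/* Editor's sketch of the swap above, with 2 old and 3 new page tables:

     before:  by_depth = [old0, old1, new0, new1, new2]
     after:   by_depth = [new0, new1, new2, old0, old1]

   The PCH pages live at context depth 0 and so must come first; G.depth
   then gains a second entry pointing at index 3 (count_new_page_tables),
   where the old depth-1 entries now begin.  */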

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
                           - sizeof (long)
                           + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);
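      /* Editor's worked example: every object in a PCH page is live,
         so all num_objs in-use bits plus the sentinel are set.  With
         num_objs == 100 and 64-bit longs, the first loop fills word 0
         with all ones (bits 0..63) and the second loop sets bits
         64..100, which land in word 1.  */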

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}