21341cfd 1/* "Bag-of-pages" garbage collector for the GNU compiler.
d1e082c2 2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
21341cfd 3
1322177d 4This file is part of GCC.
21341cfd 5
1322177d
LB
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
9dcd6f09 8Software Foundation; either version 3, or (at your option) any later
1322177d 9version.
21341cfd 10
1322177d
LB
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
21341cfd 15
b9bfacf0 16You should have received a copy of the GNU General Public License
9dcd6f09
NC
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
21341cfd 19
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

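/* For example, absent the extra orders defined further below, a 20-byte
   request would be rounded up to 32 bytes and served from an order-5
   page (2^5 == 32).  The extra_order_size_table refines this with
   non-power-of-two orders (e.g. a 24-byte order when MAX_ALIGNMENT is
   8), so that common odd sizes waste less space.  */
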
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS  (8)
#define PAGE_L2_BITS  (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE  ((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE  ((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

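/* Worked example, assuming 4K pages (G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12): for p == 0x12345678, LOOKUP_L1 (p) extracts the
   top 8 bits, 0x12, and LOOKUP_L2 (p) extracts the next 12 bits,
   (0x12345678 >> 12) & 0xfff == 0x345.  The low 12 bits are the offset
   within the page and take no part in the lookup.  */
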
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

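/* Worked example, modulo 2^32 as in the comment above: for Z == 24 we
   get S == 3 (24 == 3 << 3) and M == 0xaaaaaaab, the multiplicative
   inverse of 3 (3 * 0xaaaaaaab == 0x200000001 == 1 mod 2^32).  Then for
   O == 48: (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48/24.  */
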
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

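/* The offsetof trick works because the compiler must pad the lone char
   up to the alignment of the union that follows it; on a typical LP64
   host the union requires 8-byte alignment, so u lands at offset 8 and
   MAX_ALIGNMENT is 8.  */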

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X up to the next multiple of the page size.  */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))

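/* Examples: ROUND_UP_VALUE (10, 8) == 6 (10 + 6 == 16) and
   ROUND_UP (10, 8) == 16; with 4K pages, PAGE_ALIGN (5000) == 8192.
   PAGE_ALIGN relies on the page size being a power of two.  */
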
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

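/* For instance, a 4K page of 32-byte objects holds 128 objects; its
   bitmap is sized for 129 bits (one extra for the one-past-the-end
   sentry bit set in alloc_page below), so BITMAP_SIZE (129) == 3 longs
   == 24 bytes on a host with 64-bit longs.  */
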
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
        {
          page = alloc_anon (NULL, G.pagesize, true);
          entries = 1;
        }

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = entries - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

931}
932
cb2ec151 933/* For a page that is no longer needed, put it on the free page list. */
21341cfd 934
685fe032 935static void
20c1dc5e 936free_page (page_entry *entry)
21341cfd
AS
937{
938 if (GGC_DEBUG_LEVEL >= 2)
589005ff 939 fprintf (G.debug_file,
20c1dc5e 940 "Deallocating page at %p, data %p-%p\n", (void *) entry,
21341cfd
AS
941 entry->page, entry->page + entry->bytes - 1);
942
9a0a7d5d
HPN
943 /* Mark the page as inaccessible. Discard the handle to avoid handle
944 leak. */
35dee980 945 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
9a0a7d5d 946
21341cfd
AS
947 set_page_table_entry (entry->page, NULL);
948
130fadbb
RH
949#ifdef USING_MALLOC_PAGE_GROUPS
950 clear_page_group_in_use (entry->group, entry->page);
951#endif
952
c4775f82
MS
953 if (G.by_depth_in_use > 1)
954 {
955 page_entry *top = G.by_depth[G.by_depth_in_use-1];
282899df
NS
956 int i = entry->index_by_depth;
957
958 /* We cannot free a page from a context deeper than the current
959 one. */
960 gcc_assert (entry->context_depth == top->context_depth);
b8698a0f 961
282899df
NS
962 /* Put top element into freed slot. */
963 G.by_depth[i] = top;
964 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
965 top->index_by_depth = i;
c4775f82
MS
966 }
967 --G.by_depth_in_use;
968
969 adjust_depth ();
970
21341cfd
AS
971 entry->next = G.free_pages;
972 G.free_pages = entry;
973}
974
cb2ec151 975/* Release the free page cache to the system. */
21341cfd 976
4934cc53 977static void
20c1dc5e 978release_pages (void)
21341cfd 979{
711a3d82
AK
980#ifdef USING_MADVISE
981 page_entry *p, *start_p;
982 char *start;
983 size_t len;
d33ef9a5
AK
984 size_t mapped_len;
985 page_entry *next, *prev, *newprev;
986 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
987
988 /* First free larger continuous areas to the OS.
989 This allows other allocators to grab these areas if needed.
990 This is only done on larger chunks to avoid fragmentation.
991 This does not always work because the free_pages list is only
992 approximately sorted. */
993
994 p = G.free_pages;
995 prev = NULL;
996 while (p)
997 {
998 start = p->page;
999 start_p = p;
1000 len = 0;
1001 mapped_len = 0;
1002 newprev = prev;
1003 while (p && p->page == start + len)
1004 {
1005 len += p->bytes;
1006 if (!p->discarded)
1007 mapped_len += p->bytes;
1008 newprev = p;
1009 p = p->next;
1010 }
1011 if (len >= free_unit)
1012 {
1013 while (start_p != p)
1014 {
1015 next = start_p->next;
1016 free (start_p);
1017 start_p = next;
1018 }
1019 munmap (start, len);
1020 if (prev)
1021 prev->next = p;
1022 else
1023 G.free_pages = p;
1024 G.bytes_mapped -= mapped_len;
1025 continue;
1026 }
1027 prev = newprev;
1028 }
1029
1030 /* Now give back the fragmented pages to the OS, but keep the address
1031 space to reuse it next time. */
711a3d82
AK
1032
1033 for (p = G.free_pages; p; )
1034 {
1035 if (p->discarded)
1036 {
1037 p = p->next;
1038 continue;
1039 }
1040 start = p->page;
1041 len = p->bytes;
1042 start_p = p;
1043 p = p->next;
1044 while (p && p->page == start + len)
1045 {
1046 len += p->bytes;
1047 p = p->next;
1048 }
1049 /* Give the page back to the kernel, but don't free the mapping.
1050 This avoids fragmentation in the virtual memory map of the
1051 process. Next time we can reuse it by just touching it. */
1052 madvise (start, len, MADV_DONTNEED);
1053 /* Don't count those pages as mapped to not touch the garbage collector
1054 unnecessarily. */
1055 G.bytes_mapped -= len;
1056 while (start_p != p)
1057 {
1058 start_p->discarded = true;
1059 start_p = start_p->next;
1060 }
1061 }
1062#endif
1063#if defined(USING_MMAP) && !defined(USING_MADVISE)
130fadbb 1064 page_entry *p, *next;
21341cfd
AS
1065 char *start;
1066 size_t len;
1067
054f5e69 1068 /* Gather up adjacent pages so they are unmapped together. */
21341cfd 1069 p = G.free_pages;
21341cfd
AS
1070
1071 while (p)
1072 {
054f5e69 1073 start = p->page;
21341cfd 1074 next = p->next;
054f5e69 1075 len = p->bytes;
21341cfd
AS
1076 free (p);
1077 p = next;
21341cfd 1078
054f5e69
ZW
1079 while (p && p->page == start + len)
1080 {
1081 next = p->next;
1082 len += p->bytes;
1083 free (p);
1084 p = next;
1085 }
1086
1087 munmap (start, len);
1088 G.bytes_mapped -= len;
1089 }
005537df 1090
21341cfd 1091 G.free_pages = NULL;
130fadbb
RH
1092#endif
1093#ifdef USING_MALLOC_PAGE_GROUPS
1094 page_entry **pp, *p;
1095 page_group **gp, *g;
1096
1097 /* Remove all pages from free page groups from the list. */
1098 pp = &G.free_pages;
1099 while ((p = *pp) != NULL)
1100 if (p->group->in_use == 0)
1101 {
1102 *pp = p->next;
1103 free (p);
1104 }
1105 else
1106 pp = &p->next;
1107
1108 /* Remove all free page groups, and release the storage. */
1109 gp = &G.page_groups;
1110 while ((g = *gp) != NULL)
1111 if (g->in_use == 0)
1112 {
1113 *gp = g->next;
589005ff 1114 G.bytes_mapped -= g->alloc_size;
130fadbb
RH
1115 free (g->allocation);
1116 }
1117 else
1118 gp = &g->next;
1119#endif
21341cfd
AS
1120}
1121
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

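/* For example, once init_ggc has folded the extra orders into
   size_lookup, and assuming MAX_ALIGNMENT is 8, ggc_round_alloc_size (20)
   returns 24 (the MAX_ALIGNMENT * 3 extra order) rather than the 32 a
   pure power-of-two scheme would use.  */
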
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
        {
          G.stats.total_overhead_under32 += overhead;
          G.stats.total_allocated_under32 += object_size;
        }
      if (size <= 64)
        {
          G.stats.total_overhead_under64 += overhead;
          G.stats.total_allocated_under64 += object_size;
        }
      if (size <= 128)
        {
          G.stats.total_overhead_under128 += overhead;
          G.stats.total_allocated_under128 += object_size;
        }
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

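/* The loop above is Newton's iteration for the inverse of SIZE modulo a
   power of two: any odd number squares to 1 mod 8, so starting from
   INV == SIZE gives an inverse correct to three bits, and each step
   INV <- INV * (2 - INV * SIZE) doubles the number of correct low bits,
   so the loop converges in a handful of iterations.  */
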
1641/* Initialize the ggc-mmap allocator. */
21341cfd 1642void
20c1dc5e 1643init_ggc (void)
21341cfd 1644{
2be510b8
MM
1645 unsigned order;
1646
c3284718 1647 G.pagesize = getpagesize ();
21341cfd
AS
1648 G.lg_pagesize = exact_log2 (G.pagesize);
1649
825b6926 1650#ifdef HAVE_MMAP_DEV_ZERO
21341cfd
AS
1651 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1652 if (G.dev_zero_fd == -1)
c770ac2b 1653 internal_error ("open /dev/zero: %m");
21341cfd
AS
1654#endif
1655
1656#if 0
1657 G.debug_file = fopen ("ggc-mmap.debug", "w");
1658#else
1659 G.debug_file = stdout;
1660#endif
1661
825b6926 1662#ifdef USING_MMAP
1b3e1423
RH
1663 /* StunOS has an amazing off-by-one error for the first mmap allocation
1664 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1665 believe, is an unaligned page allocation, which would cause us to
1666 hork badly if we tried to use it. */
1667 {
25f0ea81 1668 char *p = alloc_anon (NULL, G.pagesize, true);
825b6926 1669 struct page_entry *e;
2a6e6fea 1670 if ((uintptr_t)p & (G.pagesize - 1))
1b3e1423
RH
1671 {
1672 /* How losing. Discard this one and try another. If we still
1673 can't get something useful, give up. */
1674
25f0ea81 1675 p = alloc_anon (NULL, G.pagesize, true);
2a6e6fea 1676 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1b3e1423 1677 }
825b6926 1678
dc297297 1679 /* We have a good page, might as well hold onto it... */
5ed6ace5 1680 e = XCNEW (struct page_entry);
825b6926
ZW
1681 e->bytes = G.pagesize;
1682 e->page = p;
1683 e->next = G.free_pages;
1684 G.free_pages = e;
1b3e1423
RH
1685 }
1686#endif
2be510b8
MM
1687
1688 /* Initialize the object size table. */
1689 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1690 object_size_table[order] = (size_t) 1 << order;
1691 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
b1095f9c
MM
1692 {
1693 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
a469a4f2
RG
1694
1695 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1696 so that we're sure of getting aligned memory. */
1697 s = ROUND_UP (s, MAX_ALIGNMENT);
b1095f9c
MM
1698 object_size_table[order] = s;
1699 }
2be510b8 1700
8537ed68 1701 /* Initialize the objects-per-page and inverse tables. */
2be510b8
MM
1702 for (order = 0; order < NUM_ORDERS; ++order)
1703 {
1704 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1705 if (objects_per_page_table[order] == 0)
1706 objects_per_page_table[order] = 1;
8537ed68 1707 compute_inverse (order);
2be510b8
MM
1708 }
1709
1710 /* Reset the size_lookup array to put appropriately sized objects in
1711 the special orders. All objects bigger than the previous power
1712 of two, but no greater than the special size, should go in the
a469a4f2 1713 new order. */
2be510b8
MM
1714 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1715 {
a469a4f2
RG
1716 int o;
1717 int i;
c4775f82 1718
6583cf15
NC
1719 i = OBJECT_SIZE (order);
1720 if (i >= NUM_SIZE_LOOKUP)
1721 continue;
1722
1723 for (o = size_lookup[i]; o == size_lookup [i]; --i)
a469a4f2
RG
1724 size_lookup[i] = order;
1725 }
ecf7b86f 1726
c4775f82
MS
1727 G.depth_in_use = 0;
1728 G.depth_max = 10;
5ed6ace5 1729 G.depth = XNEWVEC (unsigned int, G.depth_max);
c4775f82
MS
1730
1731 G.by_depth_in_use = 0;
1732 G.by_depth_max = INITIAL_PTE_COUNT;
5ed6ace5
MD
1733 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1734 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
21341cfd
AS
1735}
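/* Editor's sketch (not part of GCC): the size_lookup reseeding above in
   miniature, with made-up orders.  Before the walk, requests of 65..128
   bytes all map to a 128-byte power-of-two order; afterwards 65..96 map
   to a hypothetical 96-byte special order while 97..128 keep the
   128-byte order.  Kept under #if 0 as illustration only.  */
#if 0
static unsigned char mini_lookup[129];

static void
mini_reseed (void)
{
  int o, i;

  /* Power-of-two seeding: call 64 bytes "order 6" and 128 bytes
     "order 7".  */
  mini_lookup[64] = 6;
  for (i = 65; i <= 128; i++)
    mini_lookup[i] = 7;

  /* Reseed for a 96-byte special order, "order 10": walk downward from
     96 while the entry still names the old order, as init_ggc does.  */
  for (i = 96, o = mini_lookup[96]; o == mini_lookup[i]; --i)
    mini_lookup[i] = 10;

  /* Result: 65..96 -> order 10; 97..128 -> order 7.  */
}
#endif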

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
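/* Editor's sketch (not part of GCC): the bit-counting idiom in the loop
   above, isolated.  It is a population count done by shifting the word
   out one bit at a time.  Kept under #if 0 as illustration only.  */
#if 0
static unsigned
count_set_bits (unsigned long word)
{
  unsigned n = 0;

  for (; word; word >>= 1)
    n += word & 1;
  return n;
}
#endif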
\f
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
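/* Editor's sketch (not part of GCC): a hypothetical checker for the
   ordering invariant sweep_pages restores; within each order, every
   page with free objects precedes every completely full page.  Kept
   under #if 0 as illustration only.  */
#if 0
static void
check_page_list_invariant (unsigned order)
{
  page_entry *p;
  int seen_full = 0;

  for (p = G.pages[order]; p != NULL; p = p->next)
    {
      if (p->num_free_objects == 0)
	seen_full = 1;
      else
	/* A non-full page must never follow a full one.  */
	gcc_assert (!seen_full);
    }
}
#endif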

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();

  if (GATHER_STATISTICS)
    ggc_prune_overhead_list ();

  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
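/* Editor's sketch (not part of GCC): the collection trigger above as a
   standalone predicate, ignoring ggc_force_collect.  The two tunables
   correspond to the --param values ggc-min-heapsize (kilobytes) and
   ggc-min-expand (percent); the helper name is hypothetical.  Kept
   under #if 0 as illustration only.  */
#if 0
static int
should_collect_p (size_t allocated, size_t allocated_last_gc,
		  size_t min_heapsize_kb, size_t min_expand_pct)
{
  size_t floor_bytes = MAX (allocated_last_gc, min_heapsize_kb * 1024);

  /* E.g. with a 4 MB floor and 30% expansion, no collection happens
     until more than 5.2 MB is live.  */
  return allocated >= floor_bytes + floor_bytes * min_expand_pct / 100;
}
#endif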

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
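/* Editor's demo (not part of GCC): how SCALE and STAT_LABEL render a
   byte count, e.g. 8192 as "8192 ", 524288 as "512k", and 33554432 as
   "32M".  Kept under #if 0 as illustration only.  */
#if 0
static void
print_scaled (size_t x)
{
  fprintf (stderr, "%10lu%c\n", SCALE (x), STAT_LABEL (x));
}
#endif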

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

      fprintf (stderr, "Total Overhead:              %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead);
      fprintf (stderr, "Total Allocated:             %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated);

      fprintf (stderr, "Total Overhead under 32B:    %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B:   %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B:    %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B:   %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B:   %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B:  %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under128);

      for (i = 0; i < NUM_ORDERS; i++)
	if (G.stats.total_allocated_per_order[i])
	  {
	    fprintf (stderr, "Total Overhead page size %7lu:  %10" HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_overhead_per_order[i]);
	    fprintf (stderr, "Total Allocated page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_allocated_per_order[i]);
	  }
    }
}
\f
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

/* Return a fresh PCH data-gathering state blob.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Count one object of SIZE bytes toward the PCH totals, bucketed by
   allocation order.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
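/* Editor's sketch (not part of GCC): the size-to-order mapping that the
   PCH routines in this file repeat inline, factored into a hypothetical
   helper.  Sizes covered by size_lookup use the table; larger sizes
   scan upward starting at order 10, as the code above does.  Kept
   under #if 0 as illustration only.  */
#if 0
static unsigned
pch_order_for_size (size_t size)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    return size_lookup[size];

  order = 10;
  while (size > OBJECT_SIZE (order))
    order++;
  return order;
}
#endif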

/* Return the total amount of memory needed for the PCH data, one
   page-aligned span per order.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
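/* Editor's note: because each order's span is rounded up to a page
   boundary separately, the base addresses handed out by
   ggc_pch_this_base below all start page-aligned.  For example, with
   4096-byte pages, 10000 bytes of data for one order consume
   PAGE_ALIGN (10000) = 12288 bytes of the image.  */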

/* Record BASE as the start of the PCH address space and compute the
   base address for each order's objects within it.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}

/* Hand out the address at which an object of SIZE bytes will live in
   the PCH address space.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write object X of SIZE bytes to F, padding it out to the full
   OBJECT_SIZE of its order.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can%'t write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can%'t write PCH file: %m");
}

/* Write the accumulated PCH totals to F and release D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

/* Read the PCH image from F at address ADDR, rebuilding the page-table
   entries for the pages it contains.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      /* Mark every object, and the one-past-the-end sentinel, in use.  */
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
2498}