/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "alias.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

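/* Illustrative example (not in the original sources): under the pure
   power-of-two scheme above, a 20-byte request would round up to 32
   bytes (order 5) and be served from a page holding only 32-byte
   objects.  In practice the 24-byte extra order (3 * MAX_ALIGNMENT,
   see extra_order_size_table below) catches such a request first.  */
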
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
\f
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

\f
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                                 PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS    (8)
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE    ((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE    ((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

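/* Worked example (illustrative; assumes 4K pages, so G.lg_pagesize
   == 12): PAGE_L2_BITS is then 32 - 8 - 12 == 12.  For a pointer
   p == 0x12345678, LOOKUP_L1 (p) == 0x12 (bits 31..24) and
   LOOKUP_L2 (p) == 0x345 (bits 23..12); the low 12 bits are the
   offset of the object within its page.  */
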
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

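/* Worked example for the macros above (illustrative numbers): for
   Z == 24 we have Z == 3 << 3, so S == 3 and M is the multiplicative
   inverse of 3 modulo 2^32, 0xaaaaaaab (3 * 0xaaaaaaab == 1 mod 2^32).
   Then OFFSET_TO_BIT (48, order) == ((48 * 0xaaaaaaab) mod 2^32) >> 3
   == 16 >> 3 == 2 == 48 / 24, computed without a divide instruction.  */
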
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X up to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)

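/* Worked example: ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8)
   == 7 - 1 == 6, and 10 + 6 == 16 is indeed the next multiple of 8.
   Likewise, with 4K pages, PAGE_ALIGN (5000) == 8192.  */
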
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
        m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};

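/* Illustrative sketch of how the classes above are used (hypothetical
   callback and object names; the real registration happens in
   ggc_internal_alloc below): a finalizer records an object address and
   a destructor-like callback, and the collector invokes call () just
   before the object is reclaimed.  */
#if 0
static void
my_payload_dtor (void *p)       /* hypothetical callback */
{
  /* Release whatever the object at P owns.  */
}

void
example (void *some_object)     /* hypothetical GC-allocated object */
{
  finalizer fin (some_object, my_payload_dtor);
  fin.call ();  /* What the collector does when SOME_OBJECT dies.  */
}
#endif
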
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The stream used for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index into by_depth where the
     given depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  */
  vec<finalizer> finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec_finalizer> vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes of at most 32, 64 and
       128 bytes.  These sizes are interesting because they are typical
       cache line sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

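/* Worked example (illustrative; assumes 4K pages and 64-bit longs): a
   page of order 3 holds 4096 / 8 == 512 eight-byte objects; together
   with the one-past-the-end sentinel bit that is 513 bits, so
   BITMAP_SIZE (513) == CEIL (513, 64) * 8 == 9 * 8 == 72 bytes.  */
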
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512    /* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
\f
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
        {
          page = alloc_anon (NULL, G.pagesize, true);
          entries = 1;
        }

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = entries - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          if (!p->discarded)
            mapped_len += p->bytes;
          newprev = p;
          p = p->next;
        }
      if (len >= free_unit)
        {
          while (start_p != p)
            {
              next = start_p->next;
              free (start_p);
              start_p = next;
            }
          munmap (start, len);
          if (prev)
            prev->next = p;
          else
            G.free_pages = p;
          G.bytes_mapped -= mapped_len;
          continue;
        }
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
        {
          p = p->next;
          continue;
        }
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          p = p->next;
        }
      /* Give the page back to the kernel, but don't free the mapping.
         This avoids fragmentation in the virtual memory map of the
         process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
         unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
        {
          start_p->discarded = true;
          start_p = start_p->next;
        }
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
                        size_t *size_order,
                        size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

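/* Usage sketch (illustrative; assumes 8-byte MAX_ALIGNMENT): a
   10-byte request looks up size_lookup[10] == 4 and
   ggc_round_alloc_size (10) returns OBJECT_SIZE (4) == 16, while a
   100-byte request lands in the 104-byte extra order
   (13 * MAX_ALIGNMENT) rather than the 128-byte power-of-two order,
   because init_ggc rewrites size_lookup to prefer the tighter extra
   orders.  */
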
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                         result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f && n == 1)
    G.finalizers.safe_push (finalizer (result, f));
  else if (f)
    G.vec_finalizers.safe_push
      (vec_finalizer (reinterpret_cast<uintptr_t> (result), f, s, n));

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
        {
          G.stats.total_overhead_under32 += overhead;
          G.stats.total_allocated_under32 += object_size;
        }
      if (size <= 64)
        {
          G.stats.total_overhead_under64 += overhead;
          G.stats.total_allocated_under64 += object_size;
        }
      if (size <= 128)
        {
          G.stats.total_overhead_under128 += overhead;
          G.stats.total_allocated_under128 += object_size;
        }
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

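/* Worked example for the bit arithmetic above (illustrative): on a
   page of order 5 (32-byte objects), an object 96 bytes into the page
   maps to bit 96 / 32 == 3; on a 64-bit host that is word 0 with
   mask 1UL << 3.  */
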
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
\f
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

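/* A sketch of why the loop above terminates (illustrative note, not
   from the original sources): inv = inv * (2 - inv * size) is a
   Newton iteration for the inverse of the odd part of SIZE modulo a
   power of two; each pass doubles the number of correct low-order
   bits, so only a handful of iterations are needed.  For odd part 3
   (e.g. OBJECT_SIZE == 24) on a 32-bit size_t it converges to
   0xaaaaaaab, and indeed 3 * 0xaaaaaaab == 1 (mod 2^32).  */
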
1692/* Initialize the ggc-mmap allocator. */
911ab6b9 1693void
6ec1f4e0 1694init_ggc (void)
911ab6b9 1695{
415309e2 1696 static bool init_p = false;
2f6aecaf 1697 unsigned order;
1698
415309e2 1699 if (init_p)
1700 return;
1701 init_p = true;
1702
9af5ce0c 1703 G.pagesize = getpagesize ();
911ab6b9 1704 G.lg_pagesize = exact_log2 (G.pagesize);
1705
901dfcc7 1706#ifdef HAVE_MMAP_DEV_ZERO
911ab6b9 1707 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1708 if (G.dev_zero_fd == -1)
3a2aee0e 1709 internal_error ("open /dev/zero: %m");
911ab6b9 1710#endif
1711
1712#if 0
1713 G.debug_file = fopen ("ggc-mmap.debug", "w");
1714#else
1715 G.debug_file = stdout;
1716#endif
1717
901dfcc7 1718#ifdef USING_MMAP
42f8e268 1719 /* StunOS has an amazing off-by-one error for the first mmap allocation
1720 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1721 believe, is an unaligned page allocation, which would cause us to
1722 hork badly if we tried to use it. */
1723 {
4a2f812e 1724 char *p = alloc_anon (NULL, G.pagesize, true);
901dfcc7 1725 struct page_entry *e;
337c992b 1726 if ((uintptr_t)p & (G.pagesize - 1))
42f8e268 1727 {
1728 /* How losing. Discard this one and try another. If we still
1729 can't get something useful, give up. */
1730
4a2f812e 1731 p = alloc_anon (NULL, G.pagesize, true);
337c992b 1732 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
42f8e268 1733 }
901dfcc7 1734
aa40f561 1735 /* We have a good page, might as well hold onto it... */
4c36ffe6 1736 e = XCNEW (struct page_entry);
901dfcc7 1737 e->bytes = G.pagesize;
1738 e->page = p;
1739 e->next = G.free_pages;
1740 G.free_pages = e;
42f8e268 1741 }
1742#endif
2f6aecaf 1743
1744 /* Initialize the object size table. */
1745 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1746 object_size_table[order] = (size_t) 1 << order;
1747 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
918edee0 1748 {
1749 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
5da2078a 1750
1751 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1752 so that we're sure of getting aligned memory. */
1753 s = ROUND_UP (s, MAX_ALIGNMENT);
918edee0 1754 object_size_table[order] = s;
1755 }
2f6aecaf 1756
40b9be5e 1757 /* Initialize the objects-per-page and inverse tables. */
2f6aecaf 1758 for (order = 0; order < NUM_ORDERS; ++order)
1759 {
1760 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1761 if (objects_per_page_table[order] == 0)
1762 objects_per_page_table[order] = 1;
40b9be5e 1763 compute_inverse (order);
2f6aecaf 1764 }
1765
1766 /* Reset the size_lookup array to put appropriately sized objects in
1767 the special orders. All objects bigger than the previous power
1768 of two, but no greater than the special size, should go in the
5da2078a 1769 new order. */
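  /* For instance (a hypothetical special size): if an extra order of
     size 72 follows the 64-byte power-of-two order, the loop below
     walks size_lookup[72] down through size_lookup[65], redirecting
     those sizes from the 128-byte order to the new 72-byte order.  */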
2f6aecaf 1770 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1771 {
5da2078a 1772 int o;
1773 int i;
76e1b933 1774
f68513d3 1775 i = OBJECT_SIZE (order);
1776 if (i >= NUM_SIZE_LOOKUP)
1777 continue;
1778
1779 for (o = size_lookup[i]; o == size_lookup [i]; --i)
5da2078a 1780 size_lookup[i] = order;
1781 }
8c14f57e 1782
76e1b933 1783 G.depth_in_use = 0;
1784 G.depth_max = 10;
4c36ffe6 1785 G.depth = XNEWVEC (unsigned int, G.depth_max);
76e1b933 1786
1787 G.by_depth_in_use = 0;
1788 G.by_depth_max = INITIAL_PTE_COUNT;
4c36ffe6 1789 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1790 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
911ab6b9 1791}
1792
c10b9b1c 1793/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1794 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1795
1796static void
6ec1f4e0 1797ggc_recalculate_in_use_p (page_entry *p)
c10b9b1c 1798{
1799 unsigned int i;
1800 size_t num_objects;
1801
3cfec666 1802 /* Because the past-the-end bit in in_use_p is always set, we
c10b9b1c 1803 pretend there is one additional object. */
573aba85 1804 num_objects = OBJECTS_IN_PAGE (p) + 1;
c10b9b1c 1805
1806 /* Reset the free object count. */
1807 p->num_free_objects = num_objects;
1808
1809 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
3cfec666 1810 for (i = 0;
2f6aecaf 1811 i < CEIL (BITMAP_SIZE (num_objects),
1812 sizeof (*p->in_use_p));
c10b9b1c 1813 ++i)
1814 {
1815 unsigned long j;
1816
1817 /* Something is in use if it is marked, or if it was in use in a
1818 context further down the context stack. */
76e1b933 1819 p->in_use_p[i] |= save_in_use_p (p)[i];
c10b9b1c 1820
1821 /* Decrement the free object count for every object allocated. */
1822 for (j = p->in_use_p[i]; j; j >>= 1)
1823 p->num_free_objects -= (j & 1);
1824 }
1825
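  /* Strictly less than NUM_OBJECTS: the always-set one-past-the-end
     bit was deducted by the loop above, cancelling the extra object
     we pretended to have.  */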
0d59b19d 1826 gcc_assert (p->num_free_objects < num_objects);
c10b9b1c 1827}
911ab6b9 1828\f
e3c4633e 1829/* Unmark all objects. */
1830
c4e03242 1831static void
6ec1f4e0 1832clear_marks (void)
911ab6b9 1833{
1834 unsigned order;
1835
2f6aecaf 1836 for (order = 2; order < NUM_ORDERS; order++)
911ab6b9 1837 {
911ab6b9 1838 page_entry *p;
1839
1840 for (p = G.pages[order]; p != NULL; p = p->next)
1841 {
573aba85 1842 size_t num_objects = OBJECTS_IN_PAGE (p);
1843 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1844
911ab6b9 1845 /* The data should be page-aligned. */
337c992b 1846 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
911ab6b9 1847
1848 /* Pages that aren't in the topmost context are not collected;
1849 nevertheless, we need their in-use bit vectors to store GC
1850 marks. So, back them up first. */
c10b9b1c 1851 if (p->context_depth < G.context_depth)
911ab6b9 1852 {
76e1b933 1853 if (! save_in_use_p (p))
4077bf7a 1854 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
76e1b933 1855 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
911ab6b9 1856 }
1857
 1858	  /* Reset the number of free objects and clear the
1859 in-use bits. These will be adjusted by mark_obj. */
1860 p->num_free_objects = num_objects;
1861 memset (p->in_use_p, 0, bitmap_size);
1862
1863 /* Make sure the one-past-the-end bit is always set. */
3cfec666 1864 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
911ab6b9 1865 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1866 }
1867 }
1868}
1869
92960204 1870/* Check if any blocks with a registered finalizer have become unmarked.  If
 1871   so, run the finalizer and unregister it, because the block is about to be
 1872   freed.  Note that no guarantee is made about the order in which finalizers
 1873   run, so touching other objects in GC memory is extremely unwise.  */
1874
92f06184 1875static void
1876ggc_handle_finalizers ()
1877{
1878 if (G.context_depth != 0)
1879 return;
1880
1881 unsigned length = G.finalizers.length ();
1882 for (unsigned int i = 0; i < length;)
1883 {
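      /* unordered_remove fills slot I with the vector's last element,
	 so after running a finalizer we stay at index I to examine
	 the element that was swapped in; only survivors advance I.  */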
1884 finalizer &f = G.finalizers[i];
1885 if (!ggc_marked_p (f.addr ()))
1886 {
1887 f.call ();
1888 G.finalizers.unordered_remove (i);
1889 length--;
1890 }
1891 else
1892 i++;
1893 }
1894
1895
1896 length = G.vec_finalizers.length ();
1897 for (unsigned int i = 0; i < length;)
1898 {
1899 vec_finalizer &f = G.vec_finalizers[i];
1900 if (!ggc_marked_p (f.addr ()))
1901 {
1902 f.call ();
1903 G.vec_finalizers.unordered_remove (i);
1904 length--;
1905 }
1906 else
1907 i++;
1908 }
1909}
1910
e3c4633e 1911/* Free all empty pages. Partially empty pages need no attention
1912 because the `mark' bit doubles as an `unused' bit. */
1913
c4e03242 1914static void
6ec1f4e0 1915sweep_pages (void)
911ab6b9 1916{
1917 unsigned order;
1918
2f6aecaf 1919 for (order = 2; order < NUM_ORDERS; order++)
911ab6b9 1920 {
1921 /* The last page-entry to consider, regardless of entries
1922 placed at the end of the list. */
1923 page_entry * const last = G.page_tails[order];
1924
573aba85 1925 size_t num_objects;
9a2e8b0a 1926 size_t live_objects;
911ab6b9 1927 page_entry *p, *previous;
1928 int done;
3cfec666 1929
911ab6b9 1930 p = G.pages[order];
1931 if (p == NULL)
1932 continue;
1933
1934 previous = NULL;
1935 do
1936 {
1937 page_entry *next = p->next;
1938
1939 /* Loop until all entries have been examined. */
1940 done = (p == last);
6ec1f4e0 1941
573aba85 1942 num_objects = OBJECTS_IN_PAGE (p);
911ab6b9 1943
9a2e8b0a 1944 /* Add all live objects on this page to the count of
1945 allocated memory. */
1946 live_objects = num_objects - p->num_free_objects;
1947
2f6aecaf 1948 G.allocated += OBJECT_SIZE (order) * live_objects;
9a2e8b0a 1949
911ab6b9 1950 /* Only objects on pages in the topmost context should get
1951 collected. */
1952 if (p->context_depth < G.context_depth)
1953 ;
1954
1955 /* Remove the page if it's empty. */
9a2e8b0a 1956 else if (live_objects == 0)
911ab6b9 1957 {
4a755ae7 1958 /* If P was the first page in the list, then NEXT
1959 becomes the new first page in the list, otherwise
1960 splice P out of the forward pointers. */
911ab6b9 1961 if (! previous)
1962 G.pages[order] = next;
1963 else
1964 previous->next = next;
48e1416a 1965
4a755ae7 1966 /* Splice P out of the back pointers too. */
1967 if (next)
1968 next->prev = previous;
911ab6b9 1969
1970 /* Are we removing the last element? */
1971 if (p == G.page_tails[order])
1972 G.page_tails[order] = previous;
1973 free_page (p);
1974 p = previous;
1975 }
1976
1977 /* If the page is full, move it to the end. */
1978 else if (p->num_free_objects == 0)
1979 {
1980 /* Don't move it if it's already at the end. */
1981 if (p != G.page_tails[order])
1982 {
1983 /* Move p to the end of the list. */
1984 p->next = NULL;
4a755ae7 1985 p->prev = G.page_tails[order];
911ab6b9 1986 G.page_tails[order]->next = p;
1987
1988 /* Update the tail pointer... */
1989 G.page_tails[order] = p;
1990
1991 /* ... and the head pointer, if necessary. */
1992 if (! previous)
1993 G.pages[order] = next;
1994 else
1995 previous->next = next;
4a755ae7 1996
1997 /* And update the backpointer in NEXT if necessary. */
1998 if (next)
1999 next->prev = previous;
2000
911ab6b9 2001 p = previous;
2002 }
2003 }
2004
2005 /* If we've fallen through to here, it's a page in the
2006 topmost context that is neither full nor empty. Such a
2007 page must precede pages at lesser context depth in the
2008 list, so move it to the head. */
2009 else if (p != G.pages[order])
2010 {
2011 previous->next = p->next;
4a755ae7 2012
2013 /* Update the backchain in the next node if it exists. */
2014 if (p->next)
2015 p->next->prev = previous;
2016
2017 /* Move P to the head of the list. */
911ab6b9 2018 p->next = G.pages[order];
4a755ae7 2019 p->prev = NULL;
2020 G.pages[order]->prev = p;
2021
2022 /* Update the head pointer. */
911ab6b9 2023 G.pages[order] = p;
4a755ae7 2024
911ab6b9 2025 /* Are we moving the last element? */
2026 if (G.page_tails[order] == p)
2027 G.page_tails[order] = previous;
2028 p = previous;
2029 }
2030
2031 previous = p;
2032 p = next;
3cfec666 2033 }
911ab6b9 2034 while (! done);
c10b9b1c 2035
2036 /* Now, restore the in_use_p vectors for any pages from contexts
2037 other than the current one. */
2038 for (p = G.pages[order]; p; p = p->next)
2039 if (p->context_depth != G.context_depth)
2040 ggc_recalculate_in_use_p (p);
911ab6b9 2041 }
2042}
2043
2a3edec5 2044#ifdef ENABLE_GC_CHECKING
e3c4633e 2045/* Clobber all free objects. */
2046
c4e03242 2047static void
6ec1f4e0 2048poison_pages (void)
911ab6b9 2049{
2050 unsigned order;
2051
2f6aecaf 2052 for (order = 2; order < NUM_ORDERS; order++)
911ab6b9 2053 {
2f6aecaf 2054 size_t size = OBJECT_SIZE (order);
911ab6b9 2055 page_entry *p;
2056
2057 for (p = G.pages[order]; p != NULL; p = p->next)
2058 {
573aba85 2059 size_t num_objects;
911ab6b9 2060 size_t i;
2d517b2f 2061
2062 if (p->context_depth != G.context_depth)
2063 /* Since we don't do any collection for pages in pushed
2064 contexts, there's no need to do any poisoning. And
2065 besides, the IN_USE_P array isn't valid until we pop
2066 contexts. */
2067 continue;
2068
573aba85 2069 num_objects = OBJECTS_IN_PAGE (p);
911ab6b9 2070 for (i = 0; i < num_objects; i++)
2071 {
2072 size_t word, bit;
2073 word = i / HOST_BITS_PER_LONG;
2074 bit = i % HOST_BITS_PER_LONG;
2075 if (((p->in_use_p[word] >> bit) & 1) == 0)
dd359afe 2076 {
2077 char *object = p->page + i * size;
2078
2079 /* Keep poison-by-write when we expect to use Valgrind,
 2080		     so the exact same memory semantics are kept, in case
2081 there are memory errors. We override this request
2082 below. */
a7779e75 2083 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2084 size));
dd359afe 2085 memset (object, 0xa5, size);
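		  /* 0xa5 is an arbitrary, easily recognized poison
		     pattern; a collected object that is referenced
		     again shows up as a5a5... garbage in a debugger.  */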
2086
2087 /* Drop the handle to avoid handle leak. */
a7779e75 2088 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
dd359afe 2089 }
911ab6b9 2090 }
2091 }
2092 }
2093}
c4e03242 2094#else
2095#define poison_pages()
2096#endif
2097
2098#ifdef ENABLE_GC_ALWAYS_COLLECT
2099/* Validate that the reportedly free objects actually are. */
2100
2101static void
2102validate_free_objects (void)
2103{
2104 struct free_object *f, *next, *still_free = NULL;
2105
2106 for (f = G.free_object_list; f ; f = next)
2107 {
2108 page_entry *pe = lookup_page_table_entry (f->object);
2109 size_t bit, word;
2110
2111 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2112 word = bit / HOST_BITS_PER_LONG;
2113 bit = bit % HOST_BITS_PER_LONG;
2114 next = f->next;
2115
2116 /* Make certain it isn't visible from any root. Notice that we
2117 do this check before sweep_pages merges save_in_use_p. */
0d59b19d 2118 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
c4e03242 2119
2120 /* If the object comes from an outer context, then retain the
2121 free_object entry, so that we can verify that the address
2122 isn't live on the stack in some outer context. */
2123 if (pe->context_depth != G.context_depth)
2124 {
2125 f->next = still_free;
2126 still_free = f;
2127 }
2128 else
2129 free (f);
2130 }
2131
2132 G.free_object_list = still_free;
2133}
2134#else
2135#define validate_free_objects()
911ab6b9 2136#endif
2137
e3c4633e 2138/* Top level mark-and-sweep routine. */
2139
911ab6b9 2140void
6ec1f4e0 2141ggc_collect (void)
911ab6b9 2142{
911ab6b9 2143 /* Avoid frequent unnecessary work by skipping collection if the
2144 total allocations haven't expanded much since the last
2145 collection. */
83142a4c 2146 float allocated_last_gc =
2a3edec5 2147 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2148
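  /* Illustrative numbers (the real defaults are tuned to the host):
     with GGC_MIN_HEAPSIZE == 4096 kB and GGC_MIN_EXPAND == 30, a
     compilation whose last collection left 8 MB live will not collect
     again until allocation grows past roughly 8 MB + 2.4 MB.  */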
83142a4c 2149 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
0ca9a7b6 2150 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
911ab6b9 2151 return;
911ab6b9 2152
74d2af64 2153 timevar_push (TV_GC);
911ab6b9 2154 if (!quiet_flag)
90856340 2155 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
c4e03242 2156 if (GGC_DEBUG_LEVEL >= 2)
2157 fprintf (G.debug_file, "BEGIN COLLECTING\n");
911ab6b9 2158
9a2e8b0a 2159 /* Zero the total allocated bytes. This will be recalculated in the
2160 sweep phase. */
911ab6b9 2161 G.allocated = 0;
2162
3cfec666 2163 /* Release the pages we freed the last time we collected, but didn't
911ab6b9 2164 reuse in the interim. */
2165 release_pages ();
2166
598638e2 2167 /* Indicate that we've seen collections at this context depth. */
2168 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2169
740cd0be 2170 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2171
8f359205 2172 in_gc = true;
911ab6b9 2173 clear_marks ();
2174 ggc_mark_roots ();
92f06184 2175 ggc_handle_finalizers ();
ecd52ea9 2176
2177 if (GATHER_STATISTICS)
2178 ggc_prune_overhead_list ();
2179
911ab6b9 2180 poison_pages ();
c4e03242 2181 validate_free_objects ();
e3c4633e 2182 sweep_pages ();
2183
8f359205 2184 in_gc = false;
911ab6b9 2185 G.allocated_last_gc = G.allocated;
2186
740cd0be 2187 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2188
74d2af64 2189 timevar_pop (TV_GC);
911ab6b9 2190
911ab6b9 2191 if (!quiet_flag)
74d2af64 2192 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
c4e03242 2193 if (GGC_DEBUG_LEVEL >= 2)
2194 fprintf (G.debug_file, "END COLLECTING\n");
911ab6b9 2195}
4e00b6fd 2196
4f78e0a8 2197/* Assume that all GGC memory is reachable and grow the limits for the next
 2198   collection.  With checking, trigger a collection so that a -Q compilation
 2199   reports how much memory really is reachable.  */
2200
2201void
2202ggc_grow (void)
2203{
382ecba7 2204 if (!flag_checking)
2205 G.allocated_last_gc = MAX (G.allocated_last_gc,
2206 G.allocated);
2207 else
2208 ggc_collect ();
4f78e0a8 2209 if (!quiet_flag)
2210 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2211}
2212
4e00b6fd 2213/* Print allocation statistics. */
2a8997e8 2214#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2215 ? (x) \
2216 : ((x) < 1024*1024*10 \
2217 ? (x) / 1024 \
2218 : (x) / (1024*1024))))
0ca9a7b6 2219#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
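/* For example, a byte count of 2500000 is at least 10k, so SCALE
   yields 2500000 / 1024 == 2441 and STAT_LABEL picks the matching
   'k' suffix; counts below 10k print unscaled with a blank label.  */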
4e00b6fd 2220
2221void
6ec1f4e0 2222ggc_print_statistics (void)
4e00b6fd 2223{
2224 struct ggc_statistics stats;
c10b9b1c 2225 unsigned int i;
2a8997e8 2226 size_t total_overhead = 0;
4e00b6fd 2227
2228 /* Clear the statistics. */
3e9d8cee 2229 memset (&stats, 0, sizeof (stats));
3cfec666 2230
4e00b6fd 2231 /* Make sure collection will really occur. */
2232 G.allocated_last_gc = 0;
2233
2234 /* Collect and print the statistics common across collectors. */
2a8997e8 2235 ggc_print_common_statistics (stderr, &stats);
4e00b6fd 2236
c10b9b1c 2237 /* Release free pages so that we will not count the bytes allocated
2238 there as part of the total allocated memory. */
2239 release_pages ();
2240
3cfec666 2241 /* Collect some information about the various sizes of
4e00b6fd 2242 allocation. */
86736f9e 2243 fprintf (stderr,
2244 "Memory still allocated at the end of the compilation process\n");
98cc198b 2245 fprintf (stderr, "%-8s %10s %10s %10s\n",
f806fb68 2246 "Size", "Allocated", "Used", "Overhead");
2f6aecaf 2247 for (i = 0; i < NUM_ORDERS; ++i)
4e00b6fd 2248 {
2249 page_entry *p;
2250 size_t allocated;
2251 size_t in_use;
2a8997e8 2252 size_t overhead;
4e00b6fd 2253
2254 /* Skip empty entries. */
2255 if (!G.pages[i])
2256 continue;
2257
2a8997e8 2258 overhead = allocated = in_use = 0;
4e00b6fd 2259
2260 /* Figure out the total number of bytes allocated for objects of
2a8997e8 2261 this size, and how many of them are actually in use. Also figure
2262 out how much memory the page table is using. */
4e00b6fd 2263 for (p = G.pages[i]; p; p = p->next)
2264 {
2265 allocated += p->bytes;
6ec1f4e0 2266 in_use +=
573aba85 2267 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2a8997e8 2268
2269 overhead += (sizeof (page_entry) - sizeof (long)
573aba85 2270 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
4e00b6fd 2271 }
98cc198b 2272 fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
29e7390a 2273 (unsigned long) OBJECT_SIZE (i),
0ca9a7b6 2274 SCALE (allocated), STAT_LABEL (allocated),
2275 SCALE (in_use), STAT_LABEL (in_use),
2276 SCALE (overhead), STAT_LABEL (overhead));
2a8997e8 2277 total_overhead += overhead;
4e00b6fd 2278 }
98cc198b 2279 fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
0ca9a7b6 2280 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
9af5ce0c 2281 SCALE (G.allocated), STAT_LABEL (G.allocated),
0ca9a7b6 2282 SCALE (total_overhead), STAT_LABEL (total_overhead));
b7257530 2283
ecd52ea9 2284 if (GATHER_STATISTICS)
2285 {
98cc198b 2286 fprintf (stderr, "\nTotal allocations and overheads during "
2287 "the compilation process\n");
ecd52ea9 2288
98cc198b 2289 fprintf (stderr, "Total Overhead: %10"
2290 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2291 fprintf (stderr, "Total Allocated: %10"
2292 HOST_LONG_LONG_FORMAT "d\n",
ecd52ea9 2293 G.stats.total_allocated);
2294
98cc198b 2295 fprintf (stderr, "Total Overhead under 32B: %10"
2296 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2297 fprintf (stderr, "Total Allocated under 32B: %10"
2298 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2299 fprintf (stderr, "Total Overhead under 64B: %10"
2300 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2301 fprintf (stderr, "Total Allocated under 64B: %10"
2302 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2303 fprintf (stderr, "Total Overhead under 128B: %10"
2304 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2305 fprintf (stderr, "Total Allocated under 128B: %10"
2306 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
ecd52ea9 2307
2308 for (i = 0; i < NUM_ORDERS; i++)
2309 if (G.stats.total_allocated_per_order[i])
2310 {
98cc198b 2311 fprintf (stderr, "Total Overhead page size %9lu: %10"
2312 HOST_LONG_LONG_FORMAT "d\n",
ecd52ea9 2313 (unsigned long) OBJECT_SIZE (i),
2314 G.stats.total_overhead_per_order[i]);
98cc198b 2315 fprintf (stderr, "Total Allocated page size %9lu: %10"
2316 HOST_LONG_LONG_FORMAT "d\n",
ecd52ea9 2317 (unsigned long) OBJECT_SIZE (i),
2318 G.stats.total_allocated_per_order[i]);
2319 }
b7257530 2320 }
4e00b6fd 2321}
573aba85 2322\f
0b09525f 2323struct ggc_pch_ondisk
2324{
2325 unsigned totals[NUM_ORDERS];
2326};
2327
573aba85 2328struct ggc_pch_data
2329{
0b09525f 2330 struct ggc_pch_ondisk d;
337c992b 2331 uintptr_t base[NUM_ORDERS];
573aba85 2332 size_t written[NUM_ORDERS];
2333};
2334
2335struct ggc_pch_data *
6ec1f4e0 2336init_ggc_pch (void)
573aba85 2337{
4c36ffe6 2338 return XCNEW (struct ggc_pch_data);
573aba85 2339}
2340
6ec1f4e0 2341void
2342ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
5cc13354 2343 size_t size, bool is_string ATTRIBUTE_UNUSED)
573aba85 2344{
2345 unsigned order;
2346
f68513d3 2347 if (size < NUM_SIZE_LOOKUP)
573aba85 2348 order = size_lookup[size];
2349 else
2350 {
1c2a6a66 2351 order = 10;
573aba85 2352 while (size > OBJECT_SIZE (order))
2353 order++;
2354 }
6ec1f4e0 2355
573aba85 2356 d->d.totals[order]++;
2357}
6ec1f4e0 2358
573aba85 2359size_t
6ec1f4e0 2360ggc_pch_total_size (struct ggc_pch_data *d)
573aba85 2361{
2362 size_t a = 0;
2363 unsigned i;
2364
2365 for (i = 0; i < NUM_ORDERS; i++)
25a28b44 2366 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
573aba85 2367 return a;
2368}
2369
2370void
6ec1f4e0 2371ggc_pch_this_base (struct ggc_pch_data *d, void *base)
573aba85 2372{
337c992b 2373 uintptr_t a = (uintptr_t) base;
573aba85 2374 unsigned i;
6ec1f4e0 2375
573aba85 2376 for (i = 0; i < NUM_ORDERS; i++)
2377 {
2378 d->base[i] = a;
25a28b44 2379 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
573aba85 2380 }
2381}
2382
2383
2384char *
6ec1f4e0 2385ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
5cc13354 2386 size_t size, bool is_string ATTRIBUTE_UNUSED)
573aba85 2387{
2388 unsigned order;
2389 char *result;
6ec1f4e0 2390
f68513d3 2391 if (size < NUM_SIZE_LOOKUP)
573aba85 2392 order = size_lookup[size];
2393 else
2394 {
1c2a6a66 2395 order = 10;
573aba85 2396 while (size > OBJECT_SIZE (order))
2397 order++;
2398 }
2399
2400 result = (char *) d->base[order];
2401 d->base[order] += OBJECT_SIZE (order);
2402 return result;
2403}
2404
6ec1f4e0 2405void
2406ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2407 FILE *f ATTRIBUTE_UNUSED)
573aba85 2408{
2409 /* Nothing to do. */
2410}
2411
2412void
98044fac 2413ggc_pch_write_object (struct ggc_pch_data *d,
6ec1f4e0 2414 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
7d60cc60 2415 size_t size, bool is_string ATTRIBUTE_UNUSED)
573aba85 2416{
2417 unsigned order;
1a7c0ccb 2418 static const char emptyBytes[256] = { 0 };
573aba85 2419
f68513d3 2420 if (size < NUM_SIZE_LOOKUP)
573aba85 2421 order = size_lookup[size];
2422 else
2423 {
1c2a6a66 2424 order = 10;
573aba85 2425 while (size > OBJECT_SIZE (order))
2426 order++;
2427 }
6ec1f4e0 2428
573aba85 2429 if (fwrite (x, size, 1, f) != 1)
c05be867 2430 fatal_error (input_location, "can%'t write PCH file: %m");
573aba85 2431
89e22a52 2432 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
e9efa031 2433 object out to OBJECT_SIZE(order). This happens for strings. */
89e22a52 2434
2435 if (size != OBJECT_SIZE (order))
2436 {
9af5ce0c 2437 unsigned padding = OBJECT_SIZE (order) - size;
89e22a52 2438
2439 /* To speed small writes, we use a nulled-out array that's larger
2440 than most padding requests as the source for our null bytes. This
2441 permits us to do the padding with fwrite() rather than fseek(), and
822e391f 2442 limits the chance the OS may try to flush any outstanding writes. */
9af5ce0c 2443 if (padding <= sizeof (emptyBytes))
89e22a52 2444 {
2445 if (fwrite (emptyBytes, 1, padding, f) != padding)
c05be867 2446 fatal_error (input_location, "can%'t write PCH file");
89e22a52 2447 }
2448 else
2449 {
e9efa031 2450 /* Larger than our buffer? Just default to fseek. */
89e22a52 2451 if (fseek (f, padding, SEEK_CUR) != 0)
c05be867 2452 fatal_error (input_location, "can%'t write PCH file");
89e22a52 2453 }
2454 }
573aba85 2455
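  /* Once the last object of this order has been written, pad the file
     out to a page boundary so the next order's region starts page
     aligned, matching the PAGE_ALIGN layout used for sizing above.  */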
2456 d->written[order]++;
2457 if (d->written[order] == d->d.totals[order]
2458 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2459 G.pagesize),
2460 SEEK_CUR) != 0)
c05be867 2461 fatal_error (input_location, "can%'t write PCH file: %m");
573aba85 2462}
2463
2464void
6ec1f4e0 2465ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
573aba85 2466{
2467 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
c05be867 2468 fatal_error (input_location, "can%'t write PCH file: %m");
573aba85 2469 free (d);
2470}
2471
76e1b933 2472/* Move the PCH PTE entries just added to the end of by_depth to the
 2473   front.  */
2474
2475static void
6ec1f4e0 2476move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
76e1b933 2477{
2478 unsigned i;
2479
2480 /* First, we swap the new entries to the front of the varrays. */
2481 page_entry **new_by_depth;
2482 unsigned long **new_save_in_use;
2483
4c36ffe6 2484 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2485 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
76e1b933 2486
2487 memcpy (&new_by_depth[0],
2488 &G.by_depth[count_old_page_tables],
2489 count_new_page_tables * sizeof (void *));
2490 memcpy (&new_by_depth[count_new_page_tables],
2491 &G.by_depth[0],
2492 count_old_page_tables * sizeof (void *));
2493 memcpy (&new_save_in_use[0],
2494 &G.save_in_use[count_old_page_tables],
2495 count_new_page_tables * sizeof (void *));
2496 memcpy (&new_save_in_use[count_new_page_tables],
2497 &G.save_in_use[0],
2498 count_old_page_tables * sizeof (void *));
2499
2500 free (G.by_depth);
2501 free (G.save_in_use);
6ec1f4e0 2502
76e1b933 2503 G.by_depth = new_by_depth;
2504 G.save_in_use = new_save_in_use;
2505
2506 /* Now update all the index_by_depth fields. */
2507 for (i = G.by_depth_in_use; i > 0; --i)
2508 {
2509 page_entry *p = G.by_depth[i-1];
2510 p->index_by_depth = i-1;
2511 }
2512
2513 /* And last, we update the depth pointers in G.depth. The first
2514 entry is already 0, and context 0 entries always start at index
2515 0, so there is nothing to update in the first slot. We need a
2516 second slot, only if we have old ptes, and if we do, they start
2517 at index count_new_page_tables. */
2518 if (count_old_page_tables)
2519 push_depth (count_new_page_tables);
2520}
2521
573aba85 2522void
6ec1f4e0 2523ggc_pch_read (FILE *f, void *addr)
573aba85 2524{
2525 struct ggc_pch_ondisk d;
2526 unsigned i;
4077bf7a 2527 char *offs = (char *) addr;
76e1b933 2528 unsigned long count_old_page_tables;
2529 unsigned long count_new_page_tables;
2530
2531 count_old_page_tables = G.by_depth_in_use;
2532
2533 /* We've just read in a PCH file. So, every object that used to be
2534 allocated is now free. */
573aba85 2535 clear_marks ();
32bbbaac 2536#ifdef ENABLE_GC_CHECKING
573aba85 2537 poison_pages ();
2538#endif
3fff4d99 2539 /* Since we free all the allocated objects, the free list becomes
2540 useless. Validate it now, which will also clear it. */
9af5ce0c 2541 validate_free_objects ();
573aba85 2542
2543 /* No object read from a PCH file should ever be freed. So, set the
2544 context depth to 1, and set the depth of all the currently-allocated
2545 pages to be 1 too. PCH pages will have depth 0. */
0d59b19d 2546 gcc_assert (!G.context_depth);
573aba85 2547 G.context_depth = 1;
2548 for (i = 0; i < NUM_ORDERS; i++)
2549 {
2550 page_entry *p;
2551 for (p = G.pages[i]; p != NULL; p = p->next)
2552 p->context_depth = G.context_depth;
2553 }
2554
2555 /* Allocate the appropriate page-table entries for the pages read from
2556 the PCH file. */
2557 if (fread (&d, sizeof (d), 1, f) != 1)
c05be867 2558 fatal_error (input_location, "can%'t read PCH file: %m");
6ec1f4e0 2559
573aba85 2560 for (i = 0; i < NUM_ORDERS; i++)
2561 {
2562 struct page_entry *entry;
2563 char *pte;
2564 size_t bytes;
2565 size_t num_objs;
2566 size_t j;
76e1b933 2567
573aba85 2568 if (d.totals[i] == 0)
2569 continue;
76e1b933 2570
25a28b44 2571 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
573aba85 2572 num_objs = bytes / OBJECT_SIZE (i);
4077bf7a 2573 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2574 - sizeof (long)
2575 + BITMAP_SIZE (num_objs + 1)));
573aba85 2576 entry->bytes = bytes;
2577 entry->page = offs;
2578 entry->context_depth = 0;
2579 offs += bytes;
2580 entry->num_free_objects = 0;
2581 entry->order = i;
2582
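      /* Every object read from the PCH is live, so set the whole
	 in-use bitmap, including the one-past-the-end bit: full words
	 first, then the remaining bits one at a time.  */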
6ec1f4e0 2583 for (j = 0;
573aba85 2584 j + HOST_BITS_PER_LONG <= num_objs + 1;
2585 j += HOST_BITS_PER_LONG)
2586 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2587 for (; j < num_objs + 1; j++)
6ec1f4e0 2588 entry->in_use_p[j / HOST_BITS_PER_LONG]
573aba85 2589 |= 1L << (j % HOST_BITS_PER_LONG);
2590
6ec1f4e0 2591 for (pte = entry->page;
2592 pte < entry->page + entry->bytes;
573aba85 2593 pte += G.pagesize)
2594 set_page_table_entry (pte, entry);
2595
2596 if (G.page_tails[i] != NULL)
2597 G.page_tails[i]->next = entry;
2598 else
2599 G.pages[i] = entry;
2600 G.page_tails[i] = entry;
76e1b933 2601
2602 /* We start off by just adding all the new information to the
 2603	     end of the varrays; later, we will move the new information
2604 to the front of the varrays, as the PCH page tables are at
2605 context 0. */
2606 push_by_depth (entry, 0);
573aba85 2607 }
2608
76e1b933 2609 /* Now, we update the various data structures that speed page table
2610 handling. */
2611 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2612
2613 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2614
573aba85 2615 /* Update the statistics. */
2616 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2617}