1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "alias.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "memmodel.h"
28 #include "tm_p.h"
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "cgraph.h"
35 #include "cfgloop.h"
36 #include "plugin.h"
37
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
40 #ifdef HAVE_MMAP_ANON
41 # undef HAVE_MMAP_DEV_ZERO
42 # define USING_MMAP
43 #endif
44
45 #ifdef HAVE_MMAP_DEV_ZERO
46 # define USING_MMAP
47 #endif
48
49 #ifndef USING_MMAP
50 #define USING_MALLOC_PAGE_GROUPS
51 #endif
52
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54 && defined(USING_MMAP)
55 # define USING_MADVISE
56 #endif
57
58 /* Strategy:
59
60 This garbage-collecting allocator allocates objects on one of a set
61 of pages. Each page can allocate objects of a single size only;
62 available sizes are powers of two starting at four bytes. The size
63 of an allocation request is rounded up to the next power of two
64 (`order'), and satisfied from the appropriate page.
65
66 Each page is recorded in a page-entry, which also maintains an
67 in-use bitmap of object positions on the page. This allows the
68 allocation state of a particular object to be flipped without
69 touching the page itself.
70
71 Each page-entry also has a context depth, which is used to track
72 pushing and popping of allocation contexts. Only objects allocated
73 in the current (highest-numbered) context may be collected.
74
75 Page entries are arranged in an array of singly-linked lists. The
76 array is indexed by the allocation size, in bits, of the pages on
77 it; i.e. all pages on a list allocate objects of the same size.
78 Pages are ordered on the list such that all non-full pages precede
79 all full pages, with non-full pages arranged in order of decreasing
80 context depth.
81
82 Empty pages (of all orders) are kept on a single page cache list,
83 and are considered first when new pages are required; they are
84 deallocated at the start of the next collection if they haven't
85 been recycled by then. */
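/* A worked example of the sizing above (illustrative only): with a
4096-byte system page, a request that rounds up to 32 bytes is served
from an order-5 page, which holds 4096 / 32 = 128 objects; the
page-entry's in-use bitmap therefore needs 128 bits plus the
one-past-the-end sentinel bit used below. */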
86
87 /* Define GGC_DEBUG_LEVEL to print debugging information.
88 0: No debugging output.
89 1: GC statistics only.
90 2: Page-entry allocations/deallocations as well.
91 3: Object allocations as well.
92 4: Object marks as well. */
93 #define GGC_DEBUG_LEVEL (0)
94 \f
95 /* A two-level tree is used to look up the page-entry for a given
96 pointer. Two chunks of the pointer's bits are extracted to index
97 the first and second levels of the tree, as follows:
98
99                             HOST_PAGE_SIZE_BITS
100                     32           |      |
101 msb +----------------+----+------+------+ lsb
102                         |     |
103               PAGE_L1_BITS    |
104                               |
105                         PAGE_L2_BITS
106
107 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
108 pages are aligned on system page boundaries. The next most
109 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
110 index values in the lookup table, respectively.
111
112 For 32-bit architectures and the settings below, there are no
113 leftover bits. For architectures with wider pointers, the lookup
114 tree points to a list of pages, which must be scanned to find the
115 correct one. */
116
117 #define PAGE_L1_BITS (8)
118 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
119 #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
120 #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
121
122 #define LOOKUP_L1(p) \
123 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
124
125 #define LOOKUP_L2(p) \
126 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
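/* For example (assuming a 4096-byte system page, so G.lg_pagesize == 12
and PAGE_L2_BITS == 12): for a pointer whose low 32 bits are 0x1234a000,
LOOKUP_L1 yields 0x12 (bits 31-24) and LOOKUP_L2 yields 0x34a
(bits 23-12); the bottom 12 bits are ignored because pages are aligned
on system page boundaries. */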
127
128 /* The number of objects per allocation page, for objects on a page of
129 the indicated ORDER. */
130 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
131
132 /* The number of objects in P. */
133 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
134
135 /* The size of an object on a page of the indicated ORDER. */
136 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
137
138 /* For speed, we avoid doing a general integer divide to locate the
139 offset in the allocation bitmap, by precalculating numbers M, S
140 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
141 within the page which is evenly divisible by the object size Z. */
142 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
143 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
144 #define OFFSET_TO_BIT(OFFSET, ORDER) \
145 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
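/* A worked example (illustrative; assumes 8-byte MAX_ALIGNMENT and a
64-bit size_t): for 24-byte objects, 24 == 3 * 2^3 gives DIV_SHIFT == 3
and DIV_MULT == 0xaaaaaaaaaaaaaaab, the multiplicative inverse of 3
modulo 2^64, so an offset of 48 maps to bit
(48 * 0xaaaaaaaaaaaaaaab) >> 3 == 2, i.e. 48 / 24. See compute_inverse
below for how the pair is derived. */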
146
147 /* We use this structure to determine the alignment required for
148 allocations. For power-of-two sized allocations, that's not a
149 problem, but it does matter for odd-sized allocations.
150 We do not care about alignment for floating-point types. */
151
152 struct max_alignment {
153 char c;
154 union {
155 int64_t i;
156 void *p;
157 } u;
158 };
159
160 /* The biggest alignment required. */
161
162 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
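/* Illustration: on a typical LP64 host both int64_t and void * require
8-byte alignment, so the char member is followed by 7 bytes of padding
and MAX_ALIGNMENT evaluates to 8; the extra orders below are then
multiples of 8 bytes. */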
163
164
165 /* The number of extra orders, not corresponding to power-of-two sized
166 objects. */
167
168 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
169
170 #define RTL_SIZE(NSLOTS) \
171 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
172
173 #define TREE_EXP_SIZE(OPS) \
174 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
175
176 /* The Ith entry is the maximum size of an object to be stored in the
177 Ith extra order. Adding a new entry to this array is the *only*
178 thing you need to do to add a new special allocation size. */
179
180 static const size_t extra_order_size_table[] = {
181 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
182 There are a lot of structures with these sizes and explicitly
183 listing them risks orders being dropped because they changed size. */
184 MAX_ALIGNMENT * 3,
185 MAX_ALIGNMENT * 5,
186 MAX_ALIGNMENT * 6,
187 MAX_ALIGNMENT * 7,
188 MAX_ALIGNMENT * 9,
189 MAX_ALIGNMENT * 10,
190 MAX_ALIGNMENT * 11,
191 MAX_ALIGNMENT * 12,
192 MAX_ALIGNMENT * 13,
193 MAX_ALIGNMENT * 14,
194 MAX_ALIGNMENT * 15,
195 sizeof (struct tree_decl_non_common),
196 sizeof (struct tree_field_decl),
197 sizeof (struct tree_parm_decl),
198 sizeof (struct tree_var_decl),
199 sizeof (struct tree_type_non_common),
200 sizeof (struct function),
201 sizeof (struct basic_block_def),
202 sizeof (struct cgraph_node),
203 sizeof (class loop),
204 };
205
206 /* The total number of orders. */
207
208 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
209
210 /* Compute the smallest nonnegative number which when added to X gives
211 a multiple of F. */
212
213 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
214
215 /* Round X up to the next multiple of the page size. */
216
217 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
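/* For example, ROUND_UP_VALUE (10, 8) == 6 (since 10 + 6 == 16), and
with a 4096-byte page size PAGE_ALIGN (5000) == 8192. */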
218
219 /* The Ith entry is the number of objects on a page of order I. */
220
221 static unsigned objects_per_page_table[NUM_ORDERS];
222
223 /* The Ith entry is the size of an object on a page of order I. */
224
225 static size_t object_size_table[NUM_ORDERS];
226
227 /* The Ith entry is a pair of numbers (mult, shift) such that
228 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
229 for all k evenly divisible by OBJECT_SIZE(I). */
230
231 static struct
232 {
233 size_t mult;
234 unsigned int shift;
235 }
236 inverse_table[NUM_ORDERS];
237
238 /* A page_entry records the status of an allocation page. This
239 structure is dynamically sized to fit the bitmap in_use_p. */
240 struct page_entry
241 {
242 /* The next page-entry with objects of the same size, or NULL if
243 this is the last page-entry. */
244 struct page_entry *next;
245
246 /* The previous page-entry with objects of the same size, or NULL if
247 this is the first page-entry. The PREV pointer exists solely to
248 keep the cost of ggc_free manageable. */
249 struct page_entry *prev;
250
251 /* The number of bytes allocated. (This will always be a multiple
252 of the host system page size.) */
253 size_t bytes;
254
255 /* The address at which the memory is allocated. */
256 char *page;
257
258 #ifdef USING_MALLOC_PAGE_GROUPS
259 /* Back pointer to the page group this page came from. */
260 struct page_group *group;
261 #endif
262
263 /* This is the index in the by_depth varray where this page table
264 can be found. */
265 unsigned long index_by_depth;
266
267 /* Context depth of this page. */
268 unsigned short context_depth;
269
270 /* The number of free objects remaining on this page. */
271 unsigned short num_free_objects;
272
273 /* A likely candidate for the bit position of a free object for the
274 next allocation from this page. */
275 unsigned short next_bit_hint;
276
277 /* The lg of size of objects allocated from this page. */
278 unsigned char order;
279
280 /* Discarded page? */
281 bool discarded;
282
283 /* A bit vector indicating whether or not objects are in use. The
284 Nth bit is one if the Nth object on this page is allocated. This
285 array is dynamically sized. */
286 unsigned long in_use_p[1];
287 };
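/* Note (see alloc_page below): a page_entry is allocated with
sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
bytes, so the trailing in_use_p array covers every object on the page
plus the one-past-the-end sentinel bit. */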
288
289 #ifdef USING_MALLOC_PAGE_GROUPS
290 /* A page_group describes a large allocation from malloc, from which
291 we parcel out aligned pages. */
292 struct page_group
293 {
294 /* A linked list of all extant page groups. */
295 struct page_group *next;
296
297 /* The address we received from malloc. */
298 char *allocation;
299
300 /* The size of the block. */
301 size_t alloc_size;
302
303 /* A bitmask of pages in use. */
304 unsigned int in_use;
305 };
306 #endif
307
308 #if HOST_BITS_PER_PTR <= 32
309
310 /* On 32-bit hosts, we use a two level page table, as pictured above. */
311 typedef page_entry **page_table[PAGE_L1_SIZE];
312
313 #else
314
315 /* On 64-bit hosts, we use the same two level page tables plus a linked
316 list that disambiguates the top 32 bits. There will almost always be
317 exactly one entry in the list. */
318 typedef struct page_table_chain
319 {
320 struct page_table_chain *next;
321 size_t high_bits;
322 page_entry **table[PAGE_L1_SIZE];
323 } *page_table;
324
325 #endif
326
327 class finalizer
328 {
329 public:
330 finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
331
332 void *addr () const { return m_addr; }
333
334 void call () const { m_function (m_addr); }
335
336 private:
337 void *m_addr;
338 void (*m_function)(void *);
339 };
340
341 class vec_finalizer
342 {
343 public:
344 vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
345 m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
346
347 void call () const
348 {
349 for (size_t i = 0; i < m_n_objects; i++)
350 m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
351 }
352
353 void *addr () const { return reinterpret_cast<void *> (m_addr); }
354
355 private:
356 uintptr_t m_addr;
357 void (*m_function)(void *);
358 size_t m_object_size;
359 size_t m_n_objects;
360 };
361
362 #ifdef ENABLE_GC_ALWAYS_COLLECT
363 /* List of free objects to be verified as actually free on the
364 next collection. */
365 struct free_object
366 {
367 void *object;
368 struct free_object *next;
369 };
370 #endif
371
372 /* The rest of the global variables. */
373 static struct ggc_globals
374 {
375 /* The Nth element in this array is a page with objects of size 2^N.
376 If there are any pages with free objects, they will be at the
377 head of the list. NULL if there are no page-entries for this
378 object size. */
379 page_entry *pages[NUM_ORDERS];
380
381 /* The Nth element in this array is the last page with objects of
382 size 2^N. NULL if there are no page-entries for this object
383 size. */
384 page_entry *page_tails[NUM_ORDERS];
385
386 /* Lookup table for associating allocation pages with object addresses. */
387 page_table lookup;
388
389 /* The system's page size. */
390 size_t pagesize;
391 size_t lg_pagesize;
392
393 /* Bytes currently allocated. */
394 size_t allocated;
395
396 /* Bytes currently allocated at the end of the last collection. */
397 size_t allocated_last_gc;
398
399 /* Total amount of memory mapped. */
400 size_t bytes_mapped;
401
402 /* Bit N set if any allocations have been done at context depth N. */
403 unsigned long context_depth_allocations;
404
405 /* Bit N set if any collections have been done at context depth N. */
406 unsigned long context_depth_collections;
407
408 /* The current depth in the context stack. */
409 unsigned short context_depth;
410
411 /* A file descriptor open to /dev/zero for reading. */
412 #if defined (HAVE_MMAP_DEV_ZERO)
413 int dev_zero_fd;
414 #endif
415
416 /* A cache of free system pages. */
417 page_entry *free_pages;
418
419 #ifdef USING_MALLOC_PAGE_GROUPS
420 page_group *page_groups;
421 #endif
422
423 /* The file stream used for debugging output. */
424 FILE *debug_file;
425
426 /* Current number of elements in use in depth below. */
427 unsigned int depth_in_use;
428
429 /* Maximum number of elements that can be used before resizing. */
430 unsigned int depth_max;
431
432 /* Each element of this array is the index into by_depth at which the
433 given depth starts. This array is indexed by the depth we are
434 interested in. */
435 unsigned int *depth;
436
437 /* Current number of elements in use in by_depth below. */
438 unsigned int by_depth_in_use;
439
440 /* Maximum number of elements that can be used before resizing. */
441 unsigned int by_depth_max;
442
443 /* Each element of this array is a pointer to a page_entry, all
444 page_entries can be found in here by increasing depth.
445 index_by_depth in the page_entry is the index into this data
446 structure where that page_entry can be found. This is used to
447 speed up finding all page_entries at a particular depth. */
448 page_entry **by_depth;
449
450 /* Each element is a pointer to the saved in_use_p bits, if any,
451 zero otherwise. We allocate them all together, to enable a
452 better runtime data access pattern. */
453 unsigned long **save_in_use;
454
455 /* Finalizers for single objects. The first index is collection_depth. */
456 vec<vec<finalizer> > finalizers;
457
458 /* Finalizers for vectors of objects. */
459 vec<vec<vec_finalizer> > vec_finalizers;
460
461 #ifdef ENABLE_GC_ALWAYS_COLLECT
462 /* List of free objects to be verified as actually free on the
463 next collection. */
464 struct free_object *free_object_list;
465 #endif
466
467 struct
468 {
469 /* Total GC-allocated memory. */
470 unsigned long long total_allocated;
471 /* Total overhead for GC-allocated memory. */
472 unsigned long long total_overhead;
473
474 /* Total allocations and overhead for sizes less than 32, 64 and 128.
475 These sizes are interesting because they are typical cache line
476 sizes. */
477
478 unsigned long long total_allocated_under32;
479 unsigned long long total_overhead_under32;
480
481 unsigned long long total_allocated_under64;
482 unsigned long long total_overhead_under64;
483
484 unsigned long long total_allocated_under128;
485 unsigned long long total_overhead_under128;
486
487 /* The allocations for each of the allocation orders. */
488 unsigned long long total_allocated_per_order[NUM_ORDERS];
489
490 /* The overhead for each of the allocation orders. */
491 unsigned long long total_overhead_per_order[NUM_ORDERS];
492 } stats;
493 } G;
494
495 /* True if a gc is currently taking place. */
496
497 static bool in_gc = false;
498
499 /* The size in bytes required to maintain a bitmap for the objects
500 on a page-entry. */
501 #define BITMAP_SIZE(Num_objects) \
502 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
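/* For example, assuming a 64-bit long, BITMAP_SIZE (129) ==
CEIL (129, 64) * sizeof (long) == 24 bytes: three longs cover 128
objects plus the one-past-the-end sentinel bit. */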
503
504 /* Allocate pages in chunks of this size, to throttle calls to memory
505 allocation routines. The first page is used, the rest go onto the
506 free list. This cannot be larger than HOST_BITS_PER_INT for the
507 in_use bitmask for page_group. Hosts that need a different value
508 can override this by defining GGC_QUIRE_SIZE explicitly. */
509 #ifndef GGC_QUIRE_SIZE
510 # ifdef USING_MMAP
511 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
512 # else
513 # define GGC_QUIRE_SIZE 16
514 # endif
515 #endif
516
517 /* Initial guess as to how many page table entries we might need. */
518 #define INITIAL_PTE_COUNT 128
519 \f
520 static page_entry *lookup_page_table_entry (const void *);
521 static void set_page_table_entry (void *, page_entry *);
522 #ifdef USING_MMAP
523 static char *alloc_anon (char *, size_t, bool check);
524 #endif
525 #ifdef USING_MALLOC_PAGE_GROUPS
526 static size_t page_group_index (char *, char *);
527 static void set_page_group_in_use (page_group *, char *);
528 static void clear_page_group_in_use (page_group *, char *);
529 #endif
530 static struct page_entry * alloc_page (unsigned);
531 static void free_page (struct page_entry *);
532 static void clear_marks (void);
533 static void sweep_pages (void);
534 static void ggc_recalculate_in_use_p (page_entry *);
535 static void compute_inverse (unsigned);
536 static inline void adjust_depth (void);
537 static void move_ptes_to_front (int, int);
538
539 void debug_print_page_list (int);
540 static void push_depth (unsigned int);
541 static void push_by_depth (page_entry *, unsigned long *);
542
543 /* Push an entry onto G.depth. */
544
545 inline static void
546 push_depth (unsigned int i)
547 {
548 if (G.depth_in_use >= G.depth_max)
549 {
550 G.depth_max *= 2;
551 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
552 }
553 G.depth[G.depth_in_use++] = i;
554 }
555
556 /* Push an entry onto G.by_depth and G.save_in_use. */
557
558 inline static void
559 push_by_depth (page_entry *p, unsigned long *s)
560 {
561 if (G.by_depth_in_use >= G.by_depth_max)
562 {
563 G.by_depth_max *= 2;
564 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
565 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
566 G.by_depth_max);
567 }
568 G.by_depth[G.by_depth_in_use] = p;
569 G.save_in_use[G.by_depth_in_use++] = s;
570 }
571
572 #if (GCC_VERSION < 3001)
573 #define prefetch(X) ((void) X)
574 #else
575 #define prefetch(X) __builtin_prefetch (X)
576 #endif
577
578 #define save_in_use_p_i(__i) \
579 (G.save_in_use[__i])
580 #define save_in_use_p(__p) \
581 (save_in_use_p_i (__p->index_by_depth))
582
583 /* Traverse the page table and find the entry for a page.
584 If the object wasn't allocated by the GC, return NULL. */
585
586 static inline page_entry *
587 safe_lookup_page_table_entry (const void *p)
588 {
589 page_entry ***base;
590 size_t L1, L2;
591
592 #if HOST_BITS_PER_PTR <= 32
593 base = &G.lookup[0];
594 #else
595 page_table table = G.lookup;
596 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
597 while (1)
598 {
599 if (table == NULL)
600 return NULL;
601 if (table->high_bits == high_bits)
602 break;
603 table = table->next;
604 }
605 base = &table->table[0];
606 #endif
607
608 /* Extract the level 1 and 2 indices. */
609 L1 = LOOKUP_L1 (p);
610 L2 = LOOKUP_L2 (p);
611 if (! base[L1])
612 return NULL;
613
614 return base[L1][L2];
615 }
616
617 /* Traverse the page table and find the entry for a page.
618 Die (probably) if the object wasn't allocated via GC. */
619
620 static inline page_entry *
621 lookup_page_table_entry (const void *p)
622 {
623 page_entry ***base;
624 size_t L1, L2;
625
626 #if HOST_BITS_PER_PTR <= 32
627 base = &G.lookup[0];
628 #else
629 page_table table = G.lookup;
630 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
631 while (table->high_bits != high_bits)
632 table = table->next;
633 base = &table->table[0];
634 #endif
635
636 /* Extract the level 1 and 2 indices. */
637 L1 = LOOKUP_L1 (p);
638 L2 = LOOKUP_L2 (p);
639
640 return base[L1][L2];
641 }
642
643 /* Set the page table entry for a page. */
644
645 static void
646 set_page_table_entry (void *p, page_entry *entry)
647 {
648 page_entry ***base;
649 size_t L1, L2;
650
651 #if HOST_BITS_PER_PTR <= 32
652 base = &G.lookup[0];
653 #else
654 page_table table;
655 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
656 for (table = G.lookup; table; table = table->next)
657 if (table->high_bits == high_bits)
658 goto found;
659
660 /* Not found -- allocate a new table. */
661 table = XCNEW (struct page_table_chain);
662 table->next = G.lookup;
663 table->high_bits = high_bits;
664 G.lookup = table;
665 found:
666 base = &table->table[0];
667 #endif
668
669 /* Extract the level 1 and 2 indices. */
670 L1 = LOOKUP_L1 (p);
671 L2 = LOOKUP_L2 (p);
672
673 if (base[L1] == NULL)
674 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
675
676 base[L1][L2] = entry;
677 }
678
679 /* Prints the page-entry for object size ORDER, for debugging. */
680
681 DEBUG_FUNCTION void
682 debug_print_page_list (int order)
683 {
684 page_entry *p;
685 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
686 (void *) G.page_tails[order]);
687 p = G.pages[order];
688 while (p != NULL)
689 {
690 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
691 p->num_free_objects);
692 p = p->next;
693 }
694 printf ("NULL\n");
695 fflush (stdout);
696 }
697
698 #ifdef USING_MMAP
699 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
700 (if non-null). The ifdef structure here is intended to cause a
701 compile error unless exactly one of the HAVE_* macros is defined. */
702
703 static inline char *
704 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
705 {
706 #ifdef HAVE_MMAP_ANON
707 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
708 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
709 #endif
710 #ifdef HAVE_MMAP_DEV_ZERO
711 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
712 MAP_PRIVATE, G.dev_zero_fd, 0);
713 #endif
714
715 if (page == (char *) MAP_FAILED)
716 {
717 if (!check)
718 return NULL;
719 perror ("virtual memory exhausted");
720 exit (FATAL_EXIT_CODE);
721 }
722
723 /* Remember that we allocated this memory. */
724 G.bytes_mapped += size;
725
726 /* Pretend we don't have access to the allocated pages. We'll enable
727 access to smaller pieces of the area in ggc_internal_alloc. Discard the
728 handle to avoid handle leak. */
729 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
730
731 return page;
732 }
733 #endif
734 #ifdef USING_MALLOC_PAGE_GROUPS
735 /* Compute the index for this page into the page group. */
736
737 static inline size_t
738 page_group_index (char *allocation, char *page)
739 {
740 return (size_t) (page - allocation) >> G.lg_pagesize;
741 }
742
743 /* Set and clear the in_use bit for this page in the page group. */
744
745 static inline void
746 set_page_group_in_use (page_group *group, char *page)
747 {
748 group->in_use |= 1 << page_group_index (group->allocation, page);
749 }
750
751 static inline void
752 clear_page_group_in_use (page_group *group, char *page)
753 {
754 group->in_use &= ~(1 << page_group_index (group->allocation, page));
755 }
756 #endif
757
758 /* Allocate a new page for allocating objects of size 2^ORDER,
759 and return an entry for it. The entry is not added to the
760 appropriate page_table list. */
761
762 static inline struct page_entry *
763 alloc_page (unsigned order)
764 {
765 struct page_entry *entry, *p, **pp;
766 char *page;
767 size_t num_objects;
768 size_t bitmap_size;
769 size_t page_entry_size;
770 size_t entry_size;
771 #ifdef USING_MALLOC_PAGE_GROUPS
772 page_group *group;
773 #endif
774
775 num_objects = OBJECTS_PER_PAGE (order);
776 bitmap_size = BITMAP_SIZE (num_objects + 1);
777 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
778 entry_size = num_objects * OBJECT_SIZE (order);
779 if (entry_size < G.pagesize)
780 entry_size = G.pagesize;
781 entry_size = PAGE_ALIGN (entry_size);
782
783 entry = NULL;
784 page = NULL;
785
786 /* Check the list of free pages for one we can use. */
787 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
788 if (p->bytes == entry_size)
789 break;
790
791 if (p != NULL)
792 {
793 if (p->discarded)
794 G.bytes_mapped += p->bytes;
795 p->discarded = false;
796
797 /* Recycle the allocated memory from this page ... */
798 *pp = p->next;
799 page = p->page;
800
801 #ifdef USING_MALLOC_PAGE_GROUPS
802 group = p->group;
803 #endif
804
805 /* ... and, if possible, the page entry itself. */
806 if (p->order == order)
807 {
808 entry = p;
809 memset (entry, 0, page_entry_size);
810 }
811 else
812 free (p);
813 }
814 #ifdef USING_MMAP
815 else if (entry_size == G.pagesize)
816 {
817 /* We want just one page. Allocate a bunch of them and put the
818 extras on the freelist. (Can only do this optimization with
819 mmap for backing store.) */
820 struct page_entry *e, *f = G.free_pages;
821 int i, entries = GGC_QUIRE_SIZE;
822
823 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
824 if (page == NULL)
825 {
826 page = alloc_anon (NULL, G.pagesize, true);
827 entries = 1;
828 }
829
830 /* This loop counts down so that the chain will be in ascending
831 memory order. */
832 for (i = entries - 1; i >= 1; i--)
833 {
834 e = XCNEWVAR (struct page_entry, page_entry_size);
835 e->order = order;
836 e->bytes = G.pagesize;
837 e->page = page + (i << G.lg_pagesize);
838 e->next = f;
839 f = e;
840 }
841
842 G.free_pages = f;
843 }
844 else
845 page = alloc_anon (NULL, entry_size, true);
846 #endif
847 #ifdef USING_MALLOC_PAGE_GROUPS
848 else
849 {
850 /* Allocate a large block of memory and serve out the aligned
851 pages therein. This results in much less memory wastage
852 than the traditional implementation of valloc. */
853
854 char *allocation, *a, *enda;
855 size_t alloc_size, head_slop, tail_slop;
856 int multiple_pages = (entry_size == G.pagesize);
857
858 if (multiple_pages)
859 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
860 else
861 alloc_size = entry_size + G.pagesize - 1;
862 allocation = XNEWVEC (char, alloc_size);
863
864 page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
865 head_slop = page - allocation;
866 if (multiple_pages)
867 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
868 else
869 tail_slop = alloc_size - entry_size - head_slop;
870 enda = allocation + alloc_size - tail_slop;
871
872 /* We allocated N pages, which are likely not aligned, leaving
873 us with N-1 usable pages. We plan to place the page_group
874 structure somewhere in the slop. */
875 if (head_slop >= sizeof (page_group))
876 group = (page_group *)page - 1;
877 else
878 {
879 /* We magically got an aligned allocation. Too bad, we have
880 to waste a page anyway. */
881 if (tail_slop == 0)
882 {
883 enda -= G.pagesize;
884 tail_slop += G.pagesize;
885 }
886 gcc_assert (tail_slop >= sizeof (page_group));
887 group = (page_group *)enda;
888 tail_slop -= sizeof (page_group);
889 }
890
891 /* Remember that we allocated this memory. */
892 group->next = G.page_groups;
893 group->allocation = allocation;
894 group->alloc_size = alloc_size;
895 group->in_use = 0;
896 G.page_groups = group;
897 G.bytes_mapped += alloc_size;
898
899 /* If we allocated multiple pages, put the rest on the free list. */
900 if (multiple_pages)
901 {
902 struct page_entry *e, *f = G.free_pages;
903 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
904 {
905 e = XCNEWVAR (struct page_entry, page_entry_size);
906 e->order = order;
907 e->bytes = G.pagesize;
908 e->page = a;
909 e->group = group;
910 e->next = f;
911 f = e;
912 }
913 G.free_pages = f;
914 }
915 }
916 #endif
917
918 if (entry == NULL)
919 entry = XCNEWVAR (struct page_entry, page_entry_size);
920
921 entry->bytes = entry_size;
922 entry->page = page;
923 entry->context_depth = G.context_depth;
924 entry->order = order;
925 entry->num_free_objects = num_objects;
926 entry->next_bit_hint = 1;
927
928 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
929
930 #ifdef USING_MALLOC_PAGE_GROUPS
931 entry->group = group;
932 set_page_group_in_use (group, page);
933 #endif
934
935 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
936 increment the hint. */
937 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
938 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
939
940 set_page_table_entry (page, entry);
941
942 if (GGC_DEBUG_LEVEL >= 2)
943 fprintf (G.debug_file,
944 "Allocating page at %p, object size=%lu, data %p-%p\n",
945 (void *) entry, (unsigned long) OBJECT_SIZE (order),
946 (void *) page, (void *) (page + entry_size - 1));
947
948 return entry;
949 }
950
951 /* Adjust the size of G.depth so that no index greater than the one
952 used by the top of G.by_depth is used. */
953
954 static inline void
955 adjust_depth (void)
956 {
957 page_entry *top;
958
959 if (G.by_depth_in_use)
960 {
961 top = G.by_depth[G.by_depth_in_use-1];
962
963 /* Peel back indices in depth that index into by_depth, so that
964 as new elements are added to by_depth, we note the indices
965 of those elements, if they are for new context depths. */
966 while (G.depth_in_use > (size_t)top->context_depth+1)
967 --G.depth_in_use;
968 }
969 }
970
971 /* For a page that is no longer needed, put it on the free page list. */
972
973 static void
974 free_page (page_entry *entry)
975 {
976 if (GGC_DEBUG_LEVEL >= 2)
977 fprintf (G.debug_file,
978 "Deallocating page at %p, data %p-%p\n", (void *) entry,
979 (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
980
981 /* Mark the page as inaccessible. Discard the handle to avoid handle
982 leak. */
983 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
984
985 set_page_table_entry (entry->page, NULL);
986
987 #ifdef USING_MALLOC_PAGE_GROUPS
988 clear_page_group_in_use (entry->group, entry->page);
989 #endif
990
991 if (G.by_depth_in_use > 1)
992 {
993 page_entry *top = G.by_depth[G.by_depth_in_use-1];
994 int i = entry->index_by_depth;
995
996 /* We cannot free a page from a context deeper than the current
997 one. */
998 gcc_assert (entry->context_depth == top->context_depth);
999
1000 /* Put top element into freed slot. */
1001 G.by_depth[i] = top;
1002 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1003 top->index_by_depth = i;
1004 }
1005 --G.by_depth_in_use;
1006
1007 adjust_depth ();
1008
1009 entry->next = G.free_pages;
1010 G.free_pages = entry;
1011 }
1012
1013 /* Release the free page cache to the system. */
1014
1015 static void
1016 release_pages (void)
1017 {
1018 size_t n1 = 0;
1019 size_t n2 = 0;
1020 #ifdef USING_MADVISE
1021 page_entry *p, *start_p;
1022 char *start;
1023 size_t len;
1024 size_t mapped_len;
1025 page_entry *next, *prev, *newprev;
1026 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1027
1028 /* First free larger contiguous areas to the OS.
1029 This allows other allocators to grab these areas if needed.
1030 This is only done on larger chunks to avoid fragmentation.
1031 This does not always work because the free_pages list is only
1032 approximately sorted. */
1033
1034 p = G.free_pages;
1035 prev = NULL;
1036 while (p)
1037 {
1038 start = p->page;
1039 start_p = p;
1040 len = 0;
1041 mapped_len = 0;
1042 newprev = prev;
1043 while (p && p->page == start + len)
1044 {
1045 len += p->bytes;
1046 if (!p->discarded)
1047 mapped_len += p->bytes;
1048 newprev = p;
1049 p = p->next;
1050 }
1051 if (len >= free_unit)
1052 {
1053 while (start_p != p)
1054 {
1055 next = start_p->next;
1056 free (start_p);
1057 start_p = next;
1058 }
1059 munmap (start, len);
1060 if (prev)
1061 prev->next = p;
1062 else
1063 G.free_pages = p;
1064 G.bytes_mapped -= mapped_len;
1065 n1 += len;
1066 continue;
1067 }
1068 prev = newprev;
1069 }
1070
1071 /* Now give back the fragmented pages to the OS, but keep the address
1072 space to reuse it next time. */
1073
1074 for (p = G.free_pages; p; )
1075 {
1076 if (p->discarded)
1077 {
1078 p = p->next;
1079 continue;
1080 }
1081 start = p->page;
1082 len = p->bytes;
1083 start_p = p;
1084 p = p->next;
1085 while (p && p->page == start + len)
1086 {
1087 len += p->bytes;
1088 p = p->next;
1089 }
1090 /* Give the page back to the kernel, but don't free the mapping.
1091 This avoids fragmentation in the virtual memory map of the
1092 process. Next time we can reuse it by just touching it. */
1093 madvise (start, len, MADV_DONTNEED);
1094 /* Don't count those pages as mapped, so that we do not trigger the
1095 garbage collector unnecessarily. */
1096 G.bytes_mapped -= len;
1097 n2 += len;
1098 while (start_p != p)
1099 {
1100 start_p->discarded = true;
1101 start_p = start_p->next;
1102 }
1103 }
1104 #endif
1105 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1106 page_entry *p, *next;
1107 char *start;
1108 size_t len;
1109
1110 /* Gather up adjacent pages so they are unmapped together. */
1111 p = G.free_pages;
1112
1113 while (p)
1114 {
1115 start = p->page;
1116 next = p->next;
1117 len = p->bytes;
1118 free (p);
1119 p = next;
1120
1121 while (p && p->page == start + len)
1122 {
1123 next = p->next;
1124 len += p->bytes;
1125 free (p);
1126 p = next;
1127 }
1128
1129 munmap (start, len);
1130 n1 += len;
1131 G.bytes_mapped -= len;
1132 }
1133
1134 G.free_pages = NULL;
1135 #endif
1136 #ifdef USING_MALLOC_PAGE_GROUPS
1137 page_entry **pp, *p;
1138 page_group **gp, *g;
1139
1140 /* Remove all pages from free page groups from the list. */
1141 pp = &G.free_pages;
1142 while ((p = *pp) != NULL)
1143 if (p->group->in_use == 0)
1144 {
1145 *pp = p->next;
1146 free (p);
1147 }
1148 else
1149 pp = &p->next;
1150
1151 /* Remove all free page groups, and release the storage. */
1152 gp = &G.page_groups;
1153 while ((g = *gp) != NULL)
1154 if (g->in_use == 0)
1155 {
1156 *gp = g->next;
1157 G.bytes_mapped -= g->alloc_size;
1158 n1 += g->alloc_size;
1159 free (g->allocation);
1160 }
1161 else
1162 gp = &g->next;
1163 #endif
1164 if (!quiet_flag && (n1 || n2))
1165 {
1166 fprintf (stderr, " {GC");
1167 if (n1)
1168 fprintf (stderr, " released %luk", (unsigned long)(n1 / 1024));
1169 if (n2)
1170 fprintf (stderr, " madv_dontneed %luk", (unsigned long)(n2 / 1024));
1171 fprintf (stderr, "}");
1172 }
1173 }
1174
1175 /* This table provides a fast way to determine ceil(log_2(size)) for
1176 allocation requests. The minimum allocation size is eight bytes. */
1177 #define NUM_SIZE_LOOKUP 512
1178 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1179 {
1180 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1181 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1182 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1183 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1184 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1185 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1186 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1187 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1188 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1189 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1190 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1191 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1192 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1193 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1194 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1195 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1196 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1197 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1198 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1199 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1200 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1201 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1202 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1203 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1204 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1205 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1206 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1207 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1208 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1209 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1211 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1212 };
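/* For example, size_lookup[20] == 5 in this initial table, so a
20-byte request maps to 32-byte objects; init_ggc below rewrites
entries so that sizes covered by the extra orders (e.g. 24 bytes when
MAX_ALIGNMENT is 8) map to those orders instead. */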
1213
1214 /* For a given size of memory requested for allocation, return the
1215 actual size that is going to be allocated, as well as the size
1216 order. */
1217
1218 static void
1219 ggc_round_alloc_size_1 (size_t requested_size,
1220 size_t *size_order,
1221 size_t *alloced_size)
1222 {
1223 size_t order, object_size;
1224
1225 if (requested_size < NUM_SIZE_LOOKUP)
1226 {
1227 order = size_lookup[requested_size];
1228 object_size = OBJECT_SIZE (order);
1229 }
1230 else
1231 {
1232 order = 10;
1233 while (requested_size > (object_size = OBJECT_SIZE (order)))
1234 order++;
1235 }
1236
1237 if (size_order)
1238 *size_order = order;
1239 if (alloced_size)
1240 *alloced_size = object_size;
1241 }
1242
1243 /* For a given size of memory requested for allocation, return the
1244 actual size that is going to be allocated. */
1245
1246 size_t
1247 ggc_round_alloc_size (size_t requested_size)
1248 {
1249 size_t size = 0;
1250
1251 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1252 return size;
1253 }
1254
1255 /* Push a finalizer onto the appropriate vec. */
1256
1257 static void
1258 add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1259 {
1260 if (f == NULL)
1261 /* No finalizer. */;
1262 else if (n == 1)
1263 {
1264 finalizer fin (result, f);
1265 G.finalizers[G.context_depth].safe_push (fin);
1266 }
1267 else
1268 {
1269 vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1270 G.vec_finalizers[G.context_depth].safe_push (fin);
1271 }
1272 }
1273
1274 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1275
1276 void *
1277 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1278 MEM_STAT_DECL)
1279 {
1280 size_t order, word, bit, object_offset, object_size;
1281 struct page_entry *entry;
1282 void *result;
1283
1284 ggc_round_alloc_size_1 (size, &order, &object_size);
1285
1286 /* If there are non-full pages for this size allocation, they are at
1287 the head of the list. */
1288 entry = G.pages[order];
1289
1290 /* If there is no page for this object size, or all pages in this
1291 context are full, allocate a new page. */
1292 if (entry == NULL || entry->num_free_objects == 0)
1293 {
1294 struct page_entry *new_entry;
1295 new_entry = alloc_page (order);
1296
1297 new_entry->index_by_depth = G.by_depth_in_use;
1298 push_by_depth (new_entry, 0);
1299
1300 /* We can skip context depths; if we do, make sure we go all the
1301 way to the new depth. */
1302 while (new_entry->context_depth >= G.depth_in_use)
1303 push_depth (G.by_depth_in_use-1);
1304
1305 /* If this is the only entry, it's also the tail. If it is not
1306 the only entry, then we must update the PREV pointer of the
1307 ENTRY (G.pages[order]) to point to our new page entry. */
1308 if (entry == NULL)
1309 G.page_tails[order] = new_entry;
1310 else
1311 entry->prev = new_entry;
1312
1313 /* Put new pages at the head of the page list. By definition the
1314 entry at the head of the list always has a NULL PREV pointer. */
1315 new_entry->next = entry;
1316 new_entry->prev = NULL;
1317 entry = new_entry;
1318 G.pages[order] = new_entry;
1319
1320 /* For a new page, we know the word and bit positions (in the
1321 in_use bitmap) of the first available object -- they're zero. */
1322 new_entry->next_bit_hint = 1;
1323 word = 0;
1324 bit = 0;
1325 object_offset = 0;
1326 }
1327 else
1328 {
1329 /* First try to use the hint left from the previous allocation
1330 to locate a clear bit in the in-use bitmap. We've made sure
1331 that the one-past-the-end bit is always set, so if the hint
1332 has run over, this test will fail. */
1333 unsigned hint = entry->next_bit_hint;
1334 word = hint / HOST_BITS_PER_LONG;
1335 bit = hint % HOST_BITS_PER_LONG;
1336
1337 /* If the hint didn't work, scan the bitmap from the beginning. */
1338 if ((entry->in_use_p[word] >> bit) & 1)
1339 {
1340 word = bit = 0;
1341 while (~entry->in_use_p[word] == 0)
1342 ++word;
1343
1344 #if GCC_VERSION >= 3004
1345 bit = __builtin_ctzl (~entry->in_use_p[word]);
1346 #else
1347 while ((entry->in_use_p[word] >> bit) & 1)
1348 ++bit;
1349 #endif
1350
1351 hint = word * HOST_BITS_PER_LONG + bit;
1352 }
1353
1354 /* Next time, try the next bit. */
1355 entry->next_bit_hint = hint + 1;
1356
1357 object_offset = hint * object_size;
1358 }
1359
1360 /* Set the in-use bit. */
1361 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1362
1363 /* Keep a running total of the number of free objects. If this page
1364 fills up, we may have to move it to the end of the list if the
1365 next page isn't full. If the next page is full, all subsequent
1366 pages are full, so there's no need to move it. */
1367 if (--entry->num_free_objects == 0
1368 && entry->next != NULL
1369 && entry->next->num_free_objects > 0)
1370 {
1371 /* We have a new head for the list. */
1372 G.pages[order] = entry->next;
1373
1374 /* We are moving ENTRY to the end of the page table list.
1375 The new page at the head of the list will have NULL in
1376 its PREV field and ENTRY will have NULL in its NEXT field. */
1377 entry->next->prev = NULL;
1378 entry->next = NULL;
1379
1380 /* Append ENTRY to the tail of the list. */
1381 entry->prev = G.page_tails[order];
1382 G.page_tails[order]->next = entry;
1383 G.page_tails[order] = entry;
1384 }
1385
1386 /* Calculate the object's address. */
1387 result = entry->page + object_offset;
1388 if (GATHER_STATISTICS)
1389 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1390 result FINAL_PASS_MEM_STAT);
1391
1392 #ifdef ENABLE_GC_CHECKING
1393 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1394 exact same semantics in the presence of memory bugs, regardless of
1395 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1396 handle to avoid handle leak. */
1397 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1398
1399 /* `Poison' the entire allocated object, including any padding at
1400 the end. */
1401 memset (result, 0xaf, object_size);
1402
1403 /* Make the bytes after the end of the object inaccessible. Discard the
1404 handle to avoid handle leak. */
1405 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1406 object_size - size));
1407 #endif
1408
1409 /* Tell Valgrind that the memory is there, but its content isn't
1410 defined. The bytes at the end of the object are still marked
1411 inaccessible. */
1412 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1413
1414 /* Keep track of how many bytes are being allocated. This
1415 information is used in deciding when to collect. */
1416 G.allocated += object_size;
1417
1418 /* For timevar statistics. */
1419 timevar_ggc_mem_total += object_size;
1420
1421 if (f)
1422 add_finalizer (result, f, s, n);
1423
1424 if (GATHER_STATISTICS)
1425 {
1426 size_t overhead = object_size - size;
1427
1428 G.stats.total_overhead += overhead;
1429 G.stats.total_allocated += object_size;
1430 G.stats.total_overhead_per_order[order] += overhead;
1431 G.stats.total_allocated_per_order[order] += object_size;
1432
1433 if (size <= 32)
1434 {
1435 G.stats.total_overhead_under32 += overhead;
1436 G.stats.total_allocated_under32 += object_size;
1437 }
1438 if (size <= 64)
1439 {
1440 G.stats.total_overhead_under64 += overhead;
1441 G.stats.total_allocated_under64 += object_size;
1442 }
1443 if (size <= 128)
1444 {
1445 G.stats.total_overhead_under128 += overhead;
1446 G.stats.total_allocated_under128 += object_size;
1447 }
1448 }
1449
1450 if (GGC_DEBUG_LEVEL >= 3)
1451 fprintf (G.debug_file,
1452 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1453 (unsigned long) size, (unsigned long) object_size, result,
1454 (void *) entry);
1455
1456 return result;
1457 }
1458
1459 /* Mark function for strings. */
1460
1461 void
1462 gt_ggc_m_S (const void *p)
1463 {
1464 page_entry *entry;
1465 unsigned bit, word;
1466 unsigned long mask;
1467 unsigned long offset;
1468
1469 if (!p)
1470 return;
1471
1472 /* Look up the page on which the object is alloced. If it was not
1473 GC allocated, gracefully bail out. */
1474 entry = safe_lookup_page_table_entry (p);
1475 if (!entry)
1476 return;
1477
1478 /* Calculate the index of the object on the page; this is its bit
1479 position in the in_use_p bitmap. Note that because a char* might
1480 point to the middle of an object, we need special code here to
1481 make sure P points to the start of an object. */
1482 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1483 if (offset)
1484 {
1485 /* Here we've seen a char* which does not point to the beginning
1486 of an allocated object. We assume it points to the middle of
1487 a STRING_CST. */
1488 gcc_assert (offset == offsetof (struct tree_string, str));
1489 p = ((const char *) p) - offset;
1490 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1491 return;
1492 }
1493
1494 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1495 word = bit / HOST_BITS_PER_LONG;
1496 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1497
1498 /* If the bit was previously set, skip it. */
1499 if (entry->in_use_p[word] & mask)
1500 return;
1501
1502 /* Otherwise set it, and decrement the free object count. */
1503 entry->in_use_p[word] |= mask;
1504 entry->num_free_objects -= 1;
1505
1506 if (GGC_DEBUG_LEVEL >= 4)
1507 fprintf (G.debug_file, "Marking %p\n", p);
1508
1509 return;
1510 }
1511
1512
1513 /* User-callable entry points for marking string X. */
1514
1515 void
1516 gt_ggc_mx (const char *& x)
1517 {
1518 gt_ggc_m_S (x);
1519 }
1520
1521 void
1522 gt_ggc_mx (unsigned char *& x)
1523 {
1524 gt_ggc_m_S (x);
1525 }
1526
1527 void
1528 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1529 {
1530 }
1531
1532 /* If P is not marked, mark it and return false. Otherwise return true.
1533 P must have been allocated by the GC allocator; it mustn't point to
1534 static objects, stack variables, or memory allocated with malloc. */
1535
1536 int
1537 ggc_set_mark (const void *p)
1538 {
1539 page_entry *entry;
1540 unsigned bit, word;
1541 unsigned long mask;
1542
1543 /* Look up the page on which the object is alloced. If the object
1544 wasn't allocated by the collector, we'll probably die. */
1545 entry = lookup_page_table_entry (p);
1546 gcc_assert (entry);
1547
1548 /* Calculate the index of the object on the page; this is its bit
1549 position in the in_use_p bitmap. */
1550 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1551 word = bit / HOST_BITS_PER_LONG;
1552 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1553
1554 /* If the bit was previously set, skip it. */
1555 if (entry->in_use_p[word] & mask)
1556 return 1;
1557
1558 /* Otherwise set it, and decrement the free object count. */
1559 entry->in_use_p[word] |= mask;
1560 entry->num_free_objects -= 1;
1561
1562 if (GGC_DEBUG_LEVEL >= 4)
1563 fprintf (G.debug_file, "Marking %p\n", p);
1564
1565 return 0;
1566 }
1567
1568 /* Return 1 if P has been marked, zero otherwise.
1569 P must have been allocated by the GC allocator; it mustn't point to
1570 static objects, stack variables, or memory allocated with malloc. */
1571
1572 int
1573 ggc_marked_p (const void *p)
1574 {
1575 page_entry *entry;
1576 unsigned bit, word;
1577 unsigned long mask;
1578
1579 /* Look up the page on which the object is alloced. If the object
1580 wasn't allocated by the collector, we'll probably die. */
1581 entry = lookup_page_table_entry (p);
1582 gcc_assert (entry);
1583
1584 /* Calculate the index of the object on the page; this is its bit
1585 position in the in_use_p bitmap. */
1586 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1587 word = bit / HOST_BITS_PER_LONG;
1588 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1589
1590 return (entry->in_use_p[word] & mask) != 0;
1591 }
1592
1593 /* Return the size of the gc-able object P. */
1594
1595 size_t
1596 ggc_get_size (const void *p)
1597 {
1598 page_entry *pe = lookup_page_table_entry (p);
1599 return OBJECT_SIZE (pe->order);
1600 }
1601
1602 /* Release the memory for object P. */
1603
1604 void
1605 ggc_free (void *p)
1606 {
1607 if (in_gc)
1608 return;
1609
1610 page_entry *pe = lookup_page_table_entry (p);
1611 size_t order = pe->order;
1612 size_t size = OBJECT_SIZE (order);
1613
1614 if (GATHER_STATISTICS)
1615 ggc_free_overhead (p);
1616
1617 if (GGC_DEBUG_LEVEL >= 3)
1618 fprintf (G.debug_file,
1619 "Freeing object, actual size=%lu, at %p on %p\n",
1620 (unsigned long) size, p, (void *) pe);
1621
1622 #ifdef ENABLE_GC_CHECKING
1623 /* Poison the data, to indicate the data is garbage. */
1624 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1625 memset (p, 0xa5, size);
1626 #endif
1627 /* Let valgrind know the object is free. */
1628 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1629
1630 #ifdef ENABLE_GC_ALWAYS_COLLECT
1631 /* In the completely-anal-checking mode, we do *not* immediately free
1632 the data, but instead verify that the data is *actually* not
1633 reachable the next time we collect. */
1634 {
1635 struct free_object *fo = XNEW (struct free_object);
1636 fo->object = p;
1637 fo->next = G.free_object_list;
1638 G.free_object_list = fo;
1639 }
1640 #else
1641 {
1642 unsigned int bit_offset, word, bit;
1643
1644 G.allocated -= size;
1645
1646 /* Mark the object not-in-use. */
1647 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1648 word = bit_offset / HOST_BITS_PER_LONG;
1649 bit = bit_offset % HOST_BITS_PER_LONG;
1650 pe->in_use_p[word] &= ~(1UL << bit);
1651
1652 if (pe->num_free_objects++ == 0)
1653 {
1654 page_entry *p, *q;
1655
1656 /* If the page is completely full, then it's supposed to
1657 be after all pages that aren't. Since we've freed one
1658 object from a page that was full, we need to move the
1659 page to the head of the list.
1660
1661 PE is the node we want to move. Q is the previous node
1662 and P is the next node in the list. */
1663 q = pe->prev;
1664 if (q && q->num_free_objects == 0)
1665 {
1666 p = pe->next;
1667
1668 q->next = p;
1669
1670 /* If PE was at the end of the list, then Q becomes the
1671 new end of the list. If PE was not the end of the
1672 list, then we need to update the PREV field for P. */
1673 if (!p)
1674 G.page_tails[order] = q;
1675 else
1676 p->prev = q;
1677
1678 /* Move PE to the head of the list. */
1679 pe->next = G.pages[order];
1680 pe->prev = NULL;
1681 G.pages[order]->prev = pe;
1682 G.pages[order] = pe;
1683 }
1684
1685 /* Reset the hint bit to point to the only free object. */
1686 pe->next_bit_hint = bit_offset;
1687 }
1688 }
1689 #endif
1690 }
1691 \f
1692 /* Subroutine of init_ggc which computes the pair of numbers used to
1693 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1694
1695 This algorithm is taken from Granlund and Montgomery's paper
1696 "Division by Invariant Integers using Multiplication"
1697 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1698 constants). */
1699
1700 static void
1701 compute_inverse (unsigned order)
1702 {
1703 size_t size, inv;
1704 unsigned int e;
1705
1706 size = OBJECT_SIZE (order);
1707 e = 0;
1708 while (size % 2 == 0)
1709 {
1710 e++;
1711 size >>= 1;
1712 }
1713
1714 inv = size;
1715 while (inv * size != 1)
1716 inv = inv * (2 - inv*size);
1717
1718 DIV_MULT (order) = inv;
1719 DIV_SHIFT (order) = e;
1720 }
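/* A worked example (illustrative; assumes 8-byte MAX_ALIGNMENT and a
64-bit size_t): for a 40-byte order, the loop above strips 2^3 so
e == 3 and the odd part is 5; the Newton iteration then converges to
0xcccccccccccccccd, the inverse of 5 modulo 2^64. Hence
(offset * 0xcccccccccccccccd) >> 3 == offset / 40 for any offset that
is a multiple of 40. */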
1721
1722 /* Initialize the ggc-mmap allocator. */
1723 void
1724 init_ggc (void)
1725 {
1726 static bool init_p = false;
1727 unsigned order;
1728
1729 if (init_p)
1730 return;
1731 init_p = true;
1732
1733 G.pagesize = getpagesize ();
1734 G.lg_pagesize = exact_log2 (G.pagesize);
1735
1736 #ifdef HAVE_MMAP_DEV_ZERO
1737 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1738 if (G.dev_zero_fd == -1)
1739 internal_error ("open /dev/zero: %m");
1740 #endif
1741
1742 #if 0
1743 G.debug_file = fopen ("ggc-mmap.debug", "w");
1744 #else
1745 G.debug_file = stdout;
1746 #endif
1747
1748 #ifdef USING_MMAP
1749 /* StunOS has an amazing off-by-one error for the first mmap allocation
1750 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1751 believe, is an unaligned page allocation, which would cause us to
1752 hork badly if we tried to use it. */
1753 {
1754 char *p = alloc_anon (NULL, G.pagesize, true);
1755 struct page_entry *e;
1756 if ((uintptr_t)p & (G.pagesize - 1))
1757 {
1758 /* How losing. Discard this one and try another. If we still
1759 can't get something useful, give up. */
1760
1761 p = alloc_anon (NULL, G.pagesize, true);
1762 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1763 }
1764
1765 /* We have a good page, might as well hold onto it... */
1766 e = XCNEW (struct page_entry);
1767 e->bytes = G.pagesize;
1768 e->page = p;
1769 e->next = G.free_pages;
1770 G.free_pages = e;
1771 }
1772 #endif
1773
1774 /* Initialize the object size table. */
1775 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1776 object_size_table[order] = (size_t) 1 << order;
1777 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1778 {
1779 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1780
1781 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1782 so that we're sure of getting aligned memory. */
1783 s = ROUND_UP (s, MAX_ALIGNMENT);
1784 object_size_table[order] = s;
1785 }
1786
1787 /* Initialize the objects-per-page and inverse tables. */
1788 for (order = 0; order < NUM_ORDERS; ++order)
1789 {
1790 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1791 if (objects_per_page_table[order] == 0)
1792 objects_per_page_table[order] = 1;
1793 compute_inverse (order);
1794 }
1795
1796 /* Reset the size_lookup array to put appropriately sized objects in
1797 the special orders. All objects bigger than the previous power
1798 of two, but no greater than the special size, should go in the
1799 new order. */
1800 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1801 {
1802 int o;
1803 int i;
1804
1805 i = OBJECT_SIZE (order);
1806 if (i >= NUM_SIZE_LOOKUP)
1807 continue;
1808
1809 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1810 size_lookup[i] = order;
1811 }
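/* For instance (assuming 8-byte MAX_ALIGNMENT): for the 24-byte extra
order the loop above rewrites size_lookup[17..24] from 5 (32-byte
objects) to that order, so 17- to 24-byte requests are then served
from 24-byte pages. */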
1812
1813 G.depth_in_use = 0;
1814 G.depth_max = 10;
1815 G.depth = XNEWVEC (unsigned int, G.depth_max);
1816
1817 G.by_depth_in_use = 0;
1818 G.by_depth_max = INITIAL_PTE_COUNT;
1819 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1820 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1821
1822 /* Allocate space for the depth 0 finalizers. */
1823 G.finalizers.safe_push (vNULL);
1824 G.vec_finalizers.safe_push (vNULL);
1825 gcc_assert (G.finalizers.length() == 1);
1826 }
1827
1828 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1829 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1830
1831 static void
1832 ggc_recalculate_in_use_p (page_entry *p)
1833 {
1834 unsigned int i;
1835 size_t num_objects;
1836
1837 /* Because the past-the-end bit in in_use_p is always set, we
1838 pretend there is one additional object. */
1839 num_objects = OBJECTS_IN_PAGE (p) + 1;
1840
1841 /* Reset the free object count. */
1842 p->num_free_objects = num_objects;
1843
1844 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1845 for (i = 0;
1846 i < CEIL (BITMAP_SIZE (num_objects),
1847 sizeof (*p->in_use_p));
1848 ++i)
1849 {
1850 unsigned long j;
1851
1852 /* Something is in use if it is marked, or if it was in use in a
1853 context further down the context stack. */
1854 p->in_use_p[i] |= save_in_use_p (p)[i];
1855
1856 /* Decrement the free object count for every object allocated. */
1857 for (j = p->in_use_p[i]; j; j >>= 1)
1858 p->num_free_objects -= (j & 1);
1859 }
1860
1861 gcc_assert (p->num_free_objects < num_objects);
1862 }
1863 \f
1864 /* Unmark all objects. */
1865
1866 static void
1867 clear_marks (void)
1868 {
1869 unsigned order;
1870
1871 for (order = 2; order < NUM_ORDERS; order++)
1872 {
1873 page_entry *p;
1874
1875 for (p = G.pages[order]; p != NULL; p = p->next)
1876 {
1877 size_t num_objects = OBJECTS_IN_PAGE (p);
1878 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1879
1880 /* The data should be page-aligned. */
1881 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1882
1883 /* Pages that aren't in the topmost context are not collected;
1884 nevertheless, we need their in-use bit vectors to store GC
1885 marks. So, back them up first. */
1886 if (p->context_depth < G.context_depth)
1887 {
1888 if (! save_in_use_p (p))
1889 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1890 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1891 }
1892
1893 /* Reset the number of free objects and clear the
1894 in-use bits. These will be adjusted by mark_obj. */
1895 p->num_free_objects = num_objects;
1896 memset (p->in_use_p, 0, bitmap_size);
1897
1898 /* Make sure the one-past-the-end bit is always set. */
1899 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1900 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1901 }
1902 }
1903 }
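/* Illustrative example: on a page holding 3 objects the statement
   above sets bit 3 (the objects themselves occupy bits 0..2), so the
   bitmap always carries one permanently set bit just past the last
   real object.  Code such as ggc_recalculate_in_use_p accounts for
   this by pretending the page holds one additional object.  */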
1904
1905 /* Check if any blocks with a registered finalizer have become unmarked. If so,
1906 run the finalizer and unregister it because the block is about to be freed.
1907 Note that no guarantee is made about the order in which finalizers run, so
1908 touching other objects in GC memory is extremely unwise. */
1909
1910 static void
1911 ggc_handle_finalizers ()
1912 {
1913 unsigned dlen = G.finalizers.length();
1914 for (unsigned d = G.context_depth; d < dlen; ++d)
1915 {
1916 vec<finalizer> &v = G.finalizers[d];
1917 unsigned length = v.length ();
1918 for (unsigned int i = 0; i < length;)
1919 {
1920 finalizer &f = v[i];
1921 if (!ggc_marked_p (f.addr ()))
1922 {
1923 f.call ();
1924 v.unordered_remove (i);
1925 length--;
1926 }
1927 else
1928 i++;
1929 }
1930 }
1931
1932 gcc_assert (dlen == G.vec_finalizers.length());
1933 for (unsigned d = G.context_depth; d < dlen; ++d)
1934 {
1935 vec<vec_finalizer> &vv = G.vec_finalizers[d];
1936 unsigned length = vv.length ();
1937 for (unsigned int i = 0; i < length;)
1938 {
1939 vec_finalizer &f = vv[i];
1940 if (!ggc_marked_p (f.addr ()))
1941 {
1942 f.call ();
1943 vv.unordered_remove (i);
1944 length--;
1945 }
1946 else
1947 i++;
1948 }
1949 }
1950 }
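/* A note on the removal idiom used in both loops above:
   vec::unordered_remove overwrites slot I with the last element and
   shrinks the vector, so when an entry is removed we deliberately do
   not advance I -- the finalizer swapped into slot I still has to be
   examined.  A minimal sketch of the pattern (DEAD is a placeholder
   predicate, not part of this file):

     for (unsigned i = 0; i < v.length (); )
       if (DEAD (v[i]))
	 v.unordered_remove (i);
       else
	 ++i;  */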
1951
1952 /* Free all empty pages. Partially empty pages need no attention
1953 because the `mark' bit doubles as an `unused' bit. */
1954
1955 static void
1956 sweep_pages (void)
1957 {
1958 unsigned order;
1959
1960 for (order = 2; order < NUM_ORDERS; order++)
1961 {
1962 /* The last page-entry to consider, regardless of entries
1963 placed at the end of the list. */
1964 page_entry * const last = G.page_tails[order];
1965
1966 size_t num_objects;
1967 size_t live_objects;
1968 page_entry *p, *previous;
1969 int done;
1970
1971 p = G.pages[order];
1972 if (p == NULL)
1973 continue;
1974
1975 previous = NULL;
1976 do
1977 {
1978 page_entry *next = p->next;
1979
1980 /* Loop until all entries have been examined. */
1981 done = (p == last);
1982
1983 num_objects = OBJECTS_IN_PAGE (p);
1984
1985 /* Add all live objects on this page to the count of
1986 allocated memory. */
1987 live_objects = num_objects - p->num_free_objects;
1988
1989 G.allocated += OBJECT_SIZE (order) * live_objects;
1990
1991 /* Only objects on pages in the topmost context should get
1992 collected. */
1993 if (p->context_depth < G.context_depth)
1994 ;
1995
1996 /* Remove the page if it's empty. */
1997 else if (live_objects == 0)
1998 {
1999 /* If P was the first page in the list, then NEXT
2000 becomes the new first page in the list, otherwise
2001 splice P out of the forward pointers. */
2002 if (! previous)
2003 G.pages[order] = next;
2004 else
2005 previous->next = next;
2006
2007 /* Splice P out of the back pointers too. */
2008 if (next)
2009 next->prev = previous;
2010
2011 /* Are we removing the last element? */
2012 if (p == G.page_tails[order])
2013 G.page_tails[order] = previous;
2014 free_page (p);
2015 p = previous;
2016 }
2017
2018 /* If the page is full, move it to the end. */
2019 else if (p->num_free_objects == 0)
2020 {
2021 /* Don't move it if it's already at the end. */
2022 if (p != G.page_tails[order])
2023 {
2024 /* Move p to the end of the list. */
2025 p->next = NULL;
2026 p->prev = G.page_tails[order];
2027 G.page_tails[order]->next = p;
2028
2029 /* Update the tail pointer... */
2030 G.page_tails[order] = p;
2031
2032 /* ... and the head pointer, if necessary. */
2033 if (! previous)
2034 G.pages[order] = next;
2035 else
2036 previous->next = next;
2037
2038 /* And update the backpointer in NEXT if necessary. */
2039 if (next)
2040 next->prev = previous;
2041
2042 p = previous;
2043 }
2044 }
2045
2046 /* If we've fallen through to here, it's a page in the
2047 topmost context that is neither full nor empty. Such a
2048 page must precede pages at lesser context depth in the
2049 list, so move it to the head. */
2050 else if (p != G.pages[order])
2051 {
2052 previous->next = p->next;
2053
2054 /* Update the backchain in the next node if it exists. */
2055 if (p->next)
2056 p->next->prev = previous;
2057
2058 /* Move P to the head of the list. */
2059 p->next = G.pages[order];
2060 p->prev = NULL;
2061 G.pages[order]->prev = p;
2062
2063 /* Update the head pointer. */
2064 G.pages[order] = p;
2065
2066 /* Are we moving the last element? */
2067 if (G.page_tails[order] == p)
2068 G.page_tails[order] = previous;
2069 p = previous;
2070 }
2071
2072 previous = p;
2073 p = next;
2074 }
2075 while (! done);
2076
2077 /* Now, restore the in_use_p vectors for any pages from contexts
2078 other than the current one. */
2079 for (p = G.pages[order]; p; p = p->next)
2080 if (p->context_depth != G.context_depth)
2081 ggc_recalculate_in_use_p (p);
2082 }
2083 }
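/* Illustrative result of one sweep over a hypothetical list
   H -> F -> E -> O, where H is a partially filled current-context
   page, F is a full current-context page, E is an empty
   current-context page and O belongs to an outer context: the loop
   above frees E and leaves H -> O -> F, with G.page_tails[order]
   pointing at F.  */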
2084
2085 #ifdef ENABLE_GC_CHECKING
2086 /* Clobber all free objects. */
2087
2088 static void
2089 poison_pages (void)
2090 {
2091 unsigned order;
2092
2093 for (order = 2; order < NUM_ORDERS; order++)
2094 {
2095 size_t size = OBJECT_SIZE (order);
2096 page_entry *p;
2097
2098 for (p = G.pages[order]; p != NULL; p = p->next)
2099 {
2100 size_t num_objects;
2101 size_t i;
2102
2103 if (p->context_depth != G.context_depth)
2104 /* Since we don't do any collection for pages in pushed
2105 contexts, there's no need to do any poisoning. And
2106 besides, the IN_USE_P array isn't valid until we pop
2107 contexts. */
2108 continue;
2109
2110 num_objects = OBJECTS_IN_PAGE (p);
2111 for (i = 0; i < num_objects; i++)
2112 {
2113 size_t word, bit;
2114 word = i / HOST_BITS_PER_LONG;
2115 bit = i % HOST_BITS_PER_LONG;
2116 if (((p->in_use_p[word] >> bit) & 1) == 0)
2117 {
2118 char *object = p->page + i * size;
2119
2120 /* Keep poison-by-write when we expect to use Valgrind,
2121 so the exact same memory semantics are kept, in case
2122 there are memory errors. We override this request
2123 below. */
2124 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2125 size));
2126 memset (object, 0xa5, size);
2127
2128 /* Drop the handle to avoid a handle leak. */
2129 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2130 }
2131 }
2132 }
2133 }
2134 }
2135 #else
2136 #define poison_pages()
2137 #endif
2138
2139 #ifdef ENABLE_GC_ALWAYS_COLLECT
2140 /* Validate that the reportedly free objects actually are. */
2141
2142 static void
2143 validate_free_objects (void)
2144 {
2145 struct free_object *f, *next, *still_free = NULL;
2146
2147 for (f = G.free_object_list; f ; f = next)
2148 {
2149 page_entry *pe = lookup_page_table_entry (f->object);
2150 size_t bit, word;
2151
2152 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2153 word = bit / HOST_BITS_PER_LONG;
2154 bit = bit % HOST_BITS_PER_LONG;
2155 next = f->next;
2156
2157 /* Make certain it isn't visible from any root. Notice that we
2158 do this check before sweep_pages merges save_in_use_p. */
2159 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2160
2161 /* If the object comes from an outer context, then retain the
2162 free_object entry, so that we can verify that the address
2163 isn't live on the stack in some outer context. */
2164 if (pe->context_depth != G.context_depth)
2165 {
2166 f->next = still_free;
2167 still_free = f;
2168 }
2169 else
2170 free (f);
2171 }
2172
2173 G.free_object_list = still_free;
2174 }
2175 #else
2176 #define validate_free_objects()
2177 #endif
2178
2179 /* Top level mark-and-sweep routine. */
2180
2181 void
2182 ggc_collect (void)
2183 {
2184 /* Avoid frequent unnecessary work by skipping collection if the
2185 total allocations haven't expanded much since the last
2186 collection. */
2187 float allocated_last_gc =
2188 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2189
2190 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2191 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2192 return;
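/* Worked example of the heuristic above (the parameter values are
   illustrative; the real defaults depend on the host's available
   memory): with --param ggc-min-heapsize=4096 (4 MB) and
   --param ggc-min-expand=30, a compilation that had 10 MB allocated
   after the previous collection will not collect again until
   G.allocated reaches 10 MB + 3 MB = 13 MB, unless ggc_force_collect
   is set.  */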
2193
2194 timevar_push (TV_GC);
2195 if (GGC_DEBUG_LEVEL >= 2)
2196 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2197
2198 /* Zero the total allocated bytes. This will be recalculated in the
2199 sweep phase. */
2200 size_t allocated = G.allocated;
2201 G.allocated = 0;
2202
2203 /* Release the pages we freed the last time we collected, but didn't
2204 reuse in the interim. */
2205 release_pages ();
2206
2207 /* Output this later so we do not interfere with release_pages. */
2208 if (!quiet_flag)
2209 fprintf (stderr, " {GC %luk -> ", (unsigned long) allocated / 1024);
2210
2211 /* Indicate that we've seen collections at this context depth. */
2212 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2213
2214 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2215
2216 in_gc = true;
2217 clear_marks ();
2218 ggc_mark_roots ();
2219 ggc_handle_finalizers ();
2220
2221 if (GATHER_STATISTICS)
2222 ggc_prune_overhead_list ();
2223
2224 poison_pages ();
2225 validate_free_objects ();
2226 sweep_pages ();
2227
2228 in_gc = false;
2229 G.allocated_last_gc = G.allocated;
2230
2231 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2232
2233 timevar_pop (TV_GC);
2234
2235 if (!quiet_flag)
2236 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2237 if (GGC_DEBUG_LEVEL >= 2)
2238 fprintf (G.debug_file, "END COLLECTING\n");
2239 }
2240
2241 /* Return free pages to the system. */
2242
2243 void
2244 ggc_trim ()
2245 {
2246 timevar_push (TV_GC);
2247 G.allocated = 0;
2248 sweep_pages ();
2249 release_pages ();
2250 if (!quiet_flag)
2251 fprintf (stderr, " {GC trimmed to %luk, %luk mapped}",
2252 (unsigned long) G.allocated / 1024,
2253 (unsigned long) G.bytes_mapped / 1024);
2254 timevar_pop (TV_GC);
2255 }
2256
2257 /* Assume that all GGC memory is reachable and grow the limits for the next
2258 collection. With checking, trigger GGC so -Q compilation outputs how much
2259 memory really is reachable. */
2260
2261 void
2262 ggc_grow (void)
2263 {
2264 if (!flag_checking)
2265 G.allocated_last_gc = MAX (G.allocated_last_gc,
2266 G.allocated);
2267 else
2268 ggc_collect ();
2269 if (!quiet_flag)
2270 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2271 }
2272
2273 void
2274 ggc_print_statistics (void)
2275 {
2276 struct ggc_statistics stats;
2277 unsigned int i;
2278 size_t total_overhead = 0;
2279
2280 /* Clear the statistics. */
2281 memset (&stats, 0, sizeof (stats));
2282
2283 /* Make sure collection will really occur. */
2284 G.allocated_last_gc = 0;
2285
2286 /* Collect and print the statistics common across collectors. */
2287 ggc_print_common_statistics (stderr, &stats);
2288
2289 /* Release free pages so that we will not count the bytes allocated
2290 there as part of the total allocated memory. */
2291 release_pages ();
2292
2293 /* Collect some information about the various sizes of
2294 allocation. */
2295 fprintf (stderr,
2296 "Memory still allocated at the end of the compilation process\n");
2297 fprintf (stderr, "%-8s %10s %10s %10s\n",
2298 "Size", "Allocated", "Used", "Overhead");
2299 for (i = 0; i < NUM_ORDERS; ++i)
2300 {
2301 page_entry *p;
2302 size_t allocated;
2303 size_t in_use;
2304 size_t overhead;
2305
2306 /* Skip empty entries. */
2307 if (!G.pages[i])
2308 continue;
2309
2310 overhead = allocated = in_use = 0;
2311
2312 /* Figure out the total number of bytes allocated for objects of
2313 this size, and how many of them are actually in use. Also figure
2314 out how much memory the page table is using. */
2315 for (p = G.pages[i]; p; p = p->next)
2316 {
2317 allocated += p->bytes;
2318 in_use +=
2319 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2320
2321 overhead += (sizeof (page_entry) - sizeof (long)
2322 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2323 }
2324 fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2325 PRsa (10) "\n",
2326 (uint64_t)OBJECT_SIZE (i),
2327 SIZE_AMOUNT (allocated),
2328 SIZE_AMOUNT (in_use),
2329 SIZE_AMOUNT (overhead));
2330 total_overhead += overhead;
2331 }
2332 fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2333 "Total",
2334 SIZE_AMOUNT (G.bytes_mapped),
2335 SIZE_AMOUNT (G.allocated),
2336 SIZE_AMOUNT (total_overhead));
2337
2338 if (GATHER_STATISTICS)
2339 {
2340 fprintf (stderr, "\nTotal allocations and overheads during "
2341 "the compilation process\n");
2342
2343 fprintf (stderr, "Total Overhead: "
2344 PRsa (9) "\n",
2345 SIZE_AMOUNT (G.stats.total_overhead));
2346 fprintf (stderr, "Total Allocated: "
2347 PRsa (9) "\n",
2348 SIZE_AMOUNT (G.stats.total_allocated));
2349
2350 fprintf (stderr, "Total Overhead under 32B: "
2351 PRsa (9) "\n",
2352 SIZE_AMOUNT (G.stats.total_overhead_under32));
2353 fprintf (stderr, "Total Allocated under 32B: "
2354 PRsa (9) "\n",
2355 SIZE_AMOUNT (G.stats.total_allocated_under32));
2356 fprintf (stderr, "Total Overhead under 64B: "
2357 PRsa (9) "\n",
2358 SIZE_AMOUNT (G.stats.total_overhead_under64));
2359 fprintf (stderr, "Total Allocated under 64B: "
2360 PRsa (9) "\n",
2361 SIZE_AMOUNT (G.stats.total_allocated_under64));
2362 fprintf (stderr, "Total Overhead under 128B: "
2363 PRsa (9) "\n",
2364 SIZE_AMOUNT (G.stats.total_overhead_under128));
2365 fprintf (stderr, "Total Allocated under 128B: "
2366 PRsa (9) "\n",
2367 SIZE_AMOUNT (G.stats.total_allocated_under128));
2368
2369 for (i = 0; i < NUM_ORDERS; i++)
2370 if (G.stats.total_allocated_per_order[i])
2371 {
2372 fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
2373 PRsa (9) "\n",
2374 (uint64_t)OBJECT_SIZE (i),
2375 SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2376 fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
2377 PRsa (9) "\n",
2378 (uint64_t)OBJECT_SIZE (i),
2379 SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2380 }
2381 }
2382 }
2383 \f
2384 struct ggc_pch_ondisk
2385 {
2386 unsigned totals[NUM_ORDERS];
2387 };
2388
2389 struct ggc_pch_data
2390 {
2391 struct ggc_pch_ondisk d;
2392 uintptr_t base[NUM_ORDERS];
2393 size_t written[NUM_ORDERS];
2394 };
2395
2396 struct ggc_pch_data *
2397 init_ggc_pch (void)
2398 {
2399 return XCNEW (struct ggc_pch_data);
2400 }
2401
2402 void
2403 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2404 size_t size, bool is_string ATTRIBUTE_UNUSED)
2405 {
2406 unsigned order;
2407
2408 if (size < NUM_SIZE_LOOKUP)
2409 order = size_lookup[size];
2410 else
2411 {
2412 order = 10;
2413 while (size > OBJECT_SIZE (order))
2414 order++;
2415 }
2416
2417 d->d.totals[order]++;
2418 }
2419
2420 size_t
2421 ggc_pch_total_size (struct ggc_pch_data *d)
2422 {
2423 size_t a = 0;
2424 unsigned i;
2425
2426 for (i = 0; i < NUM_ORDERS; i++)
2427 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2428 return a;
2429 }
2430
2431 void
2432 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2433 {
2434 uintptr_t a = (uintptr_t) base;
2435 unsigned i;
2436
2437 for (i = 0; i < NUM_ORDERS; i++)
2438 {
2439 d->base[i] = a;
2440 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2441 }
2442 }
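/* Illustrative layout, assuming a 4096-byte page size: if order 3
   needs two 8-byte objects and order 4 needs one 16-byte object,
   ggc_pch_total_size returns 2 * 4096 (each order's region is rounded
   up to a whole page), and ggc_pch_this_base places order 3 at BASE
   and order 4 at BASE + 4096.  */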
2443
2444
2445 char *
2446 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2447 size_t size, bool is_string ATTRIBUTE_UNUSED)
2448 {
2449 unsigned order;
2450 char *result;
2451
2452 if (size < NUM_SIZE_LOOKUP)
2453 order = size_lookup[size];
2454 else
2455 {
2456 order = 10;
2457 while (size > OBJECT_SIZE (order))
2458 order++;
2459 }
2460
2461 result = (char *) d->base[order];
2462 d->base[order] += OBJECT_SIZE (order);
2463 return result;
2464 }
2465
2466 void
2467 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2468 FILE *f ATTRIBUTE_UNUSED)
2469 {
2470 /* Nothing to do. */
2471 }
2472
2473 void
2474 ggc_pch_write_object (struct ggc_pch_data *d,
2475 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2476 size_t size, bool is_string ATTRIBUTE_UNUSED)
2477 {
2478 unsigned order;
2479 static const char emptyBytes[256] = { 0 };
2480
2481 if (size < NUM_SIZE_LOOKUP)
2482 order = size_lookup[size];
2483 else
2484 {
2485 order = 10;
2486 while (size > OBJECT_SIZE (order))
2487 order++;
2488 }
2489
2490 if (fwrite (x, size, 1, f) != 1)
2491 fatal_error (input_location, "cannot write PCH file: %m");
2492
2493 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2494 object out to OBJECT_SIZE(order). This happens for strings. */
2495
2496 if (size != OBJECT_SIZE (order))
2497 {
2498 unsigned padding = OBJECT_SIZE (order) - size;
2499
2500 /* To speed small writes, we use a nulled-out array that's larger
2501 than most padding requests as the source for our null bytes. This
2502 permits us to do the padding with fwrite() rather than fseek(), and
2503 limits the chance the OS may try to flush any outstanding writes. */
2504 if (padding <= sizeof (emptyBytes))
2505 {
2506 if (fwrite (emptyBytes, 1, padding, f) != padding)
2507 fatal_error (input_location, "cannot write PCH file");
2508 }
2509 else
2510 {
2511 /* Larger than our buffer? Just default to fseek. */
2512 if (fseek (f, padding, SEEK_CUR) != 0)
2513 fatal_error (input_location, "cannot write PCH file");
2514 }
2515 }
2516
2517 d->written[order]++;
2518 if (d->written[order] == d->d.totals[order]
2519 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2520 G.pagesize),
2521 SEEK_CUR) != 0)
2522 fatal_error (input_location, "cannot write PCH file: %m");
2523 }
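/* Illustrative example of the padding path above, assuming a 100-byte
   string that no special order covers and so lands in the 128-byte
   order: the object is followed by 28 zero bytes taken from
   emptyBytes, keeping every object in the PCH image at its full
   OBJECT_SIZE; a padding request larger than 256 bytes would fall
   back to fseek instead.  */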
2524
2525 void
2526 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2527 {
2528 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2529 fatal_error (input_location, "cannot write PCH file: %m");
2530 free (d);
2531 }
2532
2533 /* Move the PCH PTE entries just added to the end of by_depth to the
2534 front. */
2535
2536 static void
2537 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2538 {
2539 /* First, we swap the new entries to the front of the varrays. */
2540 page_entry **new_by_depth;
2541 unsigned long **new_save_in_use;
2542
2543 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2544 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2545
2546 memcpy (&new_by_depth[0],
2547 &G.by_depth[count_old_page_tables],
2548 count_new_page_tables * sizeof (void *));
2549 memcpy (&new_by_depth[count_new_page_tables],
2550 &G.by_depth[0],
2551 count_old_page_tables * sizeof (void *));
2552 memcpy (&new_save_in_use[0],
2553 &G.save_in_use[count_old_page_tables],
2554 count_new_page_tables * sizeof (void *));
2555 memcpy (&new_save_in_use[count_new_page_tables],
2556 &G.save_in_use[0],
2557 count_old_page_tables * sizeof (void *));
2558
2559 free (G.by_depth);
2560 free (G.save_in_use);
2561
2562 G.by_depth = new_by_depth;
2563 G.save_in_use = new_save_in_use;
2564
2565 /* Now update all the index_by_depth fields. */
2566 for (unsigned i = G.by_depth_in_use; i--;)
2567 {
2568 page_entry *p = G.by_depth[i];
2569 p->index_by_depth = i;
2570 }
2571
2572 /* And last, we update the depth pointers in G.depth. The first
2573 entry is already 0, and context 0 entries always start at index
2574 0, so there is nothing to update in the first slot. We need a
2575 second slot only if we have old PTEs, and if we do, they start
2576 at index count_new_page_tables. */
2577 if (count_old_page_tables)
2578 push_depth (count_new_page_tables);
2579 }
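/* Illustrative example: with count_old_page_tables == 3 (entries
   A B C) and count_new_page_tables == 2 (PCH entries D E appended by
   ggc_pch_read), by_depth changes from A B C D E to D E A B C.  The
   same rotation is applied to save_in_use, every index_by_depth field
   is refreshed, and push_depth records that the old entries now begin
   at index 2.  */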
2580
2581 void
2582 ggc_pch_read (FILE *f, void *addr)
2583 {
2584 struct ggc_pch_ondisk d;
2585 unsigned i;
2586 char *offs = (char *) addr;
2587 unsigned long count_old_page_tables;
2588 unsigned long count_new_page_tables;
2589
2590 count_old_page_tables = G.by_depth_in_use;
2591
2592 if (fread (&d, sizeof (d), 1, f) != 1)
2593 fatal_error (input_location, "cannot read PCH file: %m");
2594
2595 /* We've just read in a PCH file. So, every object that used to be
2596 allocated is now free. */
2597 clear_marks ();
2598 #ifdef ENABLE_GC_CHECKING
2599 poison_pages ();
2600 #endif
2601 /* Since we free all the allocated objects, the free list becomes
2602 useless. Validate it now, which will also clear it. */
2603 validate_free_objects ();
2604
2605 /* No object read from a PCH file should ever be freed. So, set the
2606 context depth to 1, and set the depth of all the currently-allocated
2607 pages to be 1 too. PCH pages will have depth 0. */
2608 gcc_assert (!G.context_depth);
2609 G.context_depth = 1;
2610 /* Allocate space for the depth 1 finalizers. */
2611 G.finalizers.safe_push (vNULL);
2612 G.vec_finalizers.safe_push (vNULL);
2613 gcc_assert (G.finalizers.length() == 2);
2614 for (i = 0; i < NUM_ORDERS; i++)
2615 {
2616 page_entry *p;
2617 for (p = G.pages[i]; p != NULL; p = p->next)
2618 p->context_depth = G.context_depth;
2619 }
2620
2621 /* Allocate the appropriate page-table entries for the pages read from
2622 the PCH file. */
2623
2624 for (i = 0; i < NUM_ORDERS; i++)
2625 {
2626 struct page_entry *entry;
2627 char *pte;
2628 size_t bytes;
2629 size_t num_objs;
2630 size_t j;
2631
2632 if (d.totals[i] == 0)
2633 continue;
2634
2635 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2636 num_objs = bytes / OBJECT_SIZE (i);
2637 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2638 - sizeof (long)
2639 + BITMAP_SIZE (num_objs + 1)));
2640 entry->bytes = bytes;
2641 entry->page = offs;
2642 entry->context_depth = 0;
2643 offs += bytes;
2644 entry->num_free_objects = 0;
2645 entry->order = i;
2646
2647 for (j = 0;
2648 j + HOST_BITS_PER_LONG <= num_objs + 1;
2649 j += HOST_BITS_PER_LONG)
2650 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2651 for (; j < num_objs + 1; j++)
2652 entry->in_use_p[j / HOST_BITS_PER_LONG]
2653 |= 1L << (j % HOST_BITS_PER_LONG);
2654
2655 for (pte = entry->page;
2656 pte < entry->page + entry->bytes;
2657 pte += G.pagesize)
2658 set_page_table_entry (pte, entry);
2659
2660 if (G.page_tails[i] != NULL)
2661 G.page_tails[i]->next = entry;
2662 else
2663 G.pages[i] = entry;
2664 G.page_tails[i] = entry;
2665
2666 /* We start off by just adding all the new information to the
2667 end of the varrays; later, we will move the new information
2668 to the front of the varrays, as the PCH page tables are at
2669 context 0. */
2670 push_by_depth (entry, 0);
2671 }
2672
2673 /* Now, we update the various data structures that speed page table
2674 handling. */
2675 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2676
2677 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2678
2679 /* Update the statistics. */
2680 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2681 }