1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "hash-set.h"
25 #include "vec.h"
26 #include "input.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "inchash.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "tm_p.h"
33 #include "diagnostic-core.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "ggc-internal.h"
37 #include "timevar.h"
38 #include "params.h"
39 #include "hash-map.h"
40 #include "is-a.h"
41 #include "plugin-api.h"
42 #include "vec.h"
43 #include "hashtab.h"
44 #include "hash-set.h"
45 #include "hard-reg-set.h"
46 #include "input.h"
47 #include "function.h"
48 #include "ipa-ref.h"
49 #include "cgraph.h"
50 #include "cfgloop.h"
51 #include "plugin.h"
52 #include "basic-block.h"
53
54 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
55 file open. Prefer either to valloc. */
56 #ifdef HAVE_MMAP_ANON
57 # undef HAVE_MMAP_DEV_ZERO
58 # define USING_MMAP
59 #endif
60
61 #ifdef HAVE_MMAP_DEV_ZERO
62 # define USING_MMAP
63 #endif
64
65 #ifndef USING_MMAP
66 #define USING_MALLOC_PAGE_GROUPS
67 #endif
68
69 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
70 && defined(USING_MMAP)
71 # define USING_MADVISE
72 #endif
73
74 /* Strategy:
75
76 This garbage-collecting allocator allocates objects on one of a set
77 of pages. Each page can allocate objects of a single size only;
78 available sizes are powers of two starting at four bytes. The size
79 of an allocation request is rounded up to the next power of two
80 (`order'), and satisfied from the appropriate page.
81
82 Each page is recorded in a page-entry, which also maintains an
83 in-use bitmap of object positions on the page. This allows the
84 allocation state of a particular object to be flipped without
85 touching the page itself.
86
87 Each page-entry also has a context depth, which is used to track
88 pushing and popping of allocation contexts. Only objects allocated
89 in the current (highest-numbered) context may be collected.
90
91 Page entries are arranged in an array of singly-linked lists. The
92 array is indexed by the allocation size, in bits, of the pages on
93 it; i.e. all pages on a list allocate objects of the same size.
94 Pages are ordered on the list such that all non-full pages precede
95 all full pages, with non-full pages arranged in order of decreasing
96 context depth.
97
98 Empty pages (of all orders) are kept on a single page cache list,
99 and are considered first when new pages are required; they are
100 deallocated at the start of the next collection if they haven't
101 been recycled by then. */
102
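/* A small worked example of the size rounding described above, assuming
   MAX_ALIGNMENT is 8 on the host: a 1-byte request is served from the
   8-byte order (the minimum object size); a 20-byte request is served
   from the 32-byte order, or from the 24-byte extra order once init_ggc
   has spliced extra_order_size_table into size_lookup; and a 600-byte
   request, which is beyond the size_lookup table, is rounded up to the
   1024-byte power-of-two order.  */
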
103 /* Define GGC_DEBUG_LEVEL to print debugging information.
104 0: No debugging output.
105 1: GC statistics only.
106 2: Page-entry allocations/deallocations as well.
107 3: Object allocations as well.
108 4: Object marks as well. */
109 #define GGC_DEBUG_LEVEL (0)
110 \f
111 #ifndef HOST_BITS_PER_PTR
112 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
113 #endif
114
115 \f
116 /* A two-level tree is used to look up the page-entry for a given
117 pointer. Two chunks of the pointer's bits are extracted to index
118 the first and second levels of the tree, as follows:
119
120                                HOST_PAGE_SIZE_BITS
121                        32            |      |
122     msb +----------------+----+------+------+ lsb
123                            |     |
124                 PAGE_L1_BITS     |
125                                  |
126                       PAGE_L2_BITS
127
128 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
129 pages are aligned on system page boundaries. The next most
130 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
131 index values in the lookup table, respectively.
132
133 For 32-bit architectures and the settings below, there are no
134 leftover bits. For architectures with wider pointers, the lookup
135 tree points to a list of pages, which must be scanned to find the
136 correct one. */
137
138 #define PAGE_L1_BITS (8)
139 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
140 #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
141 #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
142
143 #define LOOKUP_L1(p) \
144 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
145
146 #define LOOKUP_L2(p) \
147 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
148
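/* A worked example of the two-level lookup, assuming a 32-bit pointer
   and 4K system pages (G.lg_pagesize == 12, hence PAGE_L2_BITS == 12):
   for p == 0x0804a123,

   LOOKUP_L1 (p) == (0x0804a123 >> 24) & 0xff  == 0x08
   LOOKUP_L2 (p) == (0x0804a123 >> 12) & 0xfff == 0x04a

   so the page entry is found at base[0x08][0x04a], and the low 12 bits
   (0x123) are the object's offset within its page.  */
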
149 /* The number of objects per allocation page, for objects on a page of
150 the indicated ORDER. */
151 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
152
153 /* The number of objects in P. */
154 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
155
156 /* The size of an object on a page of the indicated ORDER. */
157 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
158
159 /* For speed, we avoid doing a general integer divide to locate the
160 offset in the allocation bitmap, by precalculating numbers M, S
161 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
162 within the page which is evenly divisible by the object size Z. */
163 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
164 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
165 #define OFFSET_TO_BIT(OFFSET, ORDER) \
166 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
167
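/* A worked example of the precomputed-reciprocal trick, assuming
   MAX_ALIGNMENT is 8 and 32-bit arithmetic: for the 24-byte extra
   order, compute_inverse strips the power-of-two factor
   (24 == 3 * 2^3, so DIV_SHIFT == 3) and stores the multiplicative
   inverse of 3 modulo 2^32, 0xaaaaaaab, in DIV_MULT.  For an offset
   of 48 bytes within the page:

   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24,

   which is the object's bit position in the in-use bitmap.  */
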
168 /* We use this structure to determine the alignment required for
169 allocations. For power-of-two sized allocations, that's not a
170 problem, but it does matter for odd-sized allocations.
171 We do not care about alignment for floating-point types. */
172
173 struct max_alignment {
174 char c;
175 union {
176 int64_t i;
177 void *p;
178 } u;
179 };
180
181 /* The biggest alignment required. */
182
183 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
184
185
186 /* The number of extra orders, not corresponding to power-of-two sized
187 objects. */
188
189 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
190
191 #define RTL_SIZE(NSLOTS) \
192 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
193
194 #define TREE_EXP_SIZE(OPS) \
195 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
196
197 /* The Ith entry is the maximum size of an object to be stored in the
198 Ith extra order. Adding a new entry to this array is the *only*
199 thing you need to do to add a new special allocation size. */
200
201 static const size_t extra_order_size_table[] = {
202 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
203 There are a lot of structures with these sizes and explicitly
204 listing them risks orders being dropped because they changed size. */
205 MAX_ALIGNMENT * 3,
206 MAX_ALIGNMENT * 5,
207 MAX_ALIGNMENT * 6,
208 MAX_ALIGNMENT * 7,
209 MAX_ALIGNMENT * 9,
210 MAX_ALIGNMENT * 10,
211 MAX_ALIGNMENT * 11,
212 MAX_ALIGNMENT * 12,
213 MAX_ALIGNMENT * 13,
214 MAX_ALIGNMENT * 14,
215 MAX_ALIGNMENT * 15,
216 sizeof (struct tree_decl_non_common),
217 sizeof (struct tree_field_decl),
218 sizeof (struct tree_parm_decl),
219 sizeof (struct tree_var_decl),
220 sizeof (struct tree_type_non_common),
221 sizeof (struct function),
222 sizeof (struct basic_block_def),
223 sizeof (struct cgraph_node),
224 sizeof (struct loop),
225 };
226
227 /* The total number of orders. */
228
229 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
230
231 /* Compute the smallest nonnegative number which when added to X gives
232 a multiple of F. */
233
234 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
235
236 /* Compute the smallest multiple of F that is >= X. */
237
238 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
239
240 /* Round X to the next multiple of the page size. */
241
242 #define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
243
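/* A few worked values: ROUND_UP_VALUE (20, 8) == 7 - ((7 + 20) % 8) == 4,
   and indeed 20 + 4 == 24 is the next multiple of 8; likewise
   ROUND_UP (20, 8) == CEIL (20, 8) * 8 == 24; and, assuming 4K pages,
   PAGE_ALIGN (5000) == 8192.  */
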
244 /* The Ith entry is the number of objects on a page of order I. */
245
246 static unsigned objects_per_page_table[NUM_ORDERS];
247
248 /* The Ith entry is the size of an object on a page of order I. */
249
250 static size_t object_size_table[NUM_ORDERS];
251
252 /* The Ith entry is a pair of numbers (mult, shift) such that
253 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
254 for all k evenly divisible by OBJECT_SIZE(I). */
255
256 static struct
257 {
258 size_t mult;
259 unsigned int shift;
260 }
261 inverse_table[NUM_ORDERS];
262
263 /* A page_entry records the status of an allocation page. This
264 structure is dynamically sized to fit the bitmap in_use_p. */
265 typedef struct page_entry
266 {
267 /* The next page-entry with objects of the same size, or NULL if
268 this is the last page-entry. */
269 struct page_entry *next;
270
271 /* The previous page-entry with objects of the same size, or NULL if
272 this is the first page-entry. The PREV pointer exists solely to
273 keep the cost of ggc_free manageable. */
274 struct page_entry *prev;
275
276 /* The number of bytes allocated. (This will always be a multiple
277 of the host system page size.) */
278 size_t bytes;
279
280 /* The address at which the memory is allocated. */
281 char *page;
282
283 #ifdef USING_MALLOC_PAGE_GROUPS
284 /* Back pointer to the page group this page came from. */
285 struct page_group *group;
286 #endif
287
288 /* This is the index in the by_depth varray where this page entry
289 can be found. */
290 unsigned long index_by_depth;
291
292 /* Context depth of this page. */
293 unsigned short context_depth;
294
295 /* The number of free objects remaining on this page. */
296 unsigned short num_free_objects;
297
298 /* A likely candidate for the bit position of a free object for the
299 next allocation from this page. */
300 unsigned short next_bit_hint;
301
302 /* The lg of the size of objects allocated from this page. */
303 unsigned char order;
304
305 /* Discarded page? */
306 bool discarded;
307
308 /* A bit vector indicating whether or not objects are in use. The
309 Nth bit is one if the Nth object on this page is allocated. This
310 array is dynamically sized. */
311 unsigned long in_use_p[1];
312 } page_entry;
313
314 #ifdef USING_MALLOC_PAGE_GROUPS
315 /* A page_group describes a large allocation from malloc, from which
316 we parcel out aligned pages. */
317 typedef struct page_group
318 {
319 /* A linked list of all extant page groups. */
320 struct page_group *next;
321
322 /* The address we received from malloc. */
323 char *allocation;
324
325 /* The size of the block. */
326 size_t alloc_size;
327
328 /* A bitmask of pages in use. */
329 unsigned int in_use;
330 } page_group;
331 #endif
332
333 #if HOST_BITS_PER_PTR <= 32
334
335 /* On 32-bit hosts, we use a two level page table, as pictured above. */
336 typedef page_entry **page_table[PAGE_L1_SIZE];
337
338 #else
339
340 /* On 64-bit hosts, we use the same two level page tables plus a linked
341 list that disambiguates the top 32-bits. There will almost always be
342 exactly one entry in the list. */
343 typedef struct page_table_chain
344 {
345 struct page_table_chain *next;
346 size_t high_bits;
347 page_entry **table[PAGE_L1_SIZE];
348 } *page_table;
349
350 #endif
351
352 class finalizer
353 {
354 public:
355 finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
356
357 void *addr () const { return m_addr; }
358
359 void call () const { m_function (m_addr); }
360
361 private:
362 void *m_addr;
363 void (*m_function)(void *);
364 };
365
366 class vec_finalizer
367 {
368 public:
369 vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
370 m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
371
372 void call () const
373 {
374 for (size_t i = 0; i < m_n_objects; i++)
375 m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
376 }
377
378 void *addr () const { return reinterpret_cast<void *> (m_addr); }
379
380 private:
381 uintptr_t m_addr;
382 void (*m_function)(void *);
383 size_t m_object_size;
384 size_t m_n_objects;
385 };
386
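/* A sketch of how these records are used later in this file: allocating
   through ggc_internal_alloc with a non-null function pointer F pushes
   either a `finalizer' (N == 1) or a `vec_finalizer' (N > 1) onto the
   tables in G, and ggc_handle_finalizers later calls F on the object,
   or on each of the N objects of size S, once the block is found
   unmarked.  */
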
387 #ifdef ENABLE_GC_ALWAYS_COLLECT
388 /* List of free objects to be verified as actually free on the
389 next collection. */
390 struct free_object
391 {
392 void *object;
393 struct free_object *next;
394 };
395 #endif
396
397 /* The rest of the global variables. */
398 static struct ggc_globals
399 {
400 /* The Nth element in this array is a page with objects of size 2^N.
401 If there are any pages with free objects, they will be at the
402 head of the list. NULL if there are no page-entries for this
403 object size. */
404 page_entry *pages[NUM_ORDERS];
405
406 /* The Nth element in this array is the last page with objects of
407 size 2^N. NULL if there are no page-entries for this object
408 size. */
409 page_entry *page_tails[NUM_ORDERS];
410
411 /* Lookup table for associating allocation pages with object addresses. */
412 page_table lookup;
413
414 /* The system's page size. */
415 size_t pagesize;
416 size_t lg_pagesize;
417
418 /* Bytes currently allocated. */
419 size_t allocated;
420
421 /* Bytes currently allocated at the end of the last collection. */
422 size_t allocated_last_gc;
423
424 /* Total amount of memory mapped. */
425 size_t bytes_mapped;
426
427 /* Bit N set if any allocations have been done at context depth N. */
428 unsigned long context_depth_allocations;
429
430 /* Bit N set if any collections have been done at context depth N. */
431 unsigned long context_depth_collections;
432
433 /* The current depth in the context stack. */
434 unsigned short context_depth;
435
436 /* A file descriptor open to /dev/zero for reading. */
437 #if defined (HAVE_MMAP_DEV_ZERO)
438 int dev_zero_fd;
439 #endif
440
441 /* A cache of free system pages. */
442 page_entry *free_pages;
443
444 #ifdef USING_MALLOC_PAGE_GROUPS
445 page_group *page_groups;
446 #endif
447
448 /* The file descriptor for debugging output. */
449 FILE *debug_file;
450
451 /* Current number of elements in use in depth below. */
452 unsigned int depth_in_use;
453
454 /* Maximum number of elements that can be used before resizing. */
455 unsigned int depth_max;
456
457 /* Each element of this array is an index in by_depth where the given
458 depth starts. This structure is indexed by that given depth we
459 are interested in. */
460 unsigned int *depth;
461
462 /* Current number of elements in use in by_depth below. */
463 unsigned int by_depth_in_use;
464
465 /* Maximum number of elements that can be used before resizing. */
466 unsigned int by_depth_max;
467
468 /* Each element of this array is a pointer to a page_entry, all
469 page_entries can be found in here by increasing depth.
470 index_by_depth in the page_entry is the index into this data
471 structure where that page_entry can be found. This is used to
472 speed up finding all page_entries at a particular depth. */
473 page_entry **by_depth;
474
475 /* Each element is a pointer to the saved in_use_p bits, if any,
476 zero otherwise. We allocate them all together, to enable a
477 better runtime data access pattern. */
478 unsigned long **save_in_use;
479
480 /* Finalizers for single objects. */
481 vec<finalizer> finalizers;
482
483 /* Finalizers for vectors of objects. */
484 vec<vec_finalizer> vec_finalizers;
485
486 #ifdef ENABLE_GC_ALWAYS_COLLECT
487 /* List of free objects to be verified as actually free on the
488 next collection. */
489 struct free_object *free_object_list;
490 #endif
491
492 struct
493 {
494 /* Total GC-allocated memory. */
495 unsigned long long total_allocated;
496 /* Total overhead for GC-allocated memory. */
497 unsigned long long total_overhead;
498
499 /* Total allocations and overhead for sizes less than 32, 64 and 128.
500 These sizes are interesting because they are typical cache line
501 sizes. */
502
503 unsigned long long total_allocated_under32;
504 unsigned long long total_overhead_under32;
505
506 unsigned long long total_allocated_under64;
507 unsigned long long total_overhead_under64;
508
509 unsigned long long total_allocated_under128;
510 unsigned long long total_overhead_under128;
511
512 /* The allocations for each of the allocation orders. */
513 unsigned long long total_allocated_per_order[NUM_ORDERS];
514
515 /* The overhead for each of the allocation orders. */
516 unsigned long long total_overhead_per_order[NUM_ORDERS];
517 } stats;
518 } G;
519
520 /* True if a gc is currently taking place. */
521
522 static bool in_gc = false;
523
524 /* The size in bytes required to maintain a bitmap for the objects
525 on a page-entry. */
526 #define BITMAP_SIZE(Num_objects) \
527 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
528
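/* A worked example, assuming 4K pages and 64-bit longs: a page of
   4-byte objects holds 1024 of them, and alloc_page asks for
   BITMAP_SIZE (1024 + 1) == CEIL (1025, 64) * 8 == 17 * 8 == 136 bytes
   of bitmap (the extra bit is the one-past-the-end sentinel).  */
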
529 /* Allocate pages in chunks of this size, to throttle calls to memory
530 allocation routines. The first page is used, the rest go onto the
531 free list. This cannot be larger than HOST_BITS_PER_INT for the
532 in_use bitmask for page_group. Hosts that need a different value
533 can override this by defining GGC_QUIRE_SIZE explicitly. */
534 #ifndef GGC_QUIRE_SIZE
535 # ifdef USING_MMAP
536 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
537 # else
538 # define GGC_QUIRE_SIZE 16
539 # endif
540 #endif
541
542 /* Initial guess as to how many page table entries we might need. */
543 #define INITIAL_PTE_COUNT 128
544 \f
545 static int ggc_allocated_p (const void *);
546 static page_entry *lookup_page_table_entry (const void *);
547 static void set_page_table_entry (void *, page_entry *);
548 #ifdef USING_MMAP
549 static char *alloc_anon (char *, size_t, bool check);
550 #endif
551 #ifdef USING_MALLOC_PAGE_GROUPS
552 static size_t page_group_index (char *, char *);
553 static void set_page_group_in_use (page_group *, char *);
554 static void clear_page_group_in_use (page_group *, char *);
555 #endif
556 static struct page_entry * alloc_page (unsigned);
557 static void free_page (struct page_entry *);
558 static void release_pages (void);
559 static void clear_marks (void);
560 static void sweep_pages (void);
561 static void ggc_recalculate_in_use_p (page_entry *);
562 static void compute_inverse (unsigned);
563 static inline void adjust_depth (void);
564 static void move_ptes_to_front (int, int);
565
566 void debug_print_page_list (int);
567 static void push_depth (unsigned int);
568 static void push_by_depth (page_entry *, unsigned long *);
569
570 /* Push an entry onto G.depth. */
571
572 inline static void
573 push_depth (unsigned int i)
574 {
575 if (G.depth_in_use >= G.depth_max)
576 {
577 G.depth_max *= 2;
578 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
579 }
580 G.depth[G.depth_in_use++] = i;
581 }
582
583 /* Push an entry onto G.by_depth and G.save_in_use. */
584
585 inline static void
586 push_by_depth (page_entry *p, unsigned long *s)
587 {
588 if (G.by_depth_in_use >= G.by_depth_max)
589 {
590 G.by_depth_max *= 2;
591 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
592 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
593 G.by_depth_max);
594 }
595 G.by_depth[G.by_depth_in_use] = p;
596 G.save_in_use[G.by_depth_in_use++] = s;
597 }
598
599 #if (GCC_VERSION < 3001)
600 #define prefetch(X) ((void) X)
601 #else
602 #define prefetch(X) __builtin_prefetch (X)
603 #endif
604
605 #define save_in_use_p_i(__i) \
606 (G.save_in_use[__i])
607 #define save_in_use_p(__p) \
608 (save_in_use_p_i (__p->index_by_depth))
609
610 /* Returns nonzero if P was allocated in GC'able memory. */
611
612 static inline int
613 ggc_allocated_p (const void *p)
614 {
615 page_entry ***base;
616 size_t L1, L2;
617
618 #if HOST_BITS_PER_PTR <= 32
619 base = &G.lookup[0];
620 #else
621 page_table table = G.lookup;
622 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
623 while (1)
624 {
625 if (table == NULL)
626 return 0;
627 if (table->high_bits == high_bits)
628 break;
629 table = table->next;
630 }
631 base = &table->table[0];
632 #endif
633
634 /* Extract the level 1 and 2 indices. */
635 L1 = LOOKUP_L1 (p);
636 L2 = LOOKUP_L2 (p);
637
638 return base[L1] && base[L1][L2];
639 }
640
641 /* Traverse the page table and find the entry for a page.
642 Die (probably) if the object wasn't allocated via GC. */
643
644 static inline page_entry *
645 lookup_page_table_entry (const void *p)
646 {
647 page_entry ***base;
648 size_t L1, L2;
649
650 #if HOST_BITS_PER_PTR <= 32
651 base = &G.lookup[0];
652 #else
653 page_table table = G.lookup;
654 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
655 while (table->high_bits != high_bits)
656 table = table->next;
657 base = &table->table[0];
658 #endif
659
660 /* Extract the level 1 and 2 indices. */
661 L1 = LOOKUP_L1 (p);
662 L2 = LOOKUP_L2 (p);
663
664 return base[L1][L2];
665 }
666
667 /* Set the page table entry for a page. */
668
669 static void
670 set_page_table_entry (void *p, page_entry *entry)
671 {
672 page_entry ***base;
673 size_t L1, L2;
674
675 #if HOST_BITS_PER_PTR <= 32
676 base = &G.lookup[0];
677 #else
678 page_table table;
679 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
680 for (table = G.lookup; table; table = table->next)
681 if (table->high_bits == high_bits)
682 goto found;
683
684 /* Not found -- allocate a new table. */
685 table = XCNEW (struct page_table_chain);
686 table->next = G.lookup;
687 table->high_bits = high_bits;
688 G.lookup = table;
689 found:
690 base = &table->table[0];
691 #endif
692
693 /* Extract the level 1 and 2 indices. */
694 L1 = LOOKUP_L1 (p);
695 L2 = LOOKUP_L2 (p);
696
697 if (base[L1] == NULL)
698 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
699
700 base[L1][L2] = entry;
701 }
702
703 /* Prints the page-entry for object size ORDER, for debugging. */
704
705 DEBUG_FUNCTION void
706 debug_print_page_list (int order)
707 {
708 page_entry *p;
709 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
710 (void *) G.page_tails[order]);
711 p = G.pages[order];
712 while (p != NULL)
713 {
714 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
715 p->num_free_objects);
716 p = p->next;
717 }
718 printf ("NULL\n");
719 fflush (stdout);
720 }
721
722 #ifdef USING_MMAP
723 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
724 (if non-null).  The ifdef structure here is intended to cause a
725 compile error unless exactly one of the HAVE_* macros is defined. */
726
727 static inline char *
728 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
729 {
730 #ifdef HAVE_MMAP_ANON
731 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
732 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
733 #endif
734 #ifdef HAVE_MMAP_DEV_ZERO
735 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
736 MAP_PRIVATE, G.dev_zero_fd, 0);
737 #endif
738
739 if (page == (char *) MAP_FAILED)
740 {
741 if (!check)
742 return NULL;
743 perror ("virtual memory exhausted");
744 exit (FATAL_EXIT_CODE);
745 }
746
747 /* Remember that we allocated this memory. */
748 G.bytes_mapped += size;
749
750 /* Pretend we don't have access to the allocated pages. We'll enable
751 access to smaller pieces of the area in ggc_internal_alloc. Discard the
752 handle to avoid handle leak. */
753 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
754
755 return page;
756 }
757 #endif
758 #ifdef USING_MALLOC_PAGE_GROUPS
759 /* Compute the index for this page into the page group. */
760
761 static inline size_t
762 page_group_index (char *allocation, char *page)
763 {
764 return (size_t) (page - allocation) >> G.lg_pagesize;
765 }
766
767 /* Set and clear the in_use bit for this page in the page group. */
768
769 static inline void
770 set_page_group_in_use (page_group *group, char *page)
771 {
772 group->in_use |= 1 << page_group_index (group->allocation, page);
773 }
774
775 static inline void
776 clear_page_group_in_use (page_group *group, char *page)
777 {
778 group->in_use &= ~(1 << page_group_index (group->allocation, page));
779 }
780 #endif
781
782 /* Allocate a new page for allocating objects of size 2^ORDER,
783 and return an entry for it. The entry is not added to the
784 appropriate page_table list. */
785
786 static inline struct page_entry *
787 alloc_page (unsigned order)
788 {
789 struct page_entry *entry, *p, **pp;
790 char *page;
791 size_t num_objects;
792 size_t bitmap_size;
793 size_t page_entry_size;
794 size_t entry_size;
795 #ifdef USING_MALLOC_PAGE_GROUPS
796 page_group *group;
797 #endif
798
799 num_objects = OBJECTS_PER_PAGE (order);
800 bitmap_size = BITMAP_SIZE (num_objects + 1);
801 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
802 entry_size = num_objects * OBJECT_SIZE (order);
803 if (entry_size < G.pagesize)
804 entry_size = G.pagesize;
805 entry_size = PAGE_ALIGN (entry_size);
806
807 entry = NULL;
808 page = NULL;
809
810 /* Check the list of free pages for one we can use. */
811 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
812 if (p->bytes == entry_size)
813 break;
814
815 if (p != NULL)
816 {
817 if (p->discarded)
818 G.bytes_mapped += p->bytes;
819 p->discarded = false;
820
821 /* Recycle the allocated memory from this page ... */
822 *pp = p->next;
823 page = p->page;
824
825 #ifdef USING_MALLOC_PAGE_GROUPS
826 group = p->group;
827 #endif
828
829 /* ... and, if possible, the page entry itself. */
830 if (p->order == order)
831 {
832 entry = p;
833 memset (entry, 0, page_entry_size);
834 }
835 else
836 free (p);
837 }
838 #ifdef USING_MMAP
839 else if (entry_size == G.pagesize)
840 {
841 /* We want just one page. Allocate a bunch of them and put the
842 extras on the freelist. (Can only do this optimization with
843 mmap for backing store.) */
844 struct page_entry *e, *f = G.free_pages;
845 int i, entries = GGC_QUIRE_SIZE;
846
847 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
848 if (page == NULL)
849 {
850 page = alloc_anon (NULL, G.pagesize, true);
851 entries = 1;
852 }
853
854 /* This loop counts down so that the chain will be in ascending
855 memory order. */
856 for (i = entries - 1; i >= 1; i--)
857 {
858 e = XCNEWVAR (struct page_entry, page_entry_size);
859 e->order = order;
860 e->bytes = G.pagesize;
861 e->page = page + (i << G.lg_pagesize);
862 e->next = f;
863 f = e;
864 }
865
866 G.free_pages = f;
867 }
868 else
869 page = alloc_anon (NULL, entry_size, true);
870 #endif
871 #ifdef USING_MALLOC_PAGE_GROUPS
872 else
873 {
874 /* Allocate a large block of memory and serve out the aligned
875 pages therein. This results in much less memory wastage
876 than the traditional implementation of valloc. */
877
878 char *allocation, *a, *enda;
879 size_t alloc_size, head_slop, tail_slop;
880 int multiple_pages = (entry_size == G.pagesize);
881
882 if (multiple_pages)
883 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
884 else
885 alloc_size = entry_size + G.pagesize - 1;
886 allocation = XNEWVEC (char, alloc_size);
887
888 page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
889 head_slop = page - allocation;
890 if (multiple_pages)
891 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
892 else
893 tail_slop = alloc_size - entry_size - head_slop;
894 enda = allocation + alloc_size - tail_slop;
895
896 /* We allocated N pages, which are likely not aligned, leaving
897 us with N-1 usable pages. We plan to place the page_group
898 structure somewhere in the slop. */
899 if (head_slop >= sizeof (page_group))
900 group = (page_group *)page - 1;
901 else
902 {
903 /* We magically got an aligned allocation. Too bad, we have
904 to waste a page anyway. */
905 if (tail_slop == 0)
906 {
907 enda -= G.pagesize;
908 tail_slop += G.pagesize;
909 }
910 gcc_assert (tail_slop >= sizeof (page_group));
911 group = (page_group *)enda;
912 tail_slop -= sizeof (page_group);
913 }
914
915 /* Remember that we allocated this memory. */
916 group->next = G.page_groups;
917 group->allocation = allocation;
918 group->alloc_size = alloc_size;
919 group->in_use = 0;
920 G.page_groups = group;
921 G.bytes_mapped += alloc_size;
922
923 /* If we allocated multiple pages, put the rest on the free list. */
924 if (multiple_pages)
925 {
926 struct page_entry *e, *f = G.free_pages;
927 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
928 {
929 e = XCNEWVAR (struct page_entry, page_entry_size);
930 e->order = order;
931 e->bytes = G.pagesize;
932 e->page = a;
933 e->group = group;
934 e->next = f;
935 f = e;
936 }
937 G.free_pages = f;
938 }
939 }
940 #endif
941
942 if (entry == NULL)
943 entry = XCNEWVAR (struct page_entry, page_entry_size);
944
945 entry->bytes = entry_size;
946 entry->page = page;
947 entry->context_depth = G.context_depth;
948 entry->order = order;
949 entry->num_free_objects = num_objects;
950 entry->next_bit_hint = 1;
951
952 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
953
954 #ifdef USING_MALLOC_PAGE_GROUPS
955 entry->group = group;
956 set_page_group_in_use (group, page);
957 #endif
958
959 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
960 increment the hint. */
961 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
962 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
963
964 set_page_table_entry (page, entry);
965
966 if (GGC_DEBUG_LEVEL >= 2)
967 fprintf (G.debug_file,
968 "Allocating page at %p, object size=%lu, data %p-%p\n",
969 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
970 page + entry_size - 1);
971
972 return entry;
973 }
974
975 /* Adjust the size of G.depth so that no index greater than the one
976 used by the top of the G.by_depth is used. */
977
978 static inline void
979 adjust_depth (void)
980 {
981 page_entry *top;
982
983 if (G.by_depth_in_use)
984 {
985 top = G.by_depth[G.by_depth_in_use-1];
986
987 /* Peel back indices in depth that index into by_depth, so that
988 as new elements are added to by_depth, we note the indices
989 of those elements, if they are for new context depths. */
990 while (G.depth_in_use > (size_t)top->context_depth+1)
991 --G.depth_in_use;
992 }
993 }
994
995 /* For a page that is no longer needed, put it on the free page list. */
996
997 static void
998 free_page (page_entry *entry)
999 {
1000 if (GGC_DEBUG_LEVEL >= 2)
1001 fprintf (G.debug_file,
1002 "Deallocating page at %p, data %p-%p\n", (void *) entry,
1003 entry->page, entry->page + entry->bytes - 1);
1004
1005 /* Mark the page as inaccessible. Discard the handle to avoid handle
1006 leak. */
1007 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
1008
1009 set_page_table_entry (entry->page, NULL);
1010
1011 #ifdef USING_MALLOC_PAGE_GROUPS
1012 clear_page_group_in_use (entry->group, entry->page);
1013 #endif
1014
1015 if (G.by_depth_in_use > 1)
1016 {
1017 page_entry *top = G.by_depth[G.by_depth_in_use-1];
1018 int i = entry->index_by_depth;
1019
1020 /* We cannot free a page from a context deeper than the current
1021 one. */
1022 gcc_assert (entry->context_depth == top->context_depth);
1023
1024 /* Put top element into freed slot. */
1025 G.by_depth[i] = top;
1026 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1027 top->index_by_depth = i;
1028 }
1029 --G.by_depth_in_use;
1030
1031 adjust_depth ();
1032
1033 entry->next = G.free_pages;
1034 G.free_pages = entry;
1035 }
1036
1037 /* Release the free page cache to the system. */
1038
1039 static void
1040 release_pages (void)
1041 {
1042 #ifdef USING_MADVISE
1043 page_entry *p, *start_p;
1044 char *start;
1045 size_t len;
1046 size_t mapped_len;
1047 page_entry *next, *prev, *newprev;
1048 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1049
1050 /* First free larger contiguous areas to the OS.
1051 This allows other allocators to grab these areas if needed.
1052 This is only done on larger chunks to avoid fragmentation.
1053 This does not always work because the free_pages list is only
1054 approximately sorted. */
1055
1056 p = G.free_pages;
1057 prev = NULL;
1058 while (p)
1059 {
1060 start = p->page;
1061 start_p = p;
1062 len = 0;
1063 mapped_len = 0;
1064 newprev = prev;
1065 while (p && p->page == start + len)
1066 {
1067 len += p->bytes;
1068 if (!p->discarded)
1069 mapped_len += p->bytes;
1070 newprev = p;
1071 p = p->next;
1072 }
1073 if (len >= free_unit)
1074 {
1075 while (start_p != p)
1076 {
1077 next = start_p->next;
1078 free (start_p);
1079 start_p = next;
1080 }
1081 munmap (start, len);
1082 if (prev)
1083 prev->next = p;
1084 else
1085 G.free_pages = p;
1086 G.bytes_mapped -= mapped_len;
1087 continue;
1088 }
1089 prev = newprev;
1090 }
1091
1092 /* Now give back the fragmented pages to the OS, but keep the address
1093 space to reuse it next time. */
1094
1095 for (p = G.free_pages; p; )
1096 {
1097 if (p->discarded)
1098 {
1099 p = p->next;
1100 continue;
1101 }
1102 start = p->page;
1103 len = p->bytes;
1104 start_p = p;
1105 p = p->next;
1106 while (p && p->page == start + len)
1107 {
1108 len += p->bytes;
1109 p = p->next;
1110 }
1111 /* Give the page back to the kernel, but don't free the mapping.
1112 This avoids fragmentation in the virtual memory map of the
1113 process. Next time we can reuse it by just touching it. */
1114 madvise (start, len, MADV_DONTNEED);
1115 /* Don't count those pages as mapped, so that they don't affect the
1116 garbage collector's accounting unnecessarily. */
1117 G.bytes_mapped -= len;
1118 while (start_p != p)
1119 {
1120 start_p->discarded = true;
1121 start_p = start_p->next;
1122 }
1123 }
1124 #endif
1125 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1126 page_entry *p, *next;
1127 char *start;
1128 size_t len;
1129
1130 /* Gather up adjacent pages so they are unmapped together. */
1131 p = G.free_pages;
1132
1133 while (p)
1134 {
1135 start = p->page;
1136 next = p->next;
1137 len = p->bytes;
1138 free (p);
1139 p = next;
1140
1141 while (p && p->page == start + len)
1142 {
1143 next = p->next;
1144 len += p->bytes;
1145 free (p);
1146 p = next;
1147 }
1148
1149 munmap (start, len);
1150 G.bytes_mapped -= len;
1151 }
1152
1153 G.free_pages = NULL;
1154 #endif
1155 #ifdef USING_MALLOC_PAGE_GROUPS
1156 page_entry **pp, *p;
1157 page_group **gp, *g;
1158
1159 /* Remove all pages from free page groups from the list. */
1160 pp = &G.free_pages;
1161 while ((p = *pp) != NULL)
1162 if (p->group->in_use == 0)
1163 {
1164 *pp = p->next;
1165 free (p);
1166 }
1167 else
1168 pp = &p->next;
1169
1170 /* Remove all free page groups, and release the storage. */
1171 gp = &G.page_groups;
1172 while ((g = *gp) != NULL)
1173 if (g->in_use == 0)
1174 {
1175 *gp = g->next;
1176 G.bytes_mapped -= g->alloc_size;
1177 free (g->allocation);
1178 }
1179 else
1180 gp = &g->next;
1181 #endif
1182 }
1183
1184 /* This table provides a fast way to determine ceil(log_2(size)) for
1185 allocation requests. The minimum allocation size is eight bytes. */
1186 #define NUM_SIZE_LOOKUP 512
1187 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1188 {
1189 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1190 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1191 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1192 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1193 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1194 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1195 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1196 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1197 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1198 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1199 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1200 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1201 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1202 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1203 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1204 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1205 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1206 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1207 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1208 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1209 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1211 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1212 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1213 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1214 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1215 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1216 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1217 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1218 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1219 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1220 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1221 };
1222
1223 /* For a given size of memory requested for allocation, return the
1224 actual size that is going to be allocated, as well as the size
1225 order. */
1226
1227 static void
1228 ggc_round_alloc_size_1 (size_t requested_size,
1229 size_t *size_order,
1230 size_t *alloced_size)
1231 {
1232 size_t order, object_size;
1233
1234 if (requested_size < NUM_SIZE_LOOKUP)
1235 {
1236 order = size_lookup[requested_size];
1237 object_size = OBJECT_SIZE (order);
1238 }
1239 else
1240 {
1241 order = 10;
1242 while (requested_size > (object_size = OBJECT_SIZE (order)))
1243 order++;
1244 }
1245
1246 if (size_order)
1247 *size_order = order;
1248 if (alloced_size)
1249 *alloced_size = object_size;
1250 }
1251
1252 /* For a given size of memory requested for allocation, return the
1253 actual size that is going to be allocated. */
1254
1255 size_t
1256 ggc_round_alloc_size (size_t requested_size)
1257 {
1258 size_t size = 0;
1259
1260 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1261 return size;
1262 }
1263
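/* For instance, a request too large for the size_lookup table falls
   through to the loop in ggc_round_alloc_size_1 above:
   ggc_round_alloc_size_1 (2000, &order, &size) leaves order == 11 and
   size == 2048, so ggc_round_alloc_size (2000) returns 2048, and
   ggc_internal_alloc will carve such an object out of a page of
   2048-byte objects.  */
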
1264 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1265
1266 void *
1267 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1268 MEM_STAT_DECL)
1269 {
1270 size_t order, word, bit, object_offset, object_size;
1271 struct page_entry *entry;
1272 void *result;
1273
1274 ggc_round_alloc_size_1 (size, &order, &object_size);
1275
1276 /* If there are non-full pages for this size allocation, they are at
1277 the head of the list. */
1278 entry = G.pages[order];
1279
1280 /* If there is no page for this object size, or all pages in this
1281 context are full, allocate a new page. */
1282 if (entry == NULL || entry->num_free_objects == 0)
1283 {
1284 struct page_entry *new_entry;
1285 new_entry = alloc_page (order);
1286
1287 new_entry->index_by_depth = G.by_depth_in_use;
1288 push_by_depth (new_entry, 0);
1289
1290 /* We can skip context depths; if we do, make sure we go all the
1291 way to the new depth. */
1292 while (new_entry->context_depth >= G.depth_in_use)
1293 push_depth (G.by_depth_in_use-1);
1294
1295 /* If this is the only entry, it's also the tail. If it is not
1296 the only entry, then we must update the PREV pointer of the
1297 ENTRY (G.pages[order]) to point to our new page entry. */
1298 if (entry == NULL)
1299 G.page_tails[order] = new_entry;
1300 else
1301 entry->prev = new_entry;
1302
1303 /* Put new pages at the head of the page list. By definition the
1304 entry at the head of the list always has a NULL PREV pointer. */
1305 new_entry->next = entry;
1306 new_entry->prev = NULL;
1307 entry = new_entry;
1308 G.pages[order] = new_entry;
1309
1310 /* For a new page, we know the word and bit positions (in the
1311 in_use bitmap) of the first available object -- they're zero. */
1312 new_entry->next_bit_hint = 1;
1313 word = 0;
1314 bit = 0;
1315 object_offset = 0;
1316 }
1317 else
1318 {
1319 /* First try to use the hint left from the previous allocation
1320 to locate a clear bit in the in-use bitmap. We've made sure
1321 that the one-past-the-end bit is always set, so if the hint
1322 has run over, this test will fail. */
1323 unsigned hint = entry->next_bit_hint;
1324 word = hint / HOST_BITS_PER_LONG;
1325 bit = hint % HOST_BITS_PER_LONG;
1326
1327 /* If the hint didn't work, scan the bitmap from the beginning. */
1328 if ((entry->in_use_p[word] >> bit) & 1)
1329 {
1330 word = bit = 0;
1331 while (~entry->in_use_p[word] == 0)
1332 ++word;
1333
1334 #if GCC_VERSION >= 3004
1335 bit = __builtin_ctzl (~entry->in_use_p[word]);
1336 #else
1337 while ((entry->in_use_p[word] >> bit) & 1)
1338 ++bit;
1339 #endif
1340
1341 hint = word * HOST_BITS_PER_LONG + bit;
1342 }
1343
1344 /* Next time, try the next bit. */
1345 entry->next_bit_hint = hint + 1;
1346
1347 object_offset = hint * object_size;
1348 }
1349
1350 /* Set the in-use bit. */
1351 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1352
1353 /* Keep a running total of the number of free objects. If this page
1354 fills up, we may have to move it to the end of the list if the
1355 next page isn't full. If the next page is full, all subsequent
1356 pages are full, so there's no need to move it. */
1357 if (--entry->num_free_objects == 0
1358 && entry->next != NULL
1359 && entry->next->num_free_objects > 0)
1360 {
1361 /* We have a new head for the list. */
1362 G.pages[order] = entry->next;
1363
1364 /* We are moving ENTRY to the end of the page table list.
1365 The new page at the head of the list will have NULL in
1366 its PREV field and ENTRY will have NULL in its NEXT field. */
1367 entry->next->prev = NULL;
1368 entry->next = NULL;
1369
1370 /* Append ENTRY to the tail of the list. */
1371 entry->prev = G.page_tails[order];
1372 G.page_tails[order]->next = entry;
1373 G.page_tails[order] = entry;
1374 }
1375
1376 /* Calculate the object's address. */
1377 result = entry->page + object_offset;
1378 if (GATHER_STATISTICS)
1379 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1380 result FINAL_PASS_MEM_STAT);
1381
1382 #ifdef ENABLE_GC_CHECKING
1383 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1384 exact same semantics in presence of memory bugs, regardless of
1385 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1386 handle to avoid handle leak. */
1387 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1388
1389 /* `Poison' the entire allocated object, including any padding at
1390 the end. */
1391 memset (result, 0xaf, object_size);
1392
1393 /* Make the bytes after the end of the object inaccessible. Discard the
1394 handle to avoid handle leak. */
1395 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1396 object_size - size));
1397 #endif
1398
1399 /* Tell Valgrind that the memory is there, but its content isn't
1400 defined. The bytes at the end of the object are still marked
1401 inaccessible. */
1402 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1403
1404 /* Keep track of how many bytes are being allocated. This
1405 information is used in deciding when to collect. */
1406 G.allocated += object_size;
1407
1408 /* For timevar statistics. */
1409 timevar_ggc_mem_total += object_size;
1410
1411 if (f && n == 1)
1412 G.finalizers.safe_push (finalizer (result, f));
1413 else if (f)
1414 G.vec_finalizers.safe_push
1415 (vec_finalizer (reinterpret_cast<uintptr_t> (result), f, s, n));
1416
1417 if (GATHER_STATISTICS)
1418 {
1419 size_t overhead = object_size - size;
1420
1421 G.stats.total_overhead += overhead;
1422 G.stats.total_allocated += object_size;
1423 G.stats.total_overhead_per_order[order] += overhead;
1424 G.stats.total_allocated_per_order[order] += object_size;
1425
1426 if (size <= 32)
1427 {
1428 G.stats.total_overhead_under32 += overhead;
1429 G.stats.total_allocated_under32 += object_size;
1430 }
1431 if (size <= 64)
1432 {
1433 G.stats.total_overhead_under64 += overhead;
1434 G.stats.total_allocated_under64 += object_size;
1435 }
1436 if (size <= 128)
1437 {
1438 G.stats.total_overhead_under128 += overhead;
1439 G.stats.total_allocated_under128 += object_size;
1440 }
1441 }
1442
1443 if (GGC_DEBUG_LEVEL >= 3)
1444 fprintf (G.debug_file,
1445 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1446 (unsigned long) size, (unsigned long) object_size, result,
1447 (void *) entry);
1448
1449 return result;
1450 }
1451
1452 /* Mark function for strings. */
1453
1454 void
1455 gt_ggc_m_S (const void *p)
1456 {
1457 page_entry *entry;
1458 unsigned bit, word;
1459 unsigned long mask;
1460 unsigned long offset;
1461
1462 if (!p || !ggc_allocated_p (p))
1463 return;
1464
1465 /* Look up the page on which the object is allocated. */
1466 entry = lookup_page_table_entry (p);
1467 gcc_assert (entry);
1468
1469 /* Calculate the index of the object on the page; this is its bit
1470 position in the in_use_p bitmap. Note that because a char* might
1471 point to the middle of an object, we need special code here to
1472 make sure P points to the start of an object. */
1473 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1474 if (offset)
1475 {
1476 /* Here we've seen a char* which does not point to the beginning
1477 of an allocated object. We assume it points to the middle of
1478 a STRING_CST. */
1479 gcc_assert (offset == offsetof (struct tree_string, str));
1480 p = ((const char *) p) - offset;
1481 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1482 return;
1483 }
1484
1485 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1486 word = bit / HOST_BITS_PER_LONG;
1487 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1488
1489 /* If the bit was previously set, skip it. */
1490 if (entry->in_use_p[word] & mask)
1491 return;
1492
1493 /* Otherwise set it, and decrement the free object count. */
1494 entry->in_use_p[word] |= mask;
1495 entry->num_free_objects -= 1;
1496
1497 if (GGC_DEBUG_LEVEL >= 4)
1498 fprintf (G.debug_file, "Marking %p\n", p);
1499
1500 return;
1501 }
1502
1503
1504 /* User-callable entry points for marking string X. */
1505
1506 void
1507 gt_ggc_mx (const char *& x)
1508 {
1509 gt_ggc_m_S (x);
1510 }
1511
1512 void
1513 gt_ggc_mx (unsigned char *& x)
1514 {
1515 gt_ggc_m_S (x);
1516 }
1517
1518 void
1519 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1520 {
1521 }
1522
1523 /* If P is not marked, mark it and return false. Otherwise return true.
1524 P must have been allocated by the GC allocator; it mustn't point to
1525 static objects, stack variables, or memory allocated with malloc. */
1526
1527 int
1528 ggc_set_mark (const void *p)
1529 {
1530 page_entry *entry;
1531 unsigned bit, word;
1532 unsigned long mask;
1533
1534 /* Look up the page on which the object is allocated. If the object
1535 wasn't allocated by the collector, we'll probably die. */
1536 entry = lookup_page_table_entry (p);
1537 gcc_assert (entry);
1538
1539 /* Calculate the index of the object on the page; this is its bit
1540 position in the in_use_p bitmap. */
1541 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1542 word = bit / HOST_BITS_PER_LONG;
1543 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1544
1545 /* If the bit was previously set, skip it. */
1546 if (entry->in_use_p[word] & mask)
1547 return 1;
1548
1549 /* Otherwise set it, and decrement the free object count. */
1550 entry->in_use_p[word] |= mask;
1551 entry->num_free_objects -= 1;
1552
1553 if (GGC_DEBUG_LEVEL >= 4)
1554 fprintf (G.debug_file, "Marking %p\n", p);
1555
1556 return 0;
1557 }
1558
1559 /* Return 1 if P has been marked, zero otherwise.
1560 P must have been allocated by the GC allocator; it mustn't point to
1561 static objects, stack variables, or memory allocated with malloc. */
1562
1563 int
1564 ggc_marked_p (const void *p)
1565 {
1566 page_entry *entry;
1567 unsigned bit, word;
1568 unsigned long mask;
1569
1570 /* Look up the page on which the object is allocated. If the object
1571 wasn't allocated by the collector, we'll probably die. */
1572 entry = lookup_page_table_entry (p);
1573 gcc_assert (entry);
1574
1575 /* Calculate the index of the object on the page; this is its bit
1576 position in the in_use_p bitmap. */
1577 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1578 word = bit / HOST_BITS_PER_LONG;
1579 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1580
1581 return (entry->in_use_p[word] & mask) != 0;
1582 }
1583
1584 /* Return the size of the gc-able object P. */
1585
1586 size_t
1587 ggc_get_size (const void *p)
1588 {
1589 page_entry *pe = lookup_page_table_entry (p);
1590 return OBJECT_SIZE (pe->order);
1591 }
1592
1593 /* Release the memory for object P. */
1594
1595 void
1596 ggc_free (void *p)
1597 {
1598 if (in_gc)
1599 return;
1600
1601 page_entry *pe = lookup_page_table_entry (p);
1602 size_t order = pe->order;
1603 size_t size = OBJECT_SIZE (order);
1604
1605 if (GATHER_STATISTICS)
1606 ggc_free_overhead (p);
1607
1608 if (GGC_DEBUG_LEVEL >= 3)
1609 fprintf (G.debug_file,
1610 "Freeing object, actual size=%lu, at %p on %p\n",
1611 (unsigned long) size, p, (void *) pe);
1612
1613 #ifdef ENABLE_GC_CHECKING
1614 /* Poison the data, to indicate the data is garbage. */
1615 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1616 memset (p, 0xa5, size);
1617 #endif
1618 /* Let valgrind know the object is free. */
1619 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1620
1621 #ifdef ENABLE_GC_ALWAYS_COLLECT
1622 /* In the completely-anal-checking mode, we do *not* immediately free
1623 the data, but instead verify that the data is *actually* not
1624 reachable the next time we collect. */
1625 {
1626 struct free_object *fo = XNEW (struct free_object);
1627 fo->object = p;
1628 fo->next = G.free_object_list;
1629 G.free_object_list = fo;
1630 }
1631 #else
1632 {
1633 unsigned int bit_offset, word, bit;
1634
1635 G.allocated -= size;
1636
1637 /* Mark the object not-in-use. */
1638 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1639 word = bit_offset / HOST_BITS_PER_LONG;
1640 bit = bit_offset % HOST_BITS_PER_LONG;
1641 pe->in_use_p[word] &= ~(1UL << bit);
1642
1643 if (pe->num_free_objects++ == 0)
1644 {
1645 page_entry *p, *q;
1646
1647 /* If the page is completely full, then it's supposed to
1648 be after all pages that aren't. Since we've freed one
1649 object from a page that was full, we need to move the
1650 page to the head of the list.
1651
1652 PE is the node we want to move. Q is the previous node
1653 and P is the next node in the list. */
1654 q = pe->prev;
1655 if (q && q->num_free_objects == 0)
1656 {
1657 p = pe->next;
1658
1659 q->next = p;
1660
1661 /* If PE was at the end of the list, then Q becomes the
1662 new end of the list. If PE was not the end of the
1663 list, then we need to update the PREV field for P. */
1664 if (!p)
1665 G.page_tails[order] = q;
1666 else
1667 p->prev = q;
1668
1669 /* Move PE to the head of the list. */
1670 pe->next = G.pages[order];
1671 pe->prev = NULL;
1672 G.pages[order]->prev = pe;
1673 G.pages[order] = pe;
1674 }
1675
1676 /* Reset the hint bit to point to the only free object. */
1677 pe->next_bit_hint = bit_offset;
1678 }
1679 }
1680 #endif
1681 }
1682 \f
1683 /* Subroutine of init_ggc which computes the pair of numbers used to
1684 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1685
1686 This algorithm is taken from Granlund and Montgomery's paper
1687 "Division by Invariant Integers using Multiplication"
1688 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1689 constants). */
1690
1691 static void
1692 compute_inverse (unsigned order)
1693 {
1694 size_t size, inv;
1695 unsigned int e;
1696
1697 size = OBJECT_SIZE (order);
1698 e = 0;
1699 while (size % 2 == 0)
1700 {
1701 e++;
1702 size >>= 1;
1703 }
1704
1705 inv = size;
1706 while (inv * size != 1)
1707 inv = inv * (2 - inv*size);
1708
1709 DIV_MULT (order) = inv;
1710 DIV_SHIFT (order) = e;
1711 }
1712
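/* The loop above is Newton's iteration for an inverse in the 2-adic
   integers: each pass at least doubles the number of correct low-order
   bits of INV, so only a handful of iterations are needed.  As a worked
   trace for a 24-byte order on a host with a 32-bit size_t: SIZE
   becomes 3 after stripping the factor 2^3, the iteration starts from
   inv == 3 and converges to 0xaaaaaaab, and indeed
   3 * 0xaaaaaaab == 1 (mod 2^32).  */
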
1713 /* Initialize the ggc-mmap allocator. */
1714 void
1715 init_ggc (void)
1716 {
1717 static bool init_p = false;
1718 unsigned order;
1719
1720 if (init_p)
1721 return;
1722 init_p = true;
1723
1724 G.pagesize = getpagesize ();
1725 G.lg_pagesize = exact_log2 (G.pagesize);
1726
1727 #ifdef HAVE_MMAP_DEV_ZERO
1728 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1729 if (G.dev_zero_fd == -1)
1730 internal_error ("open /dev/zero: %m");
1731 #endif
1732
1733 #if 0
1734 G.debug_file = fopen ("ggc-mmap.debug", "w");
1735 #else
1736 G.debug_file = stdout;
1737 #endif
1738
1739 #ifdef USING_MMAP
1740 /* StunOS has an amazing off-by-one error for the first mmap allocation
1741 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1742 believe, is an unaligned page allocation, which would cause us to
1743 hork badly if we tried to use it. */
1744 {
1745 char *p = alloc_anon (NULL, G.pagesize, true);
1746 struct page_entry *e;
1747 if ((uintptr_t)p & (G.pagesize - 1))
1748 {
1749 /* How losing. Discard this one and try another. If we still
1750 can't get something useful, give up. */
1751
1752 p = alloc_anon (NULL, G.pagesize, true);
1753 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1754 }
1755
1756 /* We have a good page, might as well hold onto it... */
1757 e = XCNEW (struct page_entry);
1758 e->bytes = G.pagesize;
1759 e->page = p;
1760 e->next = G.free_pages;
1761 G.free_pages = e;
1762 }
1763 #endif
1764
1765 /* Initialize the object size table. */
1766 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1767 object_size_table[order] = (size_t) 1 << order;
1768 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1769 {
1770 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1771
1772 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1773 so that we're sure of getting aligned memory. */
1774 s = ROUND_UP (s, MAX_ALIGNMENT);
1775 object_size_table[order] = s;
1776 }
1777
1778 /* Initialize the objects-per-page and inverse tables. */
1779 for (order = 0; order < NUM_ORDERS; ++order)
1780 {
1781 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1782 if (objects_per_page_table[order] == 0)
1783 objects_per_page_table[order] = 1;
1784 compute_inverse (order);
1785 }
1786
1787 /* Reset the size_lookup array to put appropriately sized objects in
1788 the special orders. All objects bigger than the previous power
1789 of two, but no greater than the special size, should go in the
1790 new order. */
1791 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1792 {
1793 int o;
1794 int i;
1795
1796 i = OBJECT_SIZE (order);
1797 if (i >= NUM_SIZE_LOOKUP)
1798 continue;
1799
1800 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1801 size_lookup[i] = order;
1802 }
1803
1804 G.depth_in_use = 0;
1805 G.depth_max = 10;
1806 G.depth = XNEWVEC (unsigned int, G.depth_max);
1807
1808 G.by_depth_in_use = 0;
1809 G.by_depth_max = INITIAL_PTE_COUNT;
1810 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1811 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1812 }
1813
1814 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1815 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1816
1817 static void
1818 ggc_recalculate_in_use_p (page_entry *p)
1819 {
1820 unsigned int i;
1821 size_t num_objects;
1822
1823 /* Because the past-the-end bit in in_use_p is always set, we
1824 pretend there is one additional object. */
1825 num_objects = OBJECTS_IN_PAGE (p) + 1;
1826
1827 /* Reset the free object count. */
1828 p->num_free_objects = num_objects;
1829
1830 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1831 for (i = 0;
1832 i < CEIL (BITMAP_SIZE (num_objects),
1833 sizeof (*p->in_use_p));
1834 ++i)
1835 {
1836 unsigned long j;
1837
1838 /* Something is in use if it is marked, or if it was in use in a
1839 context further down the context stack. */
1840 p->in_use_p[i] |= save_in_use_p (p)[i];
1841
1842 /* Decrement the free object count for every object allocated. */
1843 for (j = p->in_use_p[i]; j; j >>= 1)
1844 p->num_free_objects -= (j & 1);
1845 }
1846
1847 gcc_assert (p->num_free_objects < num_objects);
1848 }
1849 \f
1850 /* Unmark all objects. */
1851
1852 static void
1853 clear_marks (void)
1854 {
1855 unsigned order;
1856
1857 for (order = 2; order < NUM_ORDERS; order++)
1858 {
1859 page_entry *p;
1860
1861 for (p = G.pages[order]; p != NULL; p = p->next)
1862 {
1863 size_t num_objects = OBJECTS_IN_PAGE (p);
1864 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1865
1866 /* The data should be page-aligned. */
1867 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1868
1869 /* Pages that aren't in the topmost context are not collected;
1870 nevertheless, we need their in-use bit vectors to store GC
1871 marks. So, back them up first. */
1872 if (p->context_depth < G.context_depth)
1873 {
1874 if (! save_in_use_p (p))
1875 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1876 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1877 }
1878
1879 /* Reset the number of free objects and clear the
1880 in-use bits. These will be adjusted by mark_obj. */
1881 p->num_free_objects = num_objects;
1882 memset (p->in_use_p, 0, bitmap_size);
1883
1884 /* Make sure the one-past-the-end bit is always set. */
1885 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1886 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1887 }
1888 }
1889 }
1890
1891 /* Check if any blocks with a registered finalizer have become unmarked. If so,
1892 run the finalizer and unregister it because the block is about to be freed.
1893 Note that no guarantee is made about the order in which finalizers run, so
1894 touching other objects in GC memory is extremely unwise. */
1895
1896 static void
1897 ggc_handle_finalizers ()
1898 {
1899 if (G.context_depth != 0)
1900 return;
1901
1902 unsigned length = G.finalizers.length ();
1903 for (unsigned int i = 0; i < length;)
1904 {
1905 finalizer &f = G.finalizers[i];
1906 if (!ggc_marked_p (f.addr ()))
1907 {
1908 f.call ();
1909 G.finalizers.unordered_remove (i);
1910 length--;
1911 }
1912 else
1913 i++;
1914 }
1915
1916
1917 length = G.vec_finalizers.length ();
1918 for (unsigned int i = 0; i < length;)
1919 {
1920 vec_finalizer &f = G.vec_finalizers[i];
1921 if (!ggc_marked_p (f.addr ()))
1922 {
1923 f.call ();
1924 G.vec_finalizers.unordered_remove (i);
1925 length--;
1926 }
1927 else
1928 i++;
1929 }
1930 }
1931
1932 /* Free all empty pages. Partially empty pages need no attention
1933 because the `mark' bit doubles as an `unused' bit. */
1934
1935 static void
1936 sweep_pages (void)
1937 {
1938 unsigned order;
1939
1940 for (order = 2; order < NUM_ORDERS; order++)
1941 {
1942 /* The last page-entry to consider, regardless of entries
1943 placed at the end of the list. */
1944 page_entry * const last = G.page_tails[order];
1945
1946 size_t num_objects;
1947 size_t live_objects;
1948 page_entry *p, *previous;
1949 int done;
1950
1951 p = G.pages[order];
1952 if (p == NULL)
1953 continue;
1954
1955 previous = NULL;
1956 do
1957 {
1958 page_entry *next = p->next;
1959
1960 /* Loop until all entries have been examined. */
1961 done = (p == last);
1962
1963 num_objects = OBJECTS_IN_PAGE (p);
1964
1965 /* Add all live objects on this page to the count of
1966 allocated memory. */
1967 live_objects = num_objects - p->num_free_objects;
1968
1969 G.allocated += OBJECT_SIZE (order) * live_objects;
1970
1971 /* Only objects on pages in the topmost context should get
1972 collected. */
1973 if (p->context_depth < G.context_depth)
1974 ;
1975
1976 /* Remove the page if it's empty. */
1977 else if (live_objects == 0)
1978 {
1979 /* If P was the first page in the list, then NEXT
1980 becomes the new first page in the list, otherwise
1981 splice P out of the forward pointers. */
1982 if (! previous)
1983 G.pages[order] = next;
1984 else
1985 previous->next = next;
1986
1987 /* Splice P out of the back pointers too. */
1988 if (next)
1989 next->prev = previous;
1990
1991 /* Are we removing the last element? */
1992 if (p == G.page_tails[order])
1993 G.page_tails[order] = previous;
1994 free_page (p);
1995 p = previous;
1996 }
1997
1998 /* If the page is full, move it to the end. */
1999 else if (p->num_free_objects == 0)
2000 {
2001 /* Don't move it if it's already at the end. */
2002 if (p != G.page_tails[order])
2003 {
2004 /* Move p to the end of the list. */
2005 p->next = NULL;
2006 p->prev = G.page_tails[order];
2007 G.page_tails[order]->next = p;
2008
2009 /* Update the tail pointer... */
2010 G.page_tails[order] = p;
2011
2012 /* ... and the head pointer, if necessary. */
2013 if (! previous)
2014 G.pages[order] = next;
2015 else
2016 previous->next = next;
2017
2018 /* And update the backpointer in NEXT if necessary. */
2019 if (next)
2020 next->prev = previous;
2021
2022 p = previous;
2023 }
2024 }
2025
2026 /* If we've fallen through to here, it's a page in the
2027 topmost context that is neither full nor empty. Such a
2028 page must precede pages at lesser context depth in the
2029 list, so move it to the head. */
2030 else if (p != G.pages[order])
2031 {
2032 previous->next = p->next;
2033
2034 /* Update the backchain in the next node if it exists. */
2035 if (p->next)
2036 p->next->prev = previous;
2037
2038 /* Move P to the head of the list. */
2039 p->next = G.pages[order];
2040 p->prev = NULL;
2041 G.pages[order]->prev = p;
2042
2043 /* Update the head pointer. */
2044 G.pages[order] = p;
2045
2046 /* Are we moving the last element? */
2047 if (G.page_tails[order] == p)
2048 G.page_tails[order] = previous;
2049 p = previous;
2050 }
2051
2052 previous = p;
2053 p = next;
2054 }
2055 while (! done);
2056
2057 /* Now, restore the in_use_p vectors for any pages from contexts
2058 other than the current one. */
2059 for (p = G.pages[order]; p; p = p->next)
2060 if (p->context_depth != G.context_depth)
2061 ggc_recalculate_in_use_p (p);
2062 }
2063 }
2064
2065 #ifdef ENABLE_GC_CHECKING
2066 /* Clobber all free objects. */
2067
2068 static void
2069 poison_pages (void)
2070 {
2071 unsigned order;
2072
2073 for (order = 2; order < NUM_ORDERS; order++)
2074 {
2075 size_t size = OBJECT_SIZE (order);
2076 page_entry *p;
2077
2078 for (p = G.pages[order]; p != NULL; p = p->next)
2079 {
2080 size_t num_objects;
2081 size_t i;
2082
2083 if (p->context_depth != G.context_depth)
2084 /* Since we don't do any collection for pages in pushed
2085 contexts, there's no need to do any poisoning. And
2086 besides, the IN_USE_P array isn't valid until we pop
2087 contexts. */
2088 continue;
2089
2090 num_objects = OBJECTS_IN_PAGE (p);
2091 for (i = 0; i < num_objects; i++)
2092 {
2093 size_t word, bit;
2094 word = i / HOST_BITS_PER_LONG;
2095 bit = i % HOST_BITS_PER_LONG;
2096 if (((p->in_use_p[word] >> bit) & 1) == 0)
2097 {
2098 char *object = p->page + i * size;
2099
2100 /* Keep poison-by-write when we expect to use Valgrind,
2101 so the exact same memory semantics are kept, in case
2102 there are memory errors. We override this request
2103 below. */
2104 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2105 size));
2106 memset (object, 0xa5, size);
2107
2108 /* Drop the handle to avoid a handle leak. */
2109 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2110 }
2111 }
2112 }
2113 }
2114 }
2115 #else
2116 #define poison_pages()
2117 #endif
2118
2119 #ifdef ENABLE_GC_ALWAYS_COLLECT
2120 /* Validate that the reportedly free objects actually are. */
2121
2122 static void
2123 validate_free_objects (void)
2124 {
2125 struct free_object *f, *next, *still_free = NULL;
2126
2127 for (f = G.free_object_list; f ; f = next)
2128 {
2129 page_entry *pe = lookup_page_table_entry (f->object);
2130 size_t bit, word;
2131
2132 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2133 word = bit / HOST_BITS_PER_LONG;
2134 bit = bit % HOST_BITS_PER_LONG;
2135 next = f->next;
2136
2137 /* Make certain it isn't visible from any root. Notice that we
2138 do this check before sweep_pages merges save_in_use_p. */
2139 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2140
2141 /* If the object comes from an outer context, then retain the
2142 free_object entry, so that we can verify that the address
2143 isn't live on the stack in some outer context. */
2144 if (pe->context_depth != G.context_depth)
2145 {
2146 f->next = still_free;
2147 still_free = f;
2148 }
2149 else
2150 free (f);
2151 }
2152
2153 G.free_object_list = still_free;
2154 }
2155 #else
2156 #define validate_free_objects()
2157 #endif
2158
2159 /* Top level mark-and-sweep routine. */
2160
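/* In outline, collection proceeds only when
     G.allocated >= L + L * GGC_MIN_EXPAND / 100,
   where L = MAX (G.allocated_last_gc, GGC_MIN_HEAPSIZE * 1024),
   unless ggc_force_collect overrides the check. */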
2161 void
2162 ggc_collect (void)
2163 {
2164 /* Avoid frequent unnecessary work by skipping collection if the
2165 total allocations haven't expanded much since the last
2166 collection. */
2167 float allocated_last_gc =
2168 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2169
2170 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2171 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2172 return;
2173
2174 timevar_push (TV_GC);
2175 if (!quiet_flag)
2176 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2177 if (GGC_DEBUG_LEVEL >= 2)
2178 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2179
2180 /* Zero the total allocated bytes. This will be recalculated in the
2181 sweep phase. */
2182 G.allocated = 0;
2183
2184 /* Release the pages we freed the last time we collected, but didn't
2185 reuse in the interim. */
2186 release_pages ();
2187
2188 /* Indicate that we've seen collections at this context depth. */
2189 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2190
2191 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2192
2193 in_gc = true;
2194 clear_marks ();
2195 ggc_mark_roots ();
2196 ggc_handle_finalizers ();
2197
2198 if (GATHER_STATISTICS)
2199 ggc_prune_overhead_list ();
2200
2201 poison_pages ();
2202 validate_free_objects ();
2203 sweep_pages ();
2204
2205 in_gc = false;
2206 G.allocated_last_gc = G.allocated;
2207
2208 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2209
2210 timevar_pop (TV_GC);
2211
2212 if (!quiet_flag)
2213 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2214 if (GGC_DEBUG_LEVEL >= 2)
2215 fprintf (G.debug_file, "END COLLECTING\n");
2216 }
2217
2218 /* Assume that all GGC memory is reachable and grow the limits for the next
2219 collection. With checking, trigger GGC so -Q compilation outputs how much
2220 memory really is reachable. */
2221
2222 void
2223 ggc_grow (void)
2224 {
2225 #ifndef ENABLE_CHECKING
2226 G.allocated_last_gc = MAX (G.allocated_last_gc,
2227 G.allocated);
2228 #else
2229 ggc_collect ();
2230 #endif
2231 if (!quiet_flag)
2232 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2233 }
2234
2235 /* Print allocation statistics. */
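/* SCALE reduces a byte count to a printable magnitude and STAT_LABEL
   supplies the matching unit suffix: values below 10k are printed in
   bytes, values below 10M in kilobytes ('k'), and larger values in
   megabytes ('M'). */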
2236 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2237 ? (x) \
2238 : ((x) < 1024*1024*10 \
2239 ? (x) / 1024 \
2240 : (x) / (1024*1024))))
2241 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2242
2243 void
2244 ggc_print_statistics (void)
2245 {
2246 struct ggc_statistics stats;
2247 unsigned int i;
2248 size_t total_overhead = 0;
2249
2250 /* Clear the statistics. */
2251 memset (&stats, 0, sizeof (stats));
2252
2253 /* Make sure collection will really occur. */
2254 G.allocated_last_gc = 0;
2255
2256 /* Collect and print the statistics common across collectors. */
2257 ggc_print_common_statistics (stderr, &stats);
2258
2259 /* Release free pages so that we will not count the bytes allocated
2260 there as part of the total allocated memory. */
2261 release_pages ();
2262
2263 /* Collect some information about the various sizes of
2264 allocation. */
2265 fprintf (stderr,
2266 "Memory still allocated at the end of the compilation process\n");
2267 fprintf (stderr, "%-8s %10s %10s %10s\n",
2268 "Size", "Allocated", "Used", "Overhead");
2269 for (i = 0; i < NUM_ORDERS; ++i)
2270 {
2271 page_entry *p;
2272 size_t allocated;
2273 size_t in_use;
2274 size_t overhead;
2275
2276 /* Skip empty entries. */
2277 if (!G.pages[i])
2278 continue;
2279
2280 overhead = allocated = in_use = 0;
2281
2282 /* Figure out the total number of bytes allocated for objects of
2283 this size, and how many of them are actually in use. Also figure
2284 out how much memory the page table is using. */
2285 for (p = G.pages[i]; p; p = p->next)
2286 {
2287 allocated += p->bytes;
2288 in_use +=
2289 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2290
2291 overhead += (sizeof (page_entry) - sizeof (long)
2292 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2293 }
2294 fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
2295 (unsigned long) OBJECT_SIZE (i),
2296 SCALE (allocated), STAT_LABEL (allocated),
2297 SCALE (in_use), STAT_LABEL (in_use),
2298 SCALE (overhead), STAT_LABEL (overhead));
2299 total_overhead += overhead;
2300 }
2301 fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
2302 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2303 SCALE (G.allocated), STAT_LABEL (G.allocated),
2304 SCALE (total_overhead), STAT_LABEL (total_overhead));
2305
2306 if (GATHER_STATISTICS)
2307 {
2308 fprintf (stderr, "\nTotal allocations and overheads during "
2309 "the compilation process\n");
2310
2311 fprintf (stderr, "Total Overhead: %10"
2312 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2313 fprintf (stderr, "Total Allocated: %10"
2314 HOST_LONG_LONG_FORMAT "d\n",
2315 G.stats.total_allocated);
2316
2317 fprintf (stderr, "Total Overhead under 32B: %10"
2318 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2319 fprintf (stderr, "Total Allocated under 32B: %10"
2320 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2321 fprintf (stderr, "Total Overhead under 64B: %10"
2322 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2323 fprintf (stderr, "Total Allocated under 64B: %10"
2324 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2325 fprintf (stderr, "Total Overhead under 128B: %10"
2326 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2327 fprintf (stderr, "Total Allocated under 128B: %10"
2328 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
2329
2330 for (i = 0; i < NUM_ORDERS; i++)
2331 if (G.stats.total_allocated_per_order[i])
2332 {
2333 fprintf (stderr, "Total Overhead page size %9lu: %10"
2334 HOST_LONG_LONG_FORMAT "d\n",
2335 (unsigned long) OBJECT_SIZE (i),
2336 G.stats.total_overhead_per_order[i]);
2337 fprintf (stderr, "Total Allocated page size %9lu: %10"
2338 HOST_LONG_LONG_FORMAT "d\n",
2339 (unsigned long) OBJECT_SIZE (i),
2340 G.stats.total_allocated_per_order[i]);
2341 }
2342 }
2343 }
2344 \f
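/* Information saved in the PCH file about the GC state: the number of
   objects of each order that were written out. */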
2345 struct ggc_pch_ondisk
2346 {
2347 unsigned totals[NUM_ORDERS];
2348 };
2349
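/* State used while laying out and writing a PCH: the on-disk totals plus,
   for each order, the next address to hand out and the number of objects
   written so far. */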
2350 struct ggc_pch_data
2351 {
2352 struct ggc_pch_ondisk d;
2353 uintptr_t base[NUM_ORDERS];
2354 size_t written[NUM_ORDERS];
2355 };
2356
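/* Return a fresh, zero-initialized ggc_pch_data structure for a new PCH
   write. */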
2357 struct ggc_pch_data *
2358 init_ggc_pch (void)
2359 {
2360 return XCNEW (struct ggc_pch_data);
2361 }
2362
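/* Account for an object X of SIZE bytes when sizing the PCH: bump the
   count for the order SIZE maps to. */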
2363 void
2364 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2365 size_t size, bool is_string ATTRIBUTE_UNUSED)
2366 {
2367 unsigned order;
2368
2369 if (size < NUM_SIZE_LOOKUP)
2370 order = size_lookup[size];
2371 else
2372 {
2373 order = 10;
2374 while (size > OBJECT_SIZE (order))
2375 order++;
2376 }
2377
2378 d->d.totals[order]++;
2379 }
2380
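/* Return the total amount of memory needed for the PCH data, i.e. the
   page-aligned sum of the space required by each order. */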
2381 size_t
2382 ggc_pch_total_size (struct ggc_pch_data *d)
2383 {
2384 size_t a = 0;
2385 unsigned i;
2386
2387 for (i = 0; i < NUM_ORDERS; i++)
2388 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2389 return a;
2390 }
2391
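/* Record BASE as the address where the PCH data will live, computing a
   base address for each order from the totals gathered above. */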
2392 void
2393 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2394 {
2395 uintptr_t a = (uintptr_t) base;
2396 unsigned i;
2397
2398 for (i = 0; i < NUM_ORDERS; i++)
2399 {
2400 d->base[i] = a;
2401 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2402 }
2403 }
2404
2405
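/* Assign an address in the PCH area for an object of SIZE bytes and
   advance the cursor for its order; X itself is not examined. */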
2406 char *
2407 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2408 size_t size, bool is_string ATTRIBUTE_UNUSED)
2409 {
2410 unsigned order;
2411 char *result;
2412
2413 if (size < NUM_SIZE_LOOKUP)
2414 order = size_lookup[size];
2415 else
2416 {
2417 order = 10;
2418 while (size > OBJECT_SIZE (order))
2419 order++;
2420 }
2421
2422 result = (char *) d->base[order];
2423 d->base[order] += OBJECT_SIZE (order);
2424 return result;
2425 }
2426
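/* Prepare to write PCH objects to F; nothing is needed for this
   collector. */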
2427 void
2428 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2429 FILE *f ATTRIBUTE_UNUSED)
2430 {
2431 /* Nothing to do. */
2432 }
2433
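/* Write object X of SIZE bytes to PCH file F, padding it out to the full
   OBJECT_SIZE of its order, and pad the last object of each order out to
   a page boundary. */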
2434 void
2435 ggc_pch_write_object (struct ggc_pch_data *d,
2436 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2437 size_t size, bool is_string ATTRIBUTE_UNUSED)
2438 {
2439 unsigned order;
2440 static const char emptyBytes[256] = { 0 };
2441
2442 if (size < NUM_SIZE_LOOKUP)
2443 order = size_lookup[size];
2444 else
2445 {
2446 order = 10;
2447 while (size > OBJECT_SIZE (order))
2448 order++;
2449 }
2450
2451 if (fwrite (x, size, 1, f) != 1)
2452 fatal_error (input_location, "can%'t write PCH file: %m");
2453
2454 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2455 object out to OBJECT_SIZE(order). This happens for strings. */
2456
2457 if (size != OBJECT_SIZE (order))
2458 {
2459 unsigned padding = OBJECT_SIZE (order) - size;
2460
2461 /* To speed small writes, we use a nulled-out array that's larger
2462 than most padding requests as the source for our null bytes. This
2463 permits us to do the padding with fwrite() rather than fseek(), and
2464 limits the chance the OS may try to flush any outstanding writes. */
2465 if (padding <= sizeof (emptyBytes))
2466 {
2467 if (fwrite (emptyBytes, 1, padding, f) != padding)
2468 fatal_error (input_location, "can%'t write PCH file");
2469 }
2470 else
2471 {
2472 /* Larger than our buffer? Just default to fseek. */
2473 if (fseek (f, padding, SEEK_CUR) != 0)
2474 fatal_error (input_location, "can%'t write PCH file");
2475 }
2476 }
2477
2478 d->written[order]++;
2479 if (d->written[order] == d->d.totals[order]
2480 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2481 G.pagesize),
2482 SEEK_CUR) != 0)
2483 fatal_error (input_location, "can%'t write PCH file: %m");
2484 }
2485
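/* Finish writing the PCH: emit the per-order totals to F and free D. */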
2486 void
2487 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2488 {
2489 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2490 fatal_error (input_location, "can%'t write PCH file: %m");
2491 free (d);
2492 }
2493
2494 /* Move the PCH PTE entries just added to the end of by_depth to the
2495 front. */
2496
2497 static void
2498 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2499 {
2500 unsigned i;
2501
2502 /* First, we swap the new entries to the front of the varrays. */
2503 page_entry **new_by_depth;
2504 unsigned long **new_save_in_use;
2505
2506 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2507 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2508
2509 memcpy (&new_by_depth[0],
2510 &G.by_depth[count_old_page_tables],
2511 count_new_page_tables * sizeof (void *));
2512 memcpy (&new_by_depth[count_new_page_tables],
2513 &G.by_depth[0],
2514 count_old_page_tables * sizeof (void *));
2515 memcpy (&new_save_in_use[0],
2516 &G.save_in_use[count_old_page_tables],
2517 count_new_page_tables * sizeof (void *));
2518 memcpy (&new_save_in_use[count_new_page_tables],
2519 &G.save_in_use[0],
2520 count_old_page_tables * sizeof (void *));
2521
2522 free (G.by_depth);
2523 free (G.save_in_use);
2524
2525 G.by_depth = new_by_depth;
2526 G.save_in_use = new_save_in_use;
2527
2528 /* Now update all the index_by_depth fields. */
2529 for (i = G.by_depth_in_use; i > 0; --i)
2530 {
2531 page_entry *p = G.by_depth[i-1];
2532 p->index_by_depth = i-1;
2533 }
2534
2535 /* And last, we update the depth pointers in G.depth. The first
2536 entry is already 0, and context 0 entries always start at index
2537 0, so there is nothing to update in the first slot. We need a
2538 second slot only if we have old PTEs, and if we do, they start
2539 at index count_new_page_tables. */
2540 if (count_old_page_tables)
2541 push_depth (count_new_page_tables);
2542 }
2543
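/* Read the PCH data written by the routines above back in from F; the
   object data has already been mapped at ADDR. Existing pages are pushed
   to context depth 1 so that nothing loaded from the PCH is ever
   collected. */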
2544 void
2545 ggc_pch_read (FILE *f, void *addr)
2546 {
2547 struct ggc_pch_ondisk d;
2548 unsigned i;
2549 char *offs = (char *) addr;
2550 unsigned long count_old_page_tables;
2551 unsigned long count_new_page_tables;
2552
2553 count_old_page_tables = G.by_depth_in_use;
2554
2555 /* We've just read in a PCH file. So, every object that used to be
2556 allocated is now free. */
2557 clear_marks ();
2558 #ifdef ENABLE_GC_CHECKING
2559 poison_pages ();
2560 #endif
2561 /* Since we free all the allocated objects, the free list becomes
2562 useless. Validate it now, which will also clear it. */
2563 validate_free_objects ();
2564
2565 /* No object read from a PCH file should ever be freed. So, set the
2566 context depth to 1, and set the depth of all the currently-allocated
2567 pages to be 1 too. PCH pages will have depth 0. */
2568 gcc_assert (!G.context_depth);
2569 G.context_depth = 1;
2570 for (i = 0; i < NUM_ORDERS; i++)
2571 {
2572 page_entry *p;
2573 for (p = G.pages[i]; p != NULL; p = p->next)
2574 p->context_depth = G.context_depth;
2575 }
2576
2577 /* Allocate the appropriate page-table entries for the pages read from
2578 the PCH file. */
2579 if (fread (&d, sizeof (d), 1, f) != 1)
2580 fatal_error (input_location, "can%'t read PCH file: %m");
2581
2582 for (i = 0; i < NUM_ORDERS; i++)
2583 {
2584 struct page_entry *entry;
2585 char *pte;
2586 size_t bytes;
2587 size_t num_objs;
2588 size_t j;
2589
2590 if (d.totals[i] == 0)
2591 continue;
2592
2593 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2594 num_objs = bytes / OBJECT_SIZE (i);
2595 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2596 - sizeof (long)
2597 + BITMAP_SIZE (num_objs + 1)));
2598 entry->bytes = bytes;
2599 entry->page = offs;
2600 entry->context_depth = 0;
2601 offs += bytes;
2602 entry->num_free_objects = 0;
2603 entry->order = i;
2604
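/* Every object on a PCH page is live, so set all the in-use bits,
   including the one-past-the-end bit. */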
2605 for (j = 0;
2606 j + HOST_BITS_PER_LONG <= num_objs + 1;
2607 j += HOST_BITS_PER_LONG)
2608 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2609 for (; j < num_objs + 1; j++)
2610 entry->in_use_p[j / HOST_BITS_PER_LONG]
2611 |= 1L << (j % HOST_BITS_PER_LONG);
2612
2613 for (pte = entry->page;
2614 pte < entry->page + entry->bytes;
2615 pte += G.pagesize)
2616 set_page_table_entry (pte, entry);
2617
2618 if (G.page_tails[i] != NULL)
2619 G.page_tails[i]->next = entry;
2620 else
2621 G.pages[i] = entry;
2622 G.page_tails[i] = entry;
2623
2624 /* We start off by just adding all the new information to the
2625 end of the varrays; later, we will move the new information
2626 to the front of the varrays, as the PCH page tables are at
2627 context 0. */
2628 push_by_depth (entry, 0);
2629 }
2630
2631 /* Now, we update the various data structures that speed page table
2632 handling. */
2633 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2634
2635 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2636
2637 /* Update the statistics. */
2638 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2639 }