]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/ggc-common.c
Always pass explicit location to fatal_error.
[thirdparty/gcc.git] / gcc / ggc-common.c
CommitLineData
bebb7b68 1/* Simple garbage collection for the GNU compiler.
d353bf18 2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
bebb7b68 3
f12b58b3 4This file is part of GCC.
bebb7b68 5
f12b58b3 6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8c4c00c1 8Software Foundation; either version 3, or (at your option) any later
f12b58b3 9version.
bebb7b68 10
f12b58b3 11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
155b05dc 13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
bebb7b68 15
155b05dc 16You should have received a copy of the GNU General Public License
8c4c00c1 17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
bebb7b68 19
20/* Generic garbage collection (GC) functions and data, not specific to
21 any particular GC implementation. */
22
23#include "config.h"
24#include "system.h"
805e22b2 25#include "coretypes.h"
d9dd21a8 26#include "hash-table.h"
f8e15e8a 27#include "ggc.h"
ba72912a 28#include "ggc-internal.h"
0b205f4c 29#include "diagnostic-core.h"
7d83df95 30#include "params.h"
ddf4604f 31#include "hosthooks.h"
53ee4dac 32#include "hosthooks-def.h"
740cd0be 33#include "plugin.h"
34#include "vec.h"
dbb19e66 35#include "timevar.h"
573aba85 36
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  Non-NULL only while a statistics
   pass is collecting; see ggc_print_common_statistics.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the PCH machinery below.  */
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
740cd0be 61
740cd0be 62/* Dynamically register a new GGC root table RT. This is useful for
63 plugins. */
64
48e1416a 65void
740cd0be 66ggc_register_root_tab (const struct ggc_root_tab* rt)
67{
86b63696 68 if (rt)
f1f41a6c 69 extra_root_vec.safe_push (rt);
740cd0be 70}
71
c03efe25 72/* Mark all the roots in the table RT. */
73
74static void
75ggc_mark_root_tab (const_ggc_root_tab_t rt)
76{
77 size_t i;
78
79 for ( ; rt->base != NULL; rt++)
80 for (i = 0; i < rt->nelt; i++)
81 (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
82}
83
/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  /* Deletable roots are simply cleared rather than marked; anything
     they pointed to becomes garbage.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark the statically registered roots...  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  /* ...and the dynamically registered ones (e.g. from plugins).  */
  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  /* When identifiers are not protected, drop unmarked strings now that
     everything reachable has been marked.  */
  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
114
1f3233d1 115/* Allocate a block of memory, then clear it. */
116void *
92f06184 117ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
118 MEM_STAT_DECL)
cf6cce73 119{
92f06184 120 void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
1f3233d1 121 memset (buf, 0, size);
122 return buf;
cf6cce73 123}
124
/* Resize a block of memory, possibly re-allocating it.  Behaves like
   realloc for GC-allocated memory: a NULL X allocates fresh storage,
   shrinking returns X itself, growing copies into a new block and frees
   the old one.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  /* NOTE: this is the size of the containing pool slot, which may be
     larger than the size originally requested for X.  */
  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
170
791ceafe 171void *
ba72912a 172ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
173 size_t n ATTRIBUTE_UNUSED)
791ceafe 174{
ba72912a 175 gcc_assert (c * n == sizeof (struct htab));
25a27413 176 return ggc_cleared_alloc<htab> ();
ba72912a 177}
178
179/* TODO: once we actually use type information in GGC, create a new tag
180 gt_gcc_ptr_array and use it for pointer arrays. */
181void *
182ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
183{
184 gcc_assert (sizeof (PTR *) == n);
25a27413 185 return ggc_cleared_vec_alloc<PTR *> (c);
791ceafe 186}
187
573aba85 188/* These are for splay_tree_new_ggc. */
6ec1f4e0 189void *
5cc13354 190ggc_splay_alloc (int sz, void *nl)
573aba85 191{
0d59b19d 192 gcc_assert (!nl);
ba72912a 193 return ggc_internal_alloc (sz);
573aba85 194}
195
196void
6ec1f4e0 197ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
573aba85 198{
0d59b19d 199 gcc_assert (!nl);
573aba85 200}
201
/* Print statistics that are independent of the collector in use.  */
/* SCALE/LABEL shrink a byte count to a human-readable magnitude and the
   matching unit suffix (' ', 'k' or 'M'); they are for use by the
   collector-specific statistics printers.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
226\f
227/* Functions for saving and restoring GCable memory to disk. */
228
/* Per-object bookkeeping for PCH writing: one entry per GC object that
   will be saved to the PCH file.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to the callbacks.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside OBJ.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Object size in bytes.  */
  void *new_addr;		/* Address the object will have on restore.  */
};

/* Hash a pointer; low bits are dropped since allocations are aligned.  */
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
saving_hasher::hash (const value_type *p)
{
  return POINTER_HASH (p->obj);
}

/* Entries compare equal to the raw object address they describe.  */
inline bool
saving_hasher::equal (const value_type *p1, const compare_type *p2)
{
  return p1->obj == p2;
}

/* Map from object address to its ptr_data entry; live only during
   gt_pch_save.  */
static hash_table<saving_hasher> *saving_htab;
d9dd21a8 264
/* Register an object in the hash table.  OBJ is the object, NOTE_PTR_FN
   the callback that enumerates pointers inside it, NOTE_PTR_COOKIE an
   opaque value handed back to that callback.  Returns 1 if the object
   was newly registered, 0 if it was already known (or is the NULL /
   (void *) 1 sentinel).  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  /* NULL and (void *) 1 are sentinel values, not real objects.  */
  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      /* Re-registration must agree with the original registration.  */
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  /* Strings (marker gt_pch_p_S) have no GC size record; measure them.  */
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
295
/* Register a reorder callback REORDER_FN for OBJ, which must already
   have been entered into the hash table by gt_pch_note_object with the
   same NOTE_PTR_COOKIE.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  /* Ignore the sentinel values.  */
  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
313
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* Flat array of all entries.  */
  size_t ptrs_i;		/* Fill index into PTRS.  */
};
324
325/* Callbacks for htab_traverse. */
326
d9dd21a8 327int
328ggc_call_count (ptr_data **slot, traversal_state *state)
573aba85 329{
d9dd21a8 330 struct ptr_data *d = *slot;
6ec1f4e0 331
1bb42c87 332 ggc_pch_count_object (state->d, d->obj, d->size,
5cc13354 333 d->note_ptr_fn == gt_pch_p_S);
573aba85 334 state->count++;
335 return 1;
336}
337
d9dd21a8 338int
339ggc_call_alloc (ptr_data **slot, traversal_state *state)
573aba85 340{
d9dd21a8 341 struct ptr_data *d = *slot;
6ec1f4e0 342
1bb42c87 343 d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
5cc13354 344 d->note_ptr_fn == gt_pch_p_S);
573aba85 345 state->ptrs[state->ptrs_i++] = d;
346 return 1;
347}
348
349/* Callback for qsort. */
350
351static int
6ec1f4e0 352compare_ptr_data (const void *p1_p, const void *p2_p)
573aba85 353{
9f627b1a 354 const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
355 const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
573aba85 356 return (((size_t)p1->new_addr > (size_t)p2->new_addr)
357 - ((size_t)p1->new_addr < (size_t)p2->new_addr));
358}
359
/* Callbacks for note_ptr_fn.  */

/* Rewrite the pointer at PTR_P to the address its target will occupy in
   the restored PCH image.  STATE_P is the traversal_state (unused).  */
static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  /* Sentinel values pass through unchanged.  */
  if (*ptr == NULL || *ptr == (void *)1)
    return;

  /* Every real pointer must have been registered earlier.  */
  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
378
/* Write out, after relocation, the pointers in TAB.  Each root slot is
   written as the address its target will have after restore; sentinel
   values (NULL and (void *) 1) are written verbatim.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	  else
	    {
	      /* Translate through the saving table to the restore-time
		 address.  */
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	}
}
410
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;		/* File offset of the mmapped area.  */
  size_t size;			/* Size of the mmapped area in bytes.  */
  void *preferred_base;		/* Address the data was laid out for.  */
};
419
/* Write out the state of the compiler to F.  The layout is: scalar
   roots, translated pointer roots, an mmap_info header, alignment
   padding, then the relocated object image.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  /* Let every root's PCH-walk callback register the objects reachable
     from it (they end up in saving_htab via gt_pch_note_object).  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  /* Sort by restore-time address so objects are written in image order.  */
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  Each object is relocated in a
     scratch copy (or in place for strings) so the live image is not
     corrupted.  */
  for (i = 0; i < state.count; i++)
    {
      /* Grow the scratch buffer to fit the largest object seen so far.  */
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      /* Snapshot the object, relocate its pointers in place, write the
	 relocated form, then restore the snapshot (strings contain no
	 pointers and need no restore).  */
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}
595
/* Read the state of the compiler back in from F.  Must mirror the
   layout produced by gt_pch_save exactly.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error (input_location, "can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");

  /* Hook contract: 1 = mapped at the preferred base, 0 = memory is
     allocated but must be read in, < 0 = could not place the data.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error (input_location, "had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error (input_location, "can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
ddf4604f 648
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t /* size */, int /* fd */)
{
  /* No preferred base: the caller falls back to its own allocation.  */
  return NULL;
}
48443f83 659
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, as relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  /* The allocation is deliberately not freed: on success (addr == base)
     the caller reads the PCH image into it.  */
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
48443f83 673
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  /* Virtual memory is mapped in units of the system page size.  */
  const size_t granularity = getpagesize ();
  return granularity;
}
683
53ee4dac 684#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  /* Probe: map, record the address the kernel chose, unmap again.  The
     hope is the same range is still free at restore time.  */
  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}
4e00b6fd 703
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  /* No MAP_FIXED: if the kernel cannot honor BASE it returns a different
     address, which we treat as failure rather than clobbering mappings.  */
  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
53ee4dac 728#endif /* HAVE_MMAP_FILE */
7d83df95 729
9ca7413c 730#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
731
/* Modify the bound based on rlimits.  Returns LIMIT clamped to the
   process's address-space (or data-segment) resource limit, when one is
   set and smaller.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
764
/* Heuristic to set a default for GGC_MIN_EXPAND.  Returns a percentage
   derived from physical RAM, bounded by rlimits.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
783
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  Returns a size in
   kilobytes, derived from physical RAM and resource limits.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  /* Clamp to the documented [4M, 128M] range.  */
  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
9ca7413c 822#endif
7d83df95 823
824void
6ec1f4e0 825init_ggc_heuristics (void)
7d83df95 826{
55074432 827#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
686e2769 828 set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
829 set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
7d83df95 830#endif
831}
674b05f5 832
/* Datastructure used to store per-call-site statistics.  */
struct ggc_loc_descriptor
{
  const char *file;		/* Source file of the allocation site.  */
  int line;			/* Source line of the allocation site.  */
  const char *function;		/* Enclosing function name.  */
  int times;			/* Number of allocations from this site.  */
  size_t allocated;		/* Total payload bytes allocated.  */
  size_t overhead;		/* Total allocator overhead bytes.  */
  size_t freed;			/* Bytes explicitly freed.  */
  size_t collected;		/* Bytes reclaimed by collection.  */
};

/* Hash table helper.  */

struct ggc_loc_desc_hasher : typed_noop_remove <ggc_loc_descriptor>
{
  typedef ggc_loc_descriptor value_type;
  typedef ggc_loc_descriptor compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
ggc_loc_desc_hasher::hash (const value_type *d)
{
  /* NOTE(review): OR-ing in the line number is a weak mix, but cheap;
     collisions only cost lookup time, not correctness.  */
  return htab_hash_pointer (d->function) | d->line;
}

inline bool
ggc_loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
{
  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}

/* Hashtable used for statistics.  */
static hash_table<ggc_loc_desc_hasher> *loc_hash;

/* One entry per live tracked allocation, linking the object back to the
   call site that allocated it.  */
struct ggc_ptr_hash_entry
{
  void *ptr;			/* The allocated object.  */
  struct ggc_loc_descriptor *loc; /* Its allocation site.  */
  size_t size;			/* Payload plus overhead, in bytes.  */
};

/* Helper for ptr_hash table.  */

struct ptr_hash_hasher : typed_noop_remove <ggc_ptr_hash_entry>
{
  typedef ggc_ptr_hash_entry value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
ptr_hash_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->ptr);
}

/* Entries compare equal to the raw object address.  */
inline bool
ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
{
  return (p->ptr == p2);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static hash_table<ptr_hash_hasher> *ptr_hash;
d9dd21a8 903
/* Return descriptor for given call site, create new one if needed.
   NAME/LINE/FUNCTION identify the site; lookup keys on pointer identity
   of the strings, which works because they come from __FILE__ and
   __FUNCTION__ literals.  */
static struct ggc_loc_descriptor *
make_loc_descriptor (const char *name, int line, const char *function)
{
  struct ggc_loc_descriptor loc;
  struct ggc_loc_descriptor **slot;

  loc.file = name;
  loc.line = line;
  loc.function = function;
  /* Created lazily on first use.  */
  if (!loc_hash)
    loc_hash = new hash_table<ggc_loc_desc_hasher> (10);

  slot = loc_hash->find_slot (&loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct ggc_loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}
926
3927afe0 927/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */
928void
0ca9a7b6 929ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
3927afe0 930 const char *name, int line, const char *function)
674b05f5 931{
9908fe4d 932 struct ggc_loc_descriptor *loc = make_loc_descriptor (name, line, function);
933 struct ggc_ptr_hash_entry *p = XNEW (struct ggc_ptr_hash_entry);
934 ggc_ptr_hash_entry **slot;
0ca9a7b6 935
936 p->ptr = ptr;
937 p->loc = loc;
938 p->size = allocated + overhead;
c1f445d2 939 if (!ptr_hash)
940 ptr_hash = new hash_table<ptr_hash_hasher> (10);
941 slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
0d59b19d 942 gcc_assert (!*slot);
0ca9a7b6 943 *slot = p;
674b05f5 944
945 loc->times++;
946 loc->allocated+=allocated;
947 loc->overhead+=overhead;
948}
949
0ca9a7b6 950/* Helper function for prune_overhead_list. See if SLOT is still marked and
951 remove it from hashtable if it is not. */
d9dd21a8 952int
9908fe4d 953ggc_prune_ptr (ggc_ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
0ca9a7b6 954{
9908fe4d 955 struct ggc_ptr_hash_entry *p = *slot;
0ca9a7b6 956 if (!ggc_marked_p (p->ptr))
957 {
958 p->loc->collected += p->size;
c1f445d2 959 ptr_hash->clear_slot (slot);
0ca9a7b6 960 free (p);
961 }
962 return 1;
963}
964
/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  Entries for collected objects are dropped and
   their sizes credited to the owning call site (see ggc_prune_ptr).  */
void
ggc_prune_overhead_list (void)
{
  ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
}
972
973/* Notice that the pointer has been freed. */
2e2fd8fe 974void
975ggc_free_overhead (void *ptr)
0ca9a7b6 976{
9908fe4d 977 ggc_ptr_hash_entry **slot
c1f445d2 978 = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
9908fe4d 979 struct ggc_ptr_hash_entry *p;
9ca7413c 980 /* The pointer might be not found if a PCH read happened between allocation
981 and ggc_free () call. FIXME: account memory properly in the presence of
982 PCH. */
983 if (!slot)
984 return;
9908fe4d 985 p = (struct ggc_ptr_hash_entry *) *slot;
0ca9a7b6 986 p->loc->freed += p->size;
c1f445d2 987 ptr_hash->clear_slot (slot);
0ca9a7b6 988 free (p);
989}
990
674b05f5 991/* Helper for qsort; sort descriptors by amount of memory consumed. */
992static int
51949610 993final_cmp_statistic (const void *loc1, const void *loc2)
674b05f5 994{
9908fe4d 995 const struct ggc_loc_descriptor *const l1 =
996 *(const struct ggc_loc_descriptor *const *) loc1;
997 const struct ggc_loc_descriptor *const l2 =
998 *(const struct ggc_loc_descriptor *const *) loc2;
51949610 999 long diff;
1000 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
0c07444c 1001 (l2->allocated + l2->overhead - l2->freed));
51949610 1002 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1003}
1004
1005/* Helper for qsort; sort descriptors by amount of memory consumed. */
1006static int
1007cmp_statistic (const void *loc1, const void *loc2)
1008{
9908fe4d 1009 const struct ggc_loc_descriptor *const l1 =
1010 *(const struct ggc_loc_descriptor *const *) loc1;
1011 const struct ggc_loc_descriptor *const l2 =
1012 *(const struct ggc_loc_descriptor *const *) loc2;
51949610 1013 long diff;
1014
1015 diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
1016 (l2->allocated + l2->overhead - l2->freed - l2->collected));
1017 if (diff)
1018 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1019 diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
1020 (l2->allocated + l2->overhead - l2->freed));
1021 return diff > 0 ? 1 : diff < 0 ? -1 : 0;
674b05f5 1022}
1023
/* Scratch array of descriptors, filled from loc_hash by the traversal
   callback below and sorted by dump_ggc_loc_statistics.  */
static struct ggc_loc_descriptor **loc_array;

/* Hash-table traversal callback: append *SLOT to loc_array at index *N
   and advance the index.  Returns 1 so the traversal always continues.  */
int
ggc_add_statistics (ggc_loc_descriptor **slot, int *n)
{
  loc_array[(*n)++] = *slot;
  return 1;
}
1033
1034/* Dump per-site memory statistics. */
ecd52ea9 1035
2e2fd8fe 1036void
ecd52ea9 1037dump_ggc_loc_statistics (bool final)
674b05f5 1038{
674b05f5 1039 int nentries = 0;
1040 char s[4096];
0ca9a7b6 1041 size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
674b05f5 1042 int i;
1043
ecd52ea9 1044 if (! GATHER_STATISTICS)
1045 return;
1046
0ca9a7b6 1047 ggc_force_collect = true;
1048 ggc_collect ();
1049
9908fe4d 1050 loc_array = XCNEWVEC (struct ggc_loc_descriptor *,
c1f445d2 1051 loc_hash->elements_with_deleted ());
674b05f5 1052 fprintf (stderr, "-------------------------------------------------------\n");
0ca9a7b6 1053 fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
1054 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
674b05f5 1055 fprintf (stderr, "-------------------------------------------------------\n");
c1f445d2 1056 loc_hash->traverse <int *, ggc_add_statistics> (&nentries);
51949610 1057 qsort (loc_array, nentries, sizeof (*loc_array),
1058 final ? final_cmp_statistic : cmp_statistic);
674b05f5 1059 for (i = 0; i < nentries; i++)
1060 {
9908fe4d 1061 struct ggc_loc_descriptor *d = loc_array[i];
0ca9a7b6 1062 allocated += d->allocated;
1063 times += d->times;
1064 freed += d->freed;
1065 collected += d->collected;
674b05f5 1066 overhead += d->overhead;
1067 }
1068 for (i = 0; i < nentries; i++)
1069 {
9908fe4d 1070 struct ggc_loc_descriptor *d = loc_array[i];
674b05f5 1071 if (d->allocated)
1072 {
1073 const char *s1 = d->file;
1074 const char *s2;
1075 while ((s2 = strstr (s1, "gcc/")))
1076 s1 = s2 + 4;
1077 sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
0ca9a7b6 1078 s[48] = 0;
1079 fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
1080 (long)d->collected,
1081 (d->collected) * 100.0 / collected,
1082 (long)d->freed,
1083 (d->freed) * 100.0 / freed,
1084 (long)(d->allocated + d->overhead - d->freed - d->collected),
1085 (d->allocated + d->overhead - d->freed - d->collected) * 100.0
1086 / (allocated + overhead - freed - collected),
1087 (long)d->overhead,
1088 d->overhead * 100.0 / overhead,
1089 (long)d->times);
674b05f5 1090 }
1091 }
0ca9a7b6 1092 fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
1093 "Total", (long)collected, (long)freed,
1094 (long)(allocated + overhead - freed - collected), (long)overhead,
1095 (long)times);
1096 fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
1097 "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
674b05f5 1098 fprintf (stderr, "-------------------------------------------------------\n");
5c3cc815 1099 ggc_force_collect = false;
674b05f5 1100}