/* gcc/ggc-common.c  */
1 /* Simple garbage collection for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
3 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Generic garbage collection (GC) functions and data, not specific to
22 any particular GC implementation. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "hashtab.h"
28 #include "ggc.h"
29 #include "ggc-internal.h"
30 #include "diagnostic-core.h"
31 #include "params.h"
32 #include "hosthooks.h"
33 #include "hosthooks-def.h"
34 #include "plugin.h"
35 #include "vec.h"
36 #include "timevar.h"
37
38 /* When set, ggc_collect will do collection. */
39 bool ggc_force_collect;
40
41 /* When true, protect the contents of the identifier hash table. */
42 bool ggc_protect_identifiers = true;
43
44 /* Statistics about the allocation. */
45 static ggc_statistics *ggc_stats;
46
47 struct traversal_state;
48
49 static int ggc_htab_delete (void **, void *);
50 static hashval_t saving_htab_hash (const void *);
51 static int saving_htab_eq (const void *, const void *);
52 static int call_count (void **, void *);
53 static int call_alloc (void **, void *);
54 static int compare_ptr_data (const void *, const void *);
55 static void relocate_ptrs (void *, void *);
56 static void write_pch_globals (const struct ggc_root_tab * const *tab,
57 struct traversal_state *state);
58
59 /* Maintain global roots that are preserved during GC. */
60
61 /* Process a slot of an htab by deleting it if it has not been marked. */
62
63 static int
64 ggc_htab_delete (void **slot, void *info)
65 {
66 const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;
67
68 if (! (*r->marked_p) (*slot))
69 htab_clear_slot (*r->base, slot);
70 else
71 (*r->cb) (*slot);
72
73 return 1;
74 }
75
76
77 /* This extra vector of dynamically registered root_tab-s is used by
78 ggc_mark_roots and gives the ability to dynamically add new GGC root
79 tables, for instance from some plugins; this vector is on the heap
80 since it is used by GGC internally. */
81 typedef const struct ggc_root_tab *const_ggc_root_tab_t;
82 static vec<const_ggc_root_tab_t> extra_root_vec;
83
84 /* Dynamically register a new GGC root table RT. This is useful for
85 plugins. */
86
87 void
88 ggc_register_root_tab (const struct ggc_root_tab* rt)
89 {
90 if (rt)
91 extra_root_vec.safe_push (rt);
92 }
93
94 /* This extra vector of dynamically registered cache_tab-s is used by
95 ggc_mark_roots and gives the ability to dynamically add new GGC cache
96 tables, for instance from some plugins; this vector is on the heap
97 since it is used by GGC internally. */
98 typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
99 static vec<const_ggc_cache_tab_t> extra_cache_vec;
100
101 /* Dynamically register a new GGC cache table CT. This is useful for
102 plugins. */
103
104 void
105 ggc_register_cache_tab (const struct ggc_cache_tab* ct)
106 {
107 if (ct)
108 extra_cache_vec.safe_push (ct);
109 }
110
/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  CTP is a NULL-base-terminated array of cache table
   descriptors; each live htab is itself marked, its unmarked entries are
   dropped via ggc_htab_delete, and its entry vector is marked so the
   collector does not free the table's own storage.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
	/* Keep the htab object itself alive.  */
	ggc_set_mark (*cti->base);
	htab_traverse_noresize (*cti->base, ggc_htab_delete,
				CONST_CAST (void *, (const void *)cti));
	/* Keep the slot array of the htab alive as well.  */
	ggc_set_mark ((*cti->base)->entries);
      }
}
128
129 /* Mark all the roots in the table RT. */
130
131 static void
132 ggc_mark_root_tab (const_ggc_root_tab_t rt)
133 {
134 size_t i;
135
136 for ( ; rt->base != NULL; rt++)
137 for (i = 0; i < rt->nelt; i++)
138 (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
139 }
140
/* Iterate through all registered roots and mark each element.  Called at
   the start of a collection; the ordering of the phases below matters:
   deletable roots are cleared first, then static and plugin-registered
   roots are marked, then caches are pruned of dead entries.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  /* Deletable roots hold caches that may simply be dropped at GC time;
     zero them rather than marking them.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark the statically-known roots, then any roots plugins added.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  /* When identifiers are not protected, sweep dead ones now that all
     marking is done.  */
  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
179
/* Allocate a block of memory of SIZE bytes from GC storage, then clear it.
   MEM_STAT_DECL threads allocation-statistics bookkeeping through when
   GATHER_STATISTICS is enabled.  */
void *
ggc_internal_cleared_alloc_stat (size_t size MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc_stat (size PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}
188
/* Resize a block of GC-allocated memory X to SIZE bytes, possibly
   re-allocating it.  A NULL X behaves like a plain allocation.  When the
   block shrinks (or stays the same), X is returned unchanged; otherwise a
   new block is allocated, the old contents copied, and the old block
   freed.  The VALGRIND_* calls only adjust memcheck's accessibility
   bookkeeping; they are no-ops in normal builds.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* NOTE: this is the size of the containing allocation, which may be
     larger than the size originally requested for X.  */
  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
234
/* Allocator callback for htab_create_alloc: allocate a cleared struct htab
   from GC storage.  The C and N arguments (element count and size) are
   required by the htab_alloc interface but must multiply out to exactly
   sizeof (struct htab) here.  */
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_alloc_cleared_htab ();
}
242
/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
/* Allocator callback used for pointer arrays: allocate a cleared vector
   of C pointers from GC storage.  N must be the size of one pointer.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_internal_cleared_vec_alloc (sizeof (PTR *), c);
}
251
/* These are for splay_tree_new_ggc.  Allocate SZ bytes from GC storage;
   OBJ_TYPE is accepted for the splay-tree allocator signature but unused,
   and NL (the allocator's closure argument) must be NULL.  */
void *
ggc_splay_alloc (enum gt_types_enum obj_type ATTRIBUTE_UNUSED, int sz,
		 void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}
260
/* Deallocation callback for splay_tree_new_ggc: deliberately a no-op,
   since GC storage is reclaimed by the collector, not by explicit free.
   NL (the allocator's closure argument) must be NULL.  */
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
266
267 /* Print statistics that are independent of the collector in use. */
/* SCALE reduces a byte count X to the unit selected by LABEL: raw bytes
   below 10K, kilobytes below 10M, megabytes otherwise.  The two macros
   must stay in sync, since LABEL supplies the suffix character for the
   value SCALE produces.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
274
/* Print collector-independent allocation statistics to STREAM.  STATS is
   filled in by the collector during the forced collection below; at
   present nothing further is printed here (hence STREAM is unused).  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
291 \f
292 /* Functions for saving and restoring GCable memory to disk. */
293
/* Hash table mapping the address of every GCable object noted for PCH
   output to its ptr_data record; created and destroyed by gt_pch_save.  */
static htab_t saving_htab;

/* Bookkeeping for one object to be written to the PCH file.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to note_ptr_fn.  */
  gt_note_pointers note_ptr_fn;	/* Walks/relocates pointers in the object.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Object size in bytes.  */
  void *new_addr;		/* Address the object will have on restore.  */
  enum gt_types_enum type;	/* GTY type tag of the object.  */
};
306
/* Hash a pointer X for saving_htab.  The low 3 bits are shifted away
   because GC objects are at least 8-byte aligned, so those bits carry no
   information.  X is parenthesized so the cast and shift bind correctly
   when the argument is an expression, and the whole expansion is wrapped
   so it composes safely in larger expressions.  */
#define POINTER_HASH(x) ((hashval_t) ((intptr_t) (x) >> 3))
308
/* Register an object in the hash table.  OBJ is the object's address,
   NOTE_PTR_FN the pointer-walking callback for it (with NOTE_PTR_COOKIE
   passed through), and TYPE its GTY type tag.  Returns 1 if the object
   was newly registered, 0 if it is NULL, the "deleted" sentinel (void*)1,
   or already present.  Re-registration with different callbacks is a bug
   and asserts.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn,
		    enum gt_types_enum type)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
			      INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  /* Strings have no GC size header; measure them directly.  */
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  (*slot)->type = type;
  return 1;
}
342
/* Register a reorder function REORDER_FN for object OBJ, which must
   already have been registered with gt_pch_note_object using the same
   NOTE_PTR_COOKIE.  The sentinel values NULL and (void*)1 are ignored.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
360
/* Hash and equality functions for saving_htab, callbacks for htab_create.
   P is a ptr_data record; hash on the object address it describes.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((const struct ptr_data *)p)->obj);
}
368
/* Equality callback for saving_htab.  Deliberately asymmetric: P1 is a
   stored ptr_data record, P2 is the raw object address used as lookup
   key (see the htab_find_*_with_hash calls in this file).  */
static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((const struct ptr_data *)p1)->obj == p2;
}
374
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects to be written.  */
  struct ptr_data **ptrs;	/* All objects, later sorted by new_addr.  */
  size_t ptrs_i;		/* Fill index into PTRS during call_alloc.  */
};
385
/* Callbacks for htab_traverse.  */

/* First pass over saving_htab: let the collector account for each object
   (so it can compute the total PCH size) and count the objects.  Always
   returns 1 to continue traversal.  */
static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S,
			d->type);
  state->count++;
  return 1;
}
400
/* Second pass over saving_htab: ask the collector for each object's
   address in the restored image and collect the records into
   state->ptrs for sorting.  Always returns 1 to continue traversal.  */
static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S,
				      d->type);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
413
/* Callback for qsort.  Orders ptr_data records by their post-restore
   address (new_addr), so objects are written in image order.  The
   pointers are compared via size_t since relational comparison of
   unrelated pointers is not portable; the subtraction of the two flags
   yields -1/0/1 without overflow.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
424
/* Callbacks for note_ptr_fn.  */

/* Rewrite the pointer at *PTR_P to the address its target will have in
   the restored PCH image, looked up in saving_htab.  NULL and the
   (void*)1 sentinel are left untouched.  Asserts if the target was never
   registered.  */
static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}
443
/* Write out, after relocation, the pointers in TAB.  Each global root
   pointer is translated to the address its target will have in the
   restored image; NULL and the (void*)1 sentinel are written as-is.
   Aborts via fatal_error on any write failure.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	  else
	    {
	      /* The target must have been noted via gt_pch_note_object;
		 write its relocated address instead of the live one.  */
	      new_ptr = (struct ptr_data *)
		htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	}
}
475
/* Hold the information we need to mmap the file back in.  This struct is
   written verbatim into the PCH file just before the object data.  */

struct mmap_info
{
  size_t offset;	/* File offset of the mmapped data.  */
  size_t size;		/* Size in bytes of the mmapped area.  */
  void *preferred_base;	/* Address the data was laid out for.  */
};
484
/* Write out the state of the compiler to F.  The file layout produced is:
   scalar globals, relocated global pointers, a struct mmap_info, padding
   to the allocation granularity, then the object data itself.  Aborts
   via fatal_error on any I/O failure.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  /* Walk every GC root and every cache root, letting the per-type pchw
     walkers register reachable objects in saving_htab.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  htab_traverse (saving_htab, call_alloc, &state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  /* Sort by target address so objects are written in image order.  */
  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

  /* Actually write out the objects.  Each object's pointers are relocated
     in place, the object is written, and then (except for strings, which
     are never modified) the original bytes are restored from THIS_OBJECT
     so the live compiler image is left intact.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
599
/* Read the state of the compiler back in from F.  Mirrors the layout
   written by gt_pch_save: scalars, global pointers, mmap_info, then the
   object data which is mapped (or read) at its preferred base.  A PCH
   that cannot be placed at its preferred base is a fatal error.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  /* result: -1 = wrong address (fatal), 0 = memory allocated but the
     data must be read in manually, 1 = data already mapped in place.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
659
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  SIZE and FD are unused in this fallback.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}
670
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, as relocation
   of the PCH file would be required.  (The allocation is intentionally not
   freed: on success the caller reads the PCH data into it.)  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}
684
/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize();
}
694
695 #if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  Returns the chosen address, or NULL if mmap failed.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    /* Release the probe mapping immediately; only its address matters.
       gt_pch_use_address will map the real data there later.  */
    munmap ((caddr_t) ret, size);

  return ret;
}
714
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  /* MAP_FIXED is deliberately not used: it would silently unmap anything
     already living at BASE.  Instead, a mapping placed elsewhere is
     treated as failure.  */
  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
739 #endif /* HAVE_MMAP_FILE */
740
741 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
742
/* Modify the bound based on rlimits.  LIMIT is a proposed memory bound in
   bytes; return it clamped to the relevant process resource limit when
   one is available and smaller.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
775
/* Heuristic to set a default for GGC_MIN_EXPAND.  Returns a percentage
   derived from available physical memory (and rlimits).  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
794
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  Returns a heap size
   in kilobytes, derived from physical RAM and resource limits.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  /* Clamp to the documented [4M, 128M] range.  */
  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
833 #endif
834
/* Install heuristic defaults for the GC tuning parameters.  Skipped in
   GC-checking builds, where deterministic collection is wanted.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
843
/* Datastructure used to store per-call-site statistics.  */
struct loc_descriptor
{
  const char *file;	/* Source file of the allocation site.  */
  int line;		/* Line number of the allocation site.  */
  const char *function;	/* Function containing the allocation site.  */
  int times;		/* Number of allocations made from this site.  */
  size_t allocated;	/* Total bytes requested.  */
  size_t overhead;	/* Total allocator overhead bytes.  */
  size_t freed;		/* Bytes released by explicit ggc_free.  */
  size_t collected;	/* Bytes reclaimed by the collector.  */
};
856
/* Hashtable used for statistics, keyed by call site (lazily created in
   loc_descriptor).  */
static htab_t loc_hash;

/* Hash table helpers functions.  */
/* Hash a loc_descriptor by its function pointer and line; the file is
   left to the equality test.  */
static hashval_t
hash_descriptor (const void *p)
{
  const struct loc_descriptor *const d = (const struct loc_descriptor *) p;

  return htab_hash_pointer (d->function) | d->line;
}
868
869 static int
870 eq_descriptor (const void *p1, const void *p2)
871 {
872 const struct loc_descriptor *const d = (const struct loc_descriptor *) p1;
873 const struct loc_descriptor *const d2 = (const struct loc_descriptor *) p2;
874
875 return (d->file == d2->file && d->line == d2->line
876 && d->function == d2->function);
877 }
878
/* Hashtable converting address of allocated field to loc descriptor
   (lazily created in ggc_record_overhead).  */
static htab_t ptr_hash;
struct ptr_hash_entry
{
  void *ptr;			/* The recorded allocation's address.  */
  struct loc_descriptor *loc;	/* Call site that made the allocation.  */
  size_t size;			/* Allocated bytes plus overhead.  */
};
887
/* Hash table helpers functions.  */
/* Hash a ptr_hash_entry by the address it records.  */
static hashval_t
hash_ptr (const void *p)
{
  const struct ptr_hash_entry *const d = (const struct ptr_hash_entry *) p;

  return htab_hash_pointer (d->ptr);
}
896
/* Equality callback for ptr_hash.  Deliberately asymmetric: P1 is a
   stored ptr_hash_entry, P2 is the raw pointer used as lookup key.  */
static int
eq_ptr (const void *p1, const void *p2)
{
  const struct ptr_hash_entry *const p = (const struct ptr_hash_entry *) p1;

  return (p->ptr == p2);
}
904
/* Return descriptor for given call site NAME:LINE (FUNCTION), create new
   one if needed.  Lazily creates loc_hash on first use; new descriptors
   start with all counters zero (XCNEW).  */
static struct loc_descriptor *
loc_descriptor (const char *name, int line, const char *function)
{
  struct loc_descriptor loc;
  struct loc_descriptor **slot;

  /* A stack-local key is enough for lookup; a heap copy is made only
     when the site is new.  */
  loc.file = name;
  loc.line = line;
  loc.function = function;
  if (!loc_hash)
    loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);

  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
  if (*slot)
    return *slot;
  *slot = XCNEW (struct loc_descriptor);
  (*slot)->file = name;
  (*slot)->line = line;
  (*slot)->function = function;
  return *slot;
}
927
928 /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */
929 void
930 ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
931 const char *name, int line, const char *function)
932 {
933 struct loc_descriptor *loc = loc_descriptor (name, line, function);
934 struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
935 PTR *slot;
936
937 p->ptr = ptr;
938 p->loc = loc;
939 p->size = allocated + overhead;
940 if (!ptr_hash)
941 ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
942 slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
943 gcc_assert (!*slot);
944 *slot = p;
945
946 loc->times++;
947 loc->allocated+=allocated;
948 loc->overhead+=overhead;
949 }
950
/* Helper function for prune_overhead_list.  See if SLOT is still marked and
   remove it from hashtable if it is not.  Its size is credited to the
   owning call site's "collected" counter.  Always returns 1 to continue
   traversal.  */
static int
ggc_prune_ptr (void **slot, void *b ATTRIBUTE_UNUSED)
{
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  if (!ggc_marked_p (p->ptr))
    {
      p->loc->collected += p->size;
      htab_clear_slot (ptr_hash, slot);
      free (p);
    }
  return 1;
}
965
966 /* After live values has been marked, walk all recorded pointers and see if
967 they are still live. */
968 void
969 ggc_prune_overhead_list (void)
970 {
971 htab_traverse (ptr_hash, ggc_prune_ptr, NULL);
972 }
973
/* Notice that the pointer PTR has been freed: credit its size to the
   owning call site's "freed" counter and drop its tracking entry.  */
void
ggc_free_overhead (void *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
					NO_INSERT);
  struct ptr_hash_entry *p;
  /* The pointer might be not found if a PCH read happened between allocation
     and ggc_free () call.  FIXME: account memory properly in the presence of
     PCH.  */
  if (!slot)
    return;
  p = (struct ptr_hash_entry *) *slot;
  p->loc->freed += p->size;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}
991
/* Helper for qsort; sort descriptors by amount of memory consumed
   (leaked bytes: allocated + overhead - freed), ascending.  Used for the
   end-of-compilation report where collected memory is irrelevant.  */
static int
final_cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;
  /* NOTE(review): the (long) cast applies only to the first operand, so
     the subtraction is done in unsigned arithmetic and then truncated;
     large totals could in principle mis-order — inherited behavior.  */
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
	  (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
1005
/* Helper for qsort; sort descriptors by amount of memory consumed,
   ascending: first by live bytes (allocated + overhead - freed -
   collected), with leaked bytes as the tie-breaker.  */
static int
cmp_statistic (const void *loc1, const void *loc2)
{
  const struct loc_descriptor *const l1 =
    *(const struct loc_descriptor *const *) loc1;
  const struct loc_descriptor *const l2 =
    *(const struct loc_descriptor *const *) loc2;
  long diff;

  diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
	  (l2->allocated + l2->overhead - l2->freed - l2->collected));
  if (diff)
    return diff > 0 ? 1 : diff < 0 ? -1 : 0;
  diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
	  (l2->allocated + l2->overhead - l2->freed));
  return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
1024
/* Collect array of the descriptors from hashtable.  */
/* Flat array filled by add_statistics; sized by the caller from
   loc_hash->n_elements.  */
static struct loc_descriptor **loc_array;
/* htab_traverse callback: append the descriptor in SLOT to loc_array,
   using *B as the running fill index.  Always returns 1.  */
static int
add_statistics (void **slot, void *b)
{
  int *n = (int *)b;
  loc_array[*n] = (struct loc_descriptor *) *slot;
  (*n)++;
  return 1;
}
1035
/* Dump per-site memory statistics to stderr.  FINAL selects the sort
   order: the end-of-run ordering (ignoring collected bytes) or the
   regular one.  Forces a collection first so "collected" figures are
   up to date.  No-op unless GATHER_STATISTICS is enabled.  */

void
dump_ggc_loc_statistics (bool final)
{
  int nentries = 0;
  char s[4096];
  size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
  int i;

  if (! GATHER_STATISTICS)
    return;

  /* Force a full collection so garbage is attributed to its sites.  */
  ggc_force_collect = true;
  ggc_collect ();

  loc_array = XCNEWVEC (struct loc_descriptor *, loc_hash->n_elements);
  fprintf (stderr, "-------------------------------------------------------\n");
  fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  htab_traverse (loc_hash, add_statistics, &nentries);
  qsort (loc_array, nentries, sizeof (*loc_array),
	 final ? final_cmp_statistic : cmp_statistic);
  /* First pass: compute the grand totals used as percentage bases.  */
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      allocated += d->allocated;
      times += d->times;
      freed += d->freed;
      collected += d->collected;
      overhead += d->overhead;
    }
  /* Second pass: print each site's counters and share of the totals.
     NOTE(review): a zero total (e.g. nothing collected) makes the
     corresponding percentage a division by zero — inherited behavior,
     harmless for this debug-only output on IEEE hosts.  */
  for (i = 0; i < nentries; i++)
    {
      struct loc_descriptor *d = loc_array[i];
      if (d->allocated)
	{
	  /* Trim the path to start after the last "gcc/" component.  */
	  const char *s1 = d->file;
	  const char *s2;
	  while ((s2 = strstr (s1, "gcc/")))
	    s1 = s2 + 4;
	  sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
	  s[48] = 0;
	  fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
		   (long)d->collected,
		   (d->collected) * 100.0 / collected,
		   (long)d->freed,
		   (d->freed) * 100.0 / freed,
		   (long)(d->allocated + d->overhead - d->freed - d->collected),
		   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
		   / (allocated + overhead - freed - collected),
		   (long)d->overhead,
		   d->overhead * 100.0 / overhead,
		   (long)d->times);
	}
    }
  fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n",
	   "Total", (long)collected, (long)freed,
	   (long)(allocated + overhead - freed - collected), (long)overhead,
	   (long)times);
  fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n",
	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
  fprintf (stderr, "-------------------------------------------------------\n");
  ggc_force_collect = false;
}