]>
Commit | Line | Data |
---|---|---|
bebb7b68 | 1 | /* Simple garbage collection for the GNU compiler. |
3aea1f79 | 2 | Copyright (C) 1999-2014 Free Software Foundation, Inc. |
bebb7b68 | 3 | |
f12b58b3 | 4 | This file is part of GCC. |
bebb7b68 | 5 | |
f12b58b3 | 6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 8 | Software Foundation; either version 3, or (at your option) any later |
f12b58b3 | 9 | version. |
bebb7b68 | 10 | |
f12b58b3 | 11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
155b05dc | 13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. | |
bebb7b68 | 15 | |
155b05dc | 16 | You should have received a copy of the GNU General Public License |
8c4c00c1 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
bebb7b68 | 19 | |
20 | /* Generic garbage collection (GC) functions and data, not specific to | |
21 | any particular GC implementation. */ | |
22 | ||
23 | #include "config.h" | |
24 | #include "system.h" | |
805e22b2 | 25 | #include "coretypes.h" |
d9dd21a8 | 26 | #include "hash-table.h" |
f8e15e8a | 27 | #include "ggc.h" |
ba72912a | 28 | #include "ggc-internal.h" |
0b205f4c | 29 | #include "diagnostic-core.h" |
7d83df95 | 30 | #include "params.h" |
ddf4604f | 31 | #include "hosthooks.h" |
53ee4dac | 32 | #include "hosthooks-def.h" |
740cd0be | 33 | #include "plugin.h" |
34 | #include "vec.h" | |
dbb19e66 | 35 | #include "timevar.h" |
573aba85 | 36 | |
0ca9a7b6 | 37 | /* When set, ggc_collect will do collection. */ |
38 | bool ggc_force_collect; | |
39 | ||
dfecde36 | 40 | /* When true, protect the contents of the identifier hash table. */ |
41 | bool ggc_protect_identifiers = true; | |
42 | ||
4e00b6fd | 43 | /* Statistics about the allocation. */ |
44 | static ggc_statistics *ggc_stats; | |
45 | ||
573aba85 | 46 | struct traversal_state; |
47 | ||
6ec1f4e0 | 48 | static int ggc_htab_delete (void **, void *); |
6ec1f4e0 | 49 | static int compare_ptr_data (const void *, const void *); |
50 | static void relocate_ptrs (void *, void *); | |
51 | static void write_pch_globals (const struct ggc_root_tab * const *tab, | |
52 | struct traversal_state *state); | |
bebb7b68 | 53 | |
54 | /* Maintain global roots that are preserved during GC. */ | |
55 | ||
15d769aa | 56 | /* Process a slot of an htab by deleting it if it has not been marked. */ |
57 | ||
58 | static int | |
6ec1f4e0 | 59 | ggc_htab_delete (void **slot, void *info) |
15d769aa | 60 | { |
1f3233d1 | 61 | const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info; |
15d769aa | 62 | |
63 | if (! (*r->marked_p) (*slot)) | |
1f3233d1 | 64 | htab_clear_slot (*r->base, slot); |
65 | else | |
66 | (*r->cb) (*slot); | |
15d769aa | 67 | |
68 | return 1; | |
69 | } | |
70 | ||
740cd0be | 71 | |
72 | /* This extra vector of dynamically registered root_tab-s is used by | |
73 | ggc_mark_roots and gives the ability to dynamically add new GGC root | |
86b63696 | 74 | tables, for instance from some plugins; this vector is on the heap |
75 | since it is used by GGC internally. */ | |
76 | typedef const struct ggc_root_tab *const_ggc_root_tab_t; | |
f1f41a6c | 77 | static vec<const_ggc_root_tab_t> extra_root_vec; |
740cd0be | 78 | |
740cd0be | 79 | /* Dynamically register a new GGC root table RT. This is useful for |
80 | plugins. */ | |
81 | ||
48e1416a | 82 | void |
740cd0be | 83 | ggc_register_root_tab (const struct ggc_root_tab* rt) |
84 | { | |
86b63696 | 85 | if (rt) |
f1f41a6c | 86 | extra_root_vec.safe_push (rt); |
740cd0be | 87 | } |
88 | ||
86b63696 | 89 | /* This extra vector of dynamically registered cache_tab-s is used by |
90 | ggc_mark_roots and gives the ability to dynamically add new GGC cache | |
91 | tables, for instance from some plugins; this vector is on the heap | |
92 | since it is used by GGC internally. */ | |
93 | typedef const struct ggc_cache_tab *const_ggc_cache_tab_t; | |
f1f41a6c | 94 | static vec<const_ggc_cache_tab_t> extra_cache_vec; |
86b63696 | 95 | |
96 | /* Dynamically register a new GGC cache table CT. This is useful for | |
97 | plugins. */ | |
98 | ||
99 | void | |
100 | ggc_register_cache_tab (const struct ggc_cache_tab* ct) | |
101 | { | |
102 | if (ct) | |
f1f41a6c | 103 | extra_cache_vec.safe_push (ct); |
86b63696 | 104 | } |
105 | ||
/* Scan a hash table that has objects which are to be deleted if they are not
   already marked.  CTP is a null-terminated array of cache-table entries.  */

static void
ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
{
  const struct ggc_cache_tab *cti;

  for (cti = ctp; cti->base != NULL; cti++)
    if (*cti->base)
      {
	/* Keep the htab object itself alive across the collection.  */
	ggc_set_mark (*cti->base);
	/* Evict entries whose referents are unmarked; ggc_htab_delete
	   applies CTI->cb to the survivors.  */
	htab_traverse_noresize (*cti->base, ggc_htab_delete,
				CONST_CAST (void *, (const void *)cti));
	/* The entries array is a separate GC allocation from the htab
	   structure, so it must be marked on its own.  */
	ggc_set_mark ((*cti->base)->entries);
      }
}
740cd0be | 123 | |
c03efe25 | 124 | /* Mark all the roots in the table RT. */ |
125 | ||
126 | static void | |
127 | ggc_mark_root_tab (const_ggc_root_tab_t rt) | |
128 | { | |
129 | size_t i; | |
130 | ||
131 | for ( ; rt->base != NULL; rt++) | |
132 | for (i = 0; i < rt->nelt; i++) | |
133 | (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i)); | |
134 | } | |
135 | ||
/* Iterate through all registered roots and mark each element.  The phase
   order matters: deletable roots are zeroed first, then ordinary roots
   (built-in and plugin-registered) are marked, then cache tables are
   pruned of unmarked entries, and finally plugins get a chance to mark.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  const struct ggc_cache_tab *const *ct;
  const_ggc_cache_tab_t ctp;
  size_t i;

  /* Anything reachable only through a deletable root dies at every
     collection; clear the roots so nothing is marked through them.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  /* Root tables registered at run time, e.g. by plugins.  */
  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    ggc_scan_cache_tab (*ct);

  /* Cache tables registered at run time, e.g. by plugins.  */
  FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
    ggc_scan_cache_tab (ctp);

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
174 | ||
1f3233d1 | 175 | /* Allocate a block of memory, then clear it. */ |
176 | void * | |
92f06184 | 177 | ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n |
178 | MEM_STAT_DECL) | |
cf6cce73 | 179 | { |
92f06184 | 180 | void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT); |
1f3233d1 | 181 | memset (buf, 0, size); |
182 | return buf; | |
cf6cce73 | 183 | } |
184 | ||
/* Resize a block of memory, possibly re-allocating it.  X may be NULL,
   in which case this is a plain allocation.  Shrinking is done in place;
   growing allocates a new block, copies, and frees the old one, so the
   returned pointer may differ from X.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
230 | ||
/* Allocator callback for htab_create_ggc: allocate one zeroed struct htab
   from GC memory.  The C*N arguments only sanity-check the request size.  */
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
/* Allocate a zeroed array of C pointers (each of size N) from GC memory;
   used as the htab entry-array allocator.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}
247 | ||
/* These are for splay_tree_new_ggc.  */
/* Allocate SZ bytes of GC memory for a splay tree node.  NL is the
   splay tree's allocate-data argument and must be null here.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

/* Deallocation hook for GC-backed splay trees: GC memory is never freed
   explicitly, so this does nothing.  NL must be null as above.  */
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}
261 | ||
/* Print statistics that are independent of the collector in use.  */
/* SCALE reduces a byte count X to a small number and LABEL supplies the
   matching unit suffix (' ', 'k' or 'M'), for human-readable output.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

/* Collect (via one forced collection) and report allocation statistics
   into STATS.  STREAM is currently unused since no collector-independent
   statistics are actually printed here.  */
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
286 | \f | |
287 | /* Functions for saving and restoring GCable memory to disk. */ | |
288 | ||
/* Bookkeeping for one GC object that will be written to the PCH file.  */
struct ptr_data
{
  void *obj;			/* The object's current address.  */
  void *note_ptr_cookie;	/* Cookie passed back to NOTE_PTR_FN.  */
  gt_note_pointers note_ptr_fn;	/* Walks the pointers inside OBJ.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of OBJ in bytes.  */
  void *new_addr;		/* Address OBJ will occupy after restore.  */
};

/* Hash a pointer; the >> 3 presumably discards always-zero alignment
   bits -- TODO confirm minimum GC object alignment.  */
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  Entries are keyed by OBJ so lookups
   take the raw object pointer as the compare value.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
saving_hasher::hash (const value_type *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const value_type *p1, const compare_type *p2)
{
  return p1->obj == p2;
}

/* Maps object addresses to their ptr_data; live only during gt_pch_save.  */
static hash_table<saving_hasher> *saving_htab;
d9dd21a8 | 324 | |
573aba85 | 325 | /* Register an object in the hash table. */ |
326 | ||
327 | int | |
6ec1f4e0 | 328 | gt_pch_note_object (void *obj, void *note_ptr_cookie, |
5cc13354 | 329 | gt_note_pointers note_ptr_fn) |
573aba85 | 330 | { |
331 | struct ptr_data **slot; | |
6ec1f4e0 | 332 | |
573aba85 | 333 | if (obj == NULL || obj == (void *) 1) |
334 | return 0; | |
335 | ||
336 | slot = (struct ptr_data **) | |
c1f445d2 | 337 | saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT); |
573aba85 | 338 | if (*slot != NULL) |
339 | { | |
0d59b19d | 340 | gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn |
341 | && (*slot)->note_ptr_cookie == note_ptr_cookie); | |
573aba85 | 342 | return 0; |
343 | } | |
6ec1f4e0 | 344 | |
4077bf7a | 345 | *slot = XCNEW (struct ptr_data); |
573aba85 | 346 | (*slot)->obj = obj; |
347 | (*slot)->note_ptr_fn = note_ptr_fn; | |
348 | (*slot)->note_ptr_cookie = note_ptr_cookie; | |
349 | if (note_ptr_fn == gt_pch_p_S) | |
4077bf7a | 350 | (*slot)->size = strlen ((const char *)obj) + 1; |
573aba85 | 351 | else |
352 | (*slot)->size = ggc_get_size (obj); | |
353 | return 1; | |
354 | } | |
355 | ||
/* Register a reorder function for OBJ, which must already have been
   noted with gt_pch_note_object using the same NOTE_PTR_COOKIE.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}
373 | ||
/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of registered objects.  */
  struct ptr_data **ptrs;	/* Flat array of all registered objects.  */
  size_t ptrs_i;		/* Next free index into PTRS.  */
};

/* Callbacks for htab_traverse.  */

/* First pass: tell the collector about each object so it can size the
   PCH area, and count the objects.  Always returns 1 to continue.  */
int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

/* Second pass: assign each object its restored address and collect the
   entries into STATE->ptrs for sorting.  Always returns 1 to continue.  */
int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
408 | ||
409 | /* Callback for qsort. */ | |
410 | ||
411 | static int | |
6ec1f4e0 | 412 | compare_ptr_data (const void *p1_p, const void *p2_p) |
573aba85 | 413 | { |
9f627b1a | 414 | const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p; |
415 | const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p; | |
573aba85 | 416 | return (((size_t)p1->new_addr > (size_t)p2->new_addr) |
417 | - ((size_t)p1->new_addr < (size_t)p2->new_addr)); | |
418 | } | |
419 | ||
420 | /* Callbacks for note_ptr_fn. */ | |
421 | ||
422 | static void | |
6ec1f4e0 | 423 | relocate_ptrs (void *ptr_p, void *state_p) |
573aba85 | 424 | { |
425 | void **ptr = (void **)ptr_p; | |
6ec1f4e0 | 426 | struct traversal_state *state ATTRIBUTE_UNUSED |
573aba85 | 427 | = (struct traversal_state *)state_p; |
428 | struct ptr_data *result; | |
429 | ||
430 | if (*ptr == NULL || *ptr == (void *)1) | |
431 | return; | |
6ec1f4e0 | 432 | |
4077bf7a | 433 | result = (struct ptr_data *) |
c1f445d2 | 434 | saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr)); |
0d59b19d | 435 | gcc_assert (result); |
573aba85 | 436 | *ptr = result->new_addr; |
437 | } | |
438 | ||
/* Write out, after relocation, the pointers in TAB.  Each root slot's
   pointer is translated to its post-restore address via saving_htab and
   written to STATE->f; null and sentinel pointers are written as-is.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error ("can%'t write PCH file: %m");
	    }
	}
}
470 | ||
/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;	/* File offset of the GC data (alignment-padded).  */
  size_t size;		/* Total size in bytes of the saved GC data.  */
  void *preferred_base;	/* Address the data was laid out to live at.  */
};
479 | ||
/* Write out the state of the compiler to F.  The layout is: scalar
   roots, relocated global root pointers, an mmap_info record, padding
   to the host's allocation granularity, then the GC objects themselves
   with their internal pointers relocated to PREFERRED_BASE-relative
   addresses.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  /* Walk every reachable object from the roots; the per-type pch-walk
     callbacks (PCHW) note each object into saving_htab.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  /* Objects are written in address order of their restored location.  */
  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error ("can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  Each object is copied into
     THIS_OBJECT (a scratch buffer grown as needed) so that relocation
     can mutate the live copy and restore it afterwards.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      /* Restore the in-memory object from the scratch copy; strings
	 (gt_pch_p_S) have no internal pointers to undo.  */
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}
661 | ||
/* Read the state of the compiler back in from F.  The file layout must
   mirror gt_pch_save: scalars, global pointers, an mmap_info record,
   padding, then the object data mapped or read at its preferred base.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  fatal_error ("can%'t read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  /* The host hook returns 1 if it mapped the data at the preferred base,
     0 if it allocated memory there but we must read the data ourselves,
     and negative if the address could not be obtained at all.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    fatal_error ("had to relocate PCH");
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error ("can%'t read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can%'t read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
ddf4604f | 721 | |
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, as relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  /* NOTE(review): ADDR is never freed -- on success it becomes the PCH
     arena; on mismatch the caller aborts PCH use, so the leak is
     presumably harmless.  Confirm against gt_pch_restore.  */
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}
756 | ||
#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  /* Probe for an address by mapping and immediately unmapping; the
     address is only advisory and may be taken by the time we restore.  */
  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */
7d83df95 | 802 | |
9ca7413c | 803 | #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT |
804 | ||
/* Clamp LIMIT to whatever virtual-memory resource limits the process is
   running under, when the host exposes them; return the clamped value.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit r;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &r) == 0
      && r.rlim_cur != (rlim_t) RLIM_INFINITY
      && r.rlim_cur < limit)
    limit = r.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &r) == 0
      && r.rlim_cur != (rlim_t) RLIM_INFINITY
      && r.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && r.rlim_cur >= 8 * 1024 * 1024)
    limit = r.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
837 | ||
7d83df95 | 838 | /* Heuristic to set a default for GGC_MIN_EXPAND. */ |
9ca7413c | 839 | static int |
6ec1f4e0 | 840 | ggc_min_expand_heuristic (void) |
7d83df95 | 841 | { |
9af5ce0c | 842 | double min_expand = physmem_total (); |
2577ed5c | 843 | |
844 | /* Adjust for rlimits. */ | |
845 | min_expand = ggc_rlimit_bound (min_expand); | |
6ec1f4e0 | 846 | |
7d83df95 | 847 | /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding |
848 | a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */ | |
849 | min_expand /= 1024*1024*1024; | |
850 | min_expand *= 70; | |
851 | min_expand = MIN (min_expand, 70); | |
852 | min_expand += 30; | |
853 | ||
854 | return min_expand; | |
855 | } | |
856 | ||
857 | /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */ | |
9ca7413c | 858 | static int |
6ec1f4e0 | 859 | ggc_min_heapsize_heuristic (void) |
7d83df95 | 860 | { |
9af5ce0c | 861 | double phys_kbytes = physmem_total (); |
4cb2b0dd | 862 | double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2); |
2577ed5c | 863 | |
4cb2b0dd | 864 | phys_kbytes /= 1024; /* Convert to Kbytes. */ |
865 | limit_kbytes /= 1024; | |
6ec1f4e0 | 866 | |
7d83df95 | 867 | /* The heuristic is RAM/8, with a lower bound of 4M and an upper |
868 | bound of 128M (when RAM >= 1GB). */ | |
4cb2b0dd | 869 | phys_kbytes /= 8; |
870 | ||
871 | #if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS) | |
48e1416a | 872 | /* Try not to overrun the RSS limit while doing garbage collection. |
4cb2b0dd | 873 | The RSS limit is only advisory, so no margin is subtracted. */ |
874 | { | |
875 | struct rlimit rlim; | |
876 | if (getrlimit (RLIMIT_RSS, &rlim) == 0 | |
877 | && rlim.rlim_cur != (rlim_t) RLIM_INFINITY) | |
878 | phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024); | |
879 | } | |
880 | # endif | |
881 | ||
882 | /* Don't blindly run over our data limit; do GC at least when the | |
3a1e1e08 | 883 | *next* GC would be within 20Mb of the limit or within a quarter of |
884 | the limit, whichever is larger. If GCC does hit the data limit, | |
885 | compilation will fail, so this tries to be conservative. */ | |
886 | limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024)); | |
ba72912a | 887 | limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ()); |
4cb2b0dd | 888 | phys_kbytes = MIN (phys_kbytes, limit_kbytes); |
889 | ||
890 | phys_kbytes = MAX (phys_kbytes, 4 * 1024); | |
891 | phys_kbytes = MIN (phys_kbytes, 128 * 1024); | |
7d83df95 | 892 | |
4cb2b0dd | 893 | return phys_kbytes; |
7d83df95 | 894 | } |
9ca7413c | 895 | #endif |
7d83df95 | 896 | |
/* Install heuristic defaults for the GC tuning parameters.  When GC
   checking or always-collect debugging is enabled, the defaults are
   left alone so collections stay frequent.  */
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
674b05f5 | 905 | |
/* Datastructure used to store per-call-site statistics.  */
struct ggc_loc_descriptor
{
  const char *file;	/* Source file of the allocation site.  */
  int line;		/* Line number within FILE.  */
  const char *function;	/* Function containing the site.  */
  int times;		/* Number of allocations made here.  */
  size_t allocated;	/* Total bytes requested at this site.  */
  size_t overhead;	/* Total allocator overhead bytes.  */
  size_t freed;		/* Bytes explicitly released via ggc_free.  */
  size_t collected;	/* Bytes reclaimed by garbage collection.  */
};
918 | ||
/* Hash table helper: hashing and equality callbacks for the table of
   per-call-site statistics.  Removal is a no-op (typed_noop_remove);
   descriptors are never deleted once created.  */

struct ggc_loc_desc_hasher : typed_noop_remove <ggc_loc_descriptor>
{
  typedef ggc_loc_descriptor value_type;
  typedef ggc_loc_descriptor compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
674b05f5 | 928 | |
d9dd21a8 | 929 | inline hashval_t |
9908fe4d | 930 | ggc_loc_desc_hasher::hash (const value_type *d) |
d9dd21a8 | 931 | { |
674b05f5 | 932 | return htab_hash_pointer (d->function) | d->line; |
933 | } | |
934 | ||
d9dd21a8 | 935 | inline bool |
9908fe4d | 936 | ggc_loc_desc_hasher::equal (const value_type *d, const compare_type *d2) |
674b05f5 | 937 | { |
674b05f5 | 938 | return (d->file == d2->file && d->line == d2->line |
939 | && d->function == d2->function); | |
940 | } | |
941 | ||
d9dd21a8 | 942 | /* Hashtable used for statistics. */ |
9908fe4d | 943 | static hash_table<ggc_loc_desc_hasher> *loc_hash; |
d9dd21a8 | 944 | |
/* One tracked live allocation: maps an allocated pointer back to the
   descriptor of the call site that produced it.  */
struct ggc_ptr_hash_entry
{
  void *ptr;			 /* The allocated object.  */
  struct ggc_loc_descriptor *loc; /* Call site that allocated it.  */
  size_t size;			 /* Allocation size including overhead.  */
};
951 | ||
/* Helper for ptr_hash table.  Lookups key directly on the raw pointer
   (compare_type is void).  Removal is a no-op; entries are freed
   explicitly by the prune and free paths below.  */

struct ptr_hash_hasher : typed_noop_remove <ggc_ptr_hash_entry>
{
  typedef ggc_ptr_hash_entry value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};
0ca9a7b6 | 961 | |
d9dd21a8 | 962 | inline hashval_t |
963 | ptr_hash_hasher::hash (const value_type *d) | |
964 | { | |
0ca9a7b6 | 965 | return htab_hash_pointer (d->ptr); |
966 | } | |
967 | ||
d9dd21a8 | 968 | inline bool |
969 | ptr_hash_hasher::equal (const value_type *p, const compare_type *p2) | |
0ca9a7b6 | 970 | { |
0ca9a7b6 | 971 | return (p->ptr == p2); |
972 | } | |
973 | ||
d9dd21a8 | 974 | /* Hashtable converting address of allocated field to loc descriptor. */ |
c1f445d2 | 975 | static hash_table<ptr_hash_hasher> *ptr_hash; |
d9dd21a8 | 976 | |
674b05f5 | 977 | /* Return descriptor for given call site, create new one if needed. */ |
9908fe4d | 978 | static struct ggc_loc_descriptor * |
d9dd21a8 | 979 | make_loc_descriptor (const char *name, int line, const char *function) |
674b05f5 | 980 | { |
9908fe4d | 981 | struct ggc_loc_descriptor loc; |
982 | struct ggc_loc_descriptor **slot; | |
674b05f5 | 983 | |
984 | loc.file = name; | |
985 | loc.line = line; | |
986 | loc.function = function; | |
c1f445d2 | 987 | if (!loc_hash) |
9908fe4d | 988 | loc_hash = new hash_table<ggc_loc_desc_hasher> (10); |
674b05f5 | 989 | |
c1f445d2 | 990 | slot = loc_hash->find_slot (&loc, INSERT); |
674b05f5 | 991 | if (*slot) |
992 | return *slot; | |
9908fe4d | 993 | *slot = XCNEW (struct ggc_loc_descriptor); |
674b05f5 | 994 | (*slot)->file = name; |
995 | (*slot)->line = line; | |
996 | (*slot)->function = function; | |
997 | return *slot; | |
998 | } | |
999 | ||
3927afe0 | 1000 | /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */ |
1001 | void | |
0ca9a7b6 | 1002 | ggc_record_overhead (size_t allocated, size_t overhead, void *ptr, |
3927afe0 | 1003 | const char *name, int line, const char *function) |
674b05f5 | 1004 | { |
9908fe4d | 1005 | struct ggc_loc_descriptor *loc = make_loc_descriptor (name, line, function); |
1006 | struct ggc_ptr_hash_entry *p = XNEW (struct ggc_ptr_hash_entry); | |
1007 | ggc_ptr_hash_entry **slot; | |
0ca9a7b6 | 1008 | |
1009 | p->ptr = ptr; | |
1010 | p->loc = loc; | |
1011 | p->size = allocated + overhead; | |
c1f445d2 | 1012 | if (!ptr_hash) |
1013 | ptr_hash = new hash_table<ptr_hash_hasher> (10); | |
1014 | slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT); | |
0d59b19d | 1015 | gcc_assert (!*slot); |
0ca9a7b6 | 1016 | *slot = p; |
674b05f5 | 1017 | |
1018 | loc->times++; | |
1019 | loc->allocated+=allocated; | |
1020 | loc->overhead+=overhead; | |
1021 | } | |
1022 | ||
0ca9a7b6 | 1023 | /* Helper function for prune_overhead_list. See if SLOT is still marked and |
1024 | remove it from hashtable if it is not. */ | |
d9dd21a8 | 1025 | int |
9908fe4d | 1026 | ggc_prune_ptr (ggc_ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED) |
0ca9a7b6 | 1027 | { |
9908fe4d | 1028 | struct ggc_ptr_hash_entry *p = *slot; |
0ca9a7b6 | 1029 | if (!ggc_marked_p (p->ptr)) |
1030 | { | |
1031 | p->loc->collected += p->size; | |
c1f445d2 | 1032 | ptr_hash->clear_slot (slot); |
0ca9a7b6 | 1033 | free (p); |
1034 | } | |
1035 | return 1; | |
1036 | } | |
1037 | ||
/* After live values have been marked, walk all recorded pointers and see
   if they are still live, pruning the entries for objects that did not
   survive collection.  */
void
ggc_prune_overhead_list (void)
{
  ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
}
1045 | ||
1046 | /* Notice that the pointer has been freed. */ | |
2e2fd8fe | 1047 | void |
1048 | ggc_free_overhead (void *ptr) | |
0ca9a7b6 | 1049 | { |
9908fe4d | 1050 | ggc_ptr_hash_entry **slot |
c1f445d2 | 1051 | = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT); |
9908fe4d | 1052 | struct ggc_ptr_hash_entry *p; |
9ca7413c | 1053 | /* The pointer might be not found if a PCH read happened between allocation |
1054 | and ggc_free () call. FIXME: account memory properly in the presence of | |
1055 | PCH. */ | |
1056 | if (!slot) | |
1057 | return; | |
9908fe4d | 1058 | p = (struct ggc_ptr_hash_entry *) *slot; |
0ca9a7b6 | 1059 | p->loc->freed += p->size; |
c1f445d2 | 1060 | ptr_hash->clear_slot (slot); |
0ca9a7b6 | 1061 | free (p); |
1062 | } | |
1063 | ||
674b05f5 | 1064 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ |
1065 | static int | |
51949610 | 1066 | final_cmp_statistic (const void *loc1, const void *loc2) |
674b05f5 | 1067 | { |
9908fe4d | 1068 | const struct ggc_loc_descriptor *const l1 = |
1069 | *(const struct ggc_loc_descriptor *const *) loc1; | |
1070 | const struct ggc_loc_descriptor *const l2 = | |
1071 | *(const struct ggc_loc_descriptor *const *) loc2; | |
51949610 | 1072 | long diff; |
1073 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
0c07444c | 1074 | (l2->allocated + l2->overhead - l2->freed)); |
51949610 | 1075 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; |
1076 | } | |
1077 | ||
1078 | /* Helper for qsort; sort descriptors by amount of memory consumed. */ | |
1079 | static int | |
1080 | cmp_statistic (const void *loc1, const void *loc2) | |
1081 | { | |
9908fe4d | 1082 | const struct ggc_loc_descriptor *const l1 = |
1083 | *(const struct ggc_loc_descriptor *const *) loc1; | |
1084 | const struct ggc_loc_descriptor *const l2 = | |
1085 | *(const struct ggc_loc_descriptor *const *) loc2; | |
51949610 | 1086 | long diff; |
1087 | ||
1088 | diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) - | |
1089 | (l2->allocated + l2->overhead - l2->freed - l2->collected)); | |
1090 | if (diff) | |
1091 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
1092 | diff = ((long)(l1->allocated + l1->overhead - l1->freed) - | |
1093 | (l2->allocated + l2->overhead - l2->freed)); | |
1094 | return diff > 0 ? 1 : diff < 0 ? -1 : 0; | |
674b05f5 | 1095 | } |
1096 | ||
/* Scratch array into which the descriptors are collected from the
   hashtable before sorting.  */
static struct ggc_loc_descriptor **loc_array;

/* Hash-table traversal callback: append *SLOT to LOC_ARRAY at index *N
   and advance the count.  Always returns 1 to continue traversal.  */
int
ggc_add_statistics (ggc_loc_descriptor **slot, int *n)
{
  loc_array[(*n)++] = *slot;
  return 1;
}
1106 | ||
1107 | /* Dump per-site memory statistics. */ | |
ecd52ea9 | 1108 | |
2e2fd8fe | 1109 | void |
ecd52ea9 | 1110 | dump_ggc_loc_statistics (bool final) |
674b05f5 | 1111 | { |
674b05f5 | 1112 | int nentries = 0; |
1113 | char s[4096]; | |
0ca9a7b6 | 1114 | size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0; |
674b05f5 | 1115 | int i; |
1116 | ||
ecd52ea9 | 1117 | if (! GATHER_STATISTICS) |
1118 | return; | |
1119 | ||
0ca9a7b6 | 1120 | ggc_force_collect = true; |
1121 | ggc_collect (); | |
1122 | ||
9908fe4d | 1123 | loc_array = XCNEWVEC (struct ggc_loc_descriptor *, |
c1f445d2 | 1124 | loc_hash->elements_with_deleted ()); |
674b05f5 | 1125 | fprintf (stderr, "-------------------------------------------------------\n"); |
0ca9a7b6 | 1126 | fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n", |
1127 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
674b05f5 | 1128 | fprintf (stderr, "-------------------------------------------------------\n"); |
c1f445d2 | 1129 | loc_hash->traverse <int *, ggc_add_statistics> (&nentries); |
51949610 | 1130 | qsort (loc_array, nentries, sizeof (*loc_array), |
1131 | final ? final_cmp_statistic : cmp_statistic); | |
674b05f5 | 1132 | for (i = 0; i < nentries; i++) |
1133 | { | |
9908fe4d | 1134 | struct ggc_loc_descriptor *d = loc_array[i]; |
0ca9a7b6 | 1135 | allocated += d->allocated; |
1136 | times += d->times; | |
1137 | freed += d->freed; | |
1138 | collected += d->collected; | |
674b05f5 | 1139 | overhead += d->overhead; |
1140 | } | |
1141 | for (i = 0; i < nentries; i++) | |
1142 | { | |
9908fe4d | 1143 | struct ggc_loc_descriptor *d = loc_array[i]; |
674b05f5 | 1144 | if (d->allocated) |
1145 | { | |
1146 | const char *s1 = d->file; | |
1147 | const char *s2; | |
1148 | while ((s2 = strstr (s1, "gcc/"))) | |
1149 | s1 = s2 + 4; | |
1150 | sprintf (s, "%s:%i (%s)", s1, d->line, d->function); | |
0ca9a7b6 | 1151 | s[48] = 0; |
1152 | fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s, | |
1153 | (long)d->collected, | |
1154 | (d->collected) * 100.0 / collected, | |
1155 | (long)d->freed, | |
1156 | (d->freed) * 100.0 / freed, | |
1157 | (long)(d->allocated + d->overhead - d->freed - d->collected), | |
1158 | (d->allocated + d->overhead - d->freed - d->collected) * 100.0 | |
1159 | / (allocated + overhead - freed - collected), | |
1160 | (long)d->overhead, | |
1161 | d->overhead * 100.0 / overhead, | |
1162 | (long)d->times); | |
674b05f5 | 1163 | } |
1164 | } | |
0ca9a7b6 | 1165 | fprintf (stderr, "%-48s %10ld %10ld %10ld %10ld %10ld\n", |
1166 | "Total", (long)collected, (long)freed, | |
1167 | (long)(allocated + overhead - freed - collected), (long)overhead, | |
1168 | (long)times); | |
1169 | fprintf (stderr, "%-48s %10s %10s %10s %10s %10s\n", | |
1170 | "source location", "Garbage", "Freed", "Leak", "Overhead", "Times"); | |
674b05f5 | 1171 | fprintf (stderr, "-------------------------------------------------------\n"); |
5c3cc815 | 1172 | ggc_force_collect = false; |
674b05f5 | 1173 | } |