/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdefs when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
static double ggc_rlimit_bound (double);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
        {
          ggc_set_mark (*cti->base);
          htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
          ggc_set_mark ((*cti->base)->entries);
        }
}
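
#if 0
/* Illustrative sketch only, not part of this file: the root tables walked
   above are normally emitted by gengtype, but a hand-written entry for a
   single GC-protected global pointer would look roughly like this.  The
   field order is assumed to follow struct ggc_root_tab as declared in
   ggc.h (base, nelt, stride, cb, pchw); the marker functions named here
   are hypothetical.  */
static void *my_gc_global;

static const struct ggc_root_tab my_root_tab[] = {
  { &my_gc_global, 1, sizeof (my_gc_global),
    &gt_ggc_mx_my_type, &gt_pch_nx_my_type },
  { NULL, 0, 0, NULL, NULL }    /* terminating entry */
};
#endif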

/* Allocate a block of memory, then clear it.  */
void *
ggc_alloc_cleared (size_t size)
{
  void *buf = ggc_alloc (size);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}

/* Like ggc_alloc_cleared, but performs a multiplication.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}
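
#if 0
/* Illustrative usage sketch (hypothetical caller, not part of this file):
   allocate a zero-filled, GC-managed vector and later grow it.  */
static void
example_use (void)
{
  /* Sixteen cleared pointer slots.  */
  void **vec = ggc_calloc (16, sizeof (void *));

  /* Grow the vector; ggc_realloc may return the same block (when the
     underlying pool object is already big enough) or a fresh copy.  */
  vec = ggc_realloc (vec, 32 * sizeof (void *));
}
#endif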

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl != NULL)
    abort ();
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  if (nl != NULL)
    abort ();
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
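
#if 0
/* Illustrative sketch (not part of this file): collector statistics code
   typically pairs these two macros, printing a byte count scaled to a
   convenient unit together with its ' '/'k'/'M' suffix.  The function and
   parameter names here are made up.  */
static void
example_print (FILE *stream, size_t allocated_bytes)
{
  fprintf (stream, "Total allocated: %10lu%c\n",
           SCALE (allocated_bytes), LABEL (allocated_bytes));
}
#endif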

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
\f
/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      if ((*slot)->note_ptr_fn != note_ptr_fn
          || (*slot)->note_ptr_cookie != note_ptr_cookie)
        abort ();
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
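
#if 0
/* Illustrative sketch (hypothetical, not part of this file): the callers
   of gt_pch_note_object are the gengtype-generated "note" walkers.  A
   hand-written walker for a made-up struct with one string field and one
   pointer field would follow roughly this shape; gt_pch_nx_my_struct and
   gt_pch_p_my_struct are invented names, and gt_pch_n_S is assumed to be
   the usual string-noting helper.  */
struct my_struct
{
  const char *name;
  struct my_struct *next;
};

void
gt_pch_nx_my_struct (void *x_p)
{
  struct my_struct *x = (struct my_struct *) x_p;

  /* Only walk the fields the first time the object is noted.  */
  if (gt_pch_note_object (x, x, gt_pch_p_my_struct))
    {
      gt_pch_n_S (x->name);             /* note the string field */
      gt_pch_nx_my_struct (x->next);    /* note the pointed-to object */
    }
}
#endif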

/* Record that OBJ, which was previously registered with
   gt_pch_note_object, should have REORDER_FN applied to it before its
   pointers are noted at write-out time.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  if (data == NULL
      || data->note_ptr_cookie != note_ptr_cookie)
    abort ();

  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((struct ptr_data *)p1)->obj == p2;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  if (result == NULL)
    abort ();
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
          else
            {
              new_ptr = htab_find_with_hash (saving_htab, ptr,
                                             POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  size_t page_size = getpagesize();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size);

#if HAVE_MMAP_FILE
  if (mmi.preferred_base == NULL)
    {
      mmi.preferred_base = mmap (NULL, mmi.size,
                                 PROT_READ | PROT_WRITE, MAP_PRIVATE,
                                 fileno (state.f), 0);
      if (mmi.preferred_base == (void *) MAP_FAILED)
        mmi.preferred_base = NULL;
      else
        munmap (mmi.preferred_base, mmi.size);
    }
#endif /* HAVE_MMAP_FILE */

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  ggc_pch_prepare_write (state.d, state.f);

  /* Pad the PCH file so that the mmapped area starts on a page boundary.  */
  {
    long o;
    o = ftell (state.f);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    o += sizeof (mmi);
    mmi.offset = page_size - o % page_size;
    if (mmi.offset == page_size)
      mmi.offset = 0;
    mmi.offset += o;
  }
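  /* For example (illustrative numbers only): with a 4096-byte page size
     and o == 10000, o % page_size is 1808, so mmi.offset starts as 2288
     and becomes 12288 after adding o, the first page boundary at or after
     the end of the mmap_info record.  */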
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = xrealloc (this_object, this_object_size);
        }
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
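
/* A note on the resulting file layout (derived from the writes above,
   illustrative rather than normative): after whatever the caller has
   already written, gt_pch_save emits the scalar roots, then the relocated
   global pointers, then any collector-specific data written by
   ggc_pch_prepare_write, then the mmap_info record, then padding up to a
   page boundary, and finally the object data that gt_pch_restore maps or
   reads back in at mmi.offset.  */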

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  void *addr;
  bool needs_read;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  if (host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size))
    {
#if HAVE_MMAP_FILE
      void *mmap_result;

      mmap_result = mmap (mmi.preferred_base, mmi.size,
                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                          fileno (f), mmi.offset);

      /* The file might not be mmap-able.  */
      needs_read = mmap_result == (void *) MAP_FAILED;

      /* Sanity check for broken MAP_FIXED.  */
      if (! needs_read && mmap_result != mmi.preferred_base)
        abort ();
#else
      needs_read = true;
#endif
      addr = mmi.preferred_base;
    }
  else
    {
#if HAVE_MMAP_FILE
      addr = mmap (mmi.preferred_base, mmi.size,
                   PROT_READ | PROT_WRITE, MAP_PRIVATE,
                   fileno (f), mmi.offset);

#if HAVE_MINCORE
      if (addr != mmi.preferred_base)
        {
          size_t page_size = getpagesize();
          char one_byte;

          if (addr != (void *) MAP_FAILED)
            munmap (addr, mmi.size);

          /* We really want to be mapped at mmi.preferred_base
             so we're going to resort to MAP_FIXED.  But before,
             make sure that we can do so without destroying a
             previously mapped area, by looping over all pages
             that would be affected by the fixed mapping.  */
          errno = 0;

          for (i = 0; i < mmi.size; i += page_size)
            if (mincore ((char *)mmi.preferred_base + i, page_size,
                         (void *)&one_byte) == -1
                && errno == ENOMEM)
              continue; /* The page is not mapped.  */
            else
              break;

          if (i >= mmi.size)
            addr = mmap (mmi.preferred_base, mmi.size,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                         fileno (f), mmi.offset);
        }
#endif /* HAVE_MINCORE */

      needs_read = addr == (void *) MAP_FAILED;

#else /* HAVE_MMAP_FILE */
      needs_read = true;
#endif /* HAVE_MMAP_FILE */
      if (needs_read)
        addr = xmalloc (mmi.size);
    }

  if (needs_read)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (addr, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, addr);

  if (addr != mmi.preferred_base)
    {
      for (rt = gt_ggc_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      for (rt = gt_pch_cache_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      sorry ("had to relocate PCH");
    }

  gt_pch_restore_stringpool ();
}

/* Modify the bound based on rlimits.  Keep the smallest number found.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
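
/* Worked example (illustrative numbers only): with 512MB of physical
   memory and no tighter rlimit, min_expand is 0.5 after the division,
   35 after scaling by 70, still 35 after the MIN, and 65 after adding 30,
   so "ggc-min-expand" would default to 65.  */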

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
int
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* Convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);

  return min_heap_kbytes;
}
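
/* Worked example (illustrative numbers only): with 512MB of physical
   memory, min_heap_kbytes is 524288 after the conversion to kilobytes,
   65536 after dividing by 8, and stays 65536 (64MB) after both clamps, so
   "ggc-min-heapsize" would default to 65536.  With 2GB, RAM/8 is 262144,
   which the MIN clamps to 131072 (128MB).  */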

void
init_ggc_heuristics (void)
{
#ifndef ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}