]>
Commit | Line | Data |
---|---|---|
73ffefd0 TT |
1 | /* |
2 | * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
3 | * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. | |
4 | * | |
5 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
6 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
7 | * | |
8 | * Permission is hereby granted to use or copy this program | |
9 | * for any purpose, provided the above notices are retained on all copies. | |
10 | * Permission to modify the code and to distribute modified code is granted, | |
11 | * provided the above notices are retained, and a notice that the code was | |
12 | * modified is included with the above copyright notice. | |
13 | */ | |
73ffefd0 | 14 | # include <stdio.h> |
9110a741 | 15 | # include "private/gc_priv.h" |
73ffefd0 | 16 | |
73ffefd0 TT |
17 | /* Data structure for list of root sets. */ |
18 | /* We keep a hash table, so that we can filter out duplicate additions. */ | |
19 | /* Under Win32, we need to do a better job of filtering overlaps, so */ | |
20 | /* we resort to sequential search, and pay the price. */ | |
20bbd3cd | 21 | /* This is really declared in gc_priv.h: |
73ffefd0 TT |
22 | struct roots { |
23 | ptr_t r_start; | |
24 | ptr_t r_end; | |
045a52c8 | 25 | # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
73ffefd0 | 26 | struct roots * r_next; |
20bbd3cd | 27 | # endif |
73ffefd0 | 28 | GC_bool r_tmp; |
20bbd3cd | 29 | -- Delete before registering new dynamic libraries |
73ffefd0 TT |
30 | }; |
31 | ||
20bbd3cd TT |
32 | struct roots GC_static_roots[MAX_ROOT_SETS]; |
33 | */ | |
73ffefd0 | 34 | |
9110a741 BM |
35 | int GC_no_dls = 0; /* Register dynamic library data segments. */ |
36 | ||
73ffefd0 TT |
37 | static int n_root_sets = 0; |
38 | ||
20bbd3cd | 39 | /* GC_static_roots[0..n_root_sets) contains the valid root sets. */ |
73ffefd0 TT |
40 | |
41 | # if !defined(NO_DEBUGGING) | |
42 | /* For debugging: */ | |
43 | void GC_print_static_roots() | |
44 | { | |
45 | register int i; | |
46 | size_t total = 0; | |
47 | ||
48 | for (i = 0; i < n_root_sets; i++) { | |
49 | GC_printf2("From 0x%lx to 0x%lx ", | |
20bbd3cd TT |
50 | (unsigned long) GC_static_roots[i].r_start, |
51 | (unsigned long) GC_static_roots[i].r_end); | |
52 | if (GC_static_roots[i].r_tmp) { | |
73ffefd0 TT |
53 | GC_printf0(" (temporary)\n"); |
54 | } else { | |
55 | GC_printf0("\n"); | |
56 | } | |
20bbd3cd | 57 | total += GC_static_roots[i].r_end - GC_static_roots[i].r_start; |
73ffefd0 TT |
58 | } |
59 | GC_printf1("Total size: %ld\n", (unsigned long) total); | |
60 | if (GC_root_size != total) { | |
61 | GC_printf1("GC_root_size incorrect: %ld!!\n", | |
62 | (unsigned long) GC_root_size); | |
63 | } | |
64 | } | |
65 | # endif /* NO_DEBUGGING */ | |
66 | ||
67 | /* Primarily for debugging support: */ | |
68 | /* Is the address p in one of the registered static */ | |
69 | /* root sections? */ | |
70 | GC_bool GC_is_static_root(p) | |
71 | ptr_t p; | |
72 | { | |
9110a741 | 73 | static int last_root_set = MAX_ROOT_SETS; |
73ffefd0 TT |
74 | register int i; |
75 | ||
76 | ||
9110a741 BM |
77 | if (last_root_set < n_root_sets |
78 | && p >= GC_static_roots[last_root_set].r_start | |
20bbd3cd | 79 | && p < GC_static_roots[last_root_set].r_end) return(TRUE); |
73ffefd0 | 80 | for (i = 0; i < n_root_sets; i++) { |
20bbd3cd TT |
81 | if (p >= GC_static_roots[i].r_start |
82 | && p < GC_static_roots[i].r_end) { | |
73ffefd0 TT |
83 | last_root_set = i; |
84 | return(TRUE); | |
85 | } | |
86 | } | |
87 | return(FALSE); | |
88 | } | |
89 | ||
045a52c8 | 90 | #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
20bbd3cd | 91 | /* |
73ffefd0 | 92 | # define LOG_RT_SIZE 6 |
20bbd3cd | 93 | # define RT_SIZE (1 << LOG_RT_SIZE) -- Power of 2, may be != MAX_ROOT_SETS |
73ffefd0 | 94 | |
20bbd3cd TT |
95 | struct roots * GC_root_index[RT_SIZE]; |
96 | -- Hash table header. Used only to check whether a range is | |
97 | -- already present. | |
98 | -- really defined in gc_priv.h | |
99 | */ | |
73ffefd0 TT |
100 | |
101 | static int rt_hash(addr) | |
102 | char * addr; | |
103 | { | |
104 | word result = (word) addr; | |
105 | # if CPP_WORDSZ > 8*LOG_RT_SIZE | |
106 | result ^= result >> 8*LOG_RT_SIZE; | |
107 | # endif | |
108 | # if CPP_WORDSZ > 4*LOG_RT_SIZE | |
109 | result ^= result >> 4*LOG_RT_SIZE; | |
110 | # endif | |
111 | result ^= result >> 2*LOG_RT_SIZE; | |
112 | result ^= result >> LOG_RT_SIZE; | |
113 | result &= (RT_SIZE-1); | |
114 | return(result); | |
115 | } | |
116 | ||
117 | /* Is a range starting at b already in the table? If so return a */ | |
118 | /* pointer to it, else NIL. */ | |
119 | struct roots * GC_roots_present(b) | |
120 | char *b; | |
121 | { | |
122 | register int h = rt_hash(b); | |
20bbd3cd | 123 | register struct roots *p = GC_root_index[h]; |
73ffefd0 TT |
124 | |
125 | while (p != 0) { | |
126 | if (p -> r_start == (ptr_t)b) return(p); | |
127 | p = p -> r_next; | |
128 | } | |
129 | return(FALSE); | |
130 | } | |
131 | ||
132 | /* Add the given root structure to the index. */ | |
133 | static void add_roots_to_index(p) | |
134 | struct roots *p; | |
135 | { | |
136 | register int h = rt_hash(p -> r_start); | |
137 | ||
20bbd3cd TT |
138 | p -> r_next = GC_root_index[h]; |
139 | GC_root_index[h] = p; | |
73ffefd0 TT |
140 | } |
141 | ||
045a52c8 | 142 | # else /* MSWIN32 || MSWINCE || CYGWIN32 */ |
73ffefd0 TT |
143 | |
144 | # define add_roots_to_index(p) | |
145 | ||
146 | # endif | |
147 | ||
148 | ||
149 | ||
150 | ||
151 | word GC_root_size = 0; | |
152 | ||
153 | void GC_add_roots(b, e) | |
154 | char * b; char * e; | |
155 | { | |
156 | DCL_LOCK_STATE; | |
157 | ||
158 | DISABLE_SIGNALS(); | |
159 | LOCK(); | |
160 | GC_add_roots_inner(b, e, FALSE); | |
161 | UNLOCK(); | |
162 | ENABLE_SIGNALS(); | |
163 | } | |
164 | ||
165 | ||
166 | /* Add [b,e) to the root set. Adding the same interval a second time */ | |
167 | /* is a moderately fast noop, and hence benign. We do not handle */ | |
168 | /* different but overlapping intervals efficiently. (We do handle */ | |
169 | /* them correctly.) */ | |
170 | /* Tmp specifies that the interval may be deleted before */ | |
171 | /* reregistering dynamic libraries. */ | |
172 | void GC_add_roots_inner(b, e, tmp) | |
173 | char * b; char * e; | |
174 | GC_bool tmp; | |
175 | { | |
176 | struct roots * old; | |
177 | ||
045a52c8 | 178 | # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32) |
73ffefd0 TT |
179 | /* Spend the time to ensure that there are no overlapping */ |
180 | /* or adjacent intervals. */ | |
181 | /* This could be done faster with e.g. a */ | |
182 | /* balanced tree. But the execution time here is */ | |
183 | /* virtually guaranteed to be dominated by the time it */ | |
184 | /* takes to scan the roots. */ | |
185 | { | |
186 | register int i; | |
187 | ||
188 | for (i = 0; i < n_root_sets; i++) { | |
20bbd3cd | 189 | old = GC_static_roots + i; |
73ffefd0 TT |
190 | if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) { |
191 | if ((ptr_t)b < old -> r_start) { | |
192 | old -> r_start = (ptr_t)b; | |
193 | GC_root_size += (old -> r_start - (ptr_t)b); | |
194 | } | |
195 | if ((ptr_t)e > old -> r_end) { | |
196 | old -> r_end = (ptr_t)e; | |
197 | GC_root_size += ((ptr_t)e - old -> r_end); | |
198 | } | |
199 | old -> r_tmp &= tmp; | |
200 | break; | |
201 | } | |
202 | } | |
203 | if (i < n_root_sets) { | |
204 | /* merge other overlapping intervals */ | |
205 | struct roots *other; | |
206 | ||
207 | for (i++; i < n_root_sets; i++) { | |
20bbd3cd | 208 | other = GC_static_roots + i; |
73ffefd0 TT |
209 | b = (char *)(other -> r_start); |
210 | e = (char *)(other -> r_end); | |
211 | if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) { | |
212 | if ((ptr_t)b < old -> r_start) { | |
213 | old -> r_start = (ptr_t)b; | |
214 | GC_root_size += (old -> r_start - (ptr_t)b); | |
215 | } | |
216 | if ((ptr_t)e > old -> r_end) { | |
217 | old -> r_end = (ptr_t)e; | |
218 | GC_root_size += ((ptr_t)e - old -> r_end); | |
219 | } | |
220 | old -> r_tmp &= other -> r_tmp; | |
221 | /* Delete this entry. */ | |
222 | GC_root_size -= (other -> r_end - other -> r_start); | |
20bbd3cd TT |
223 | other -> r_start = GC_static_roots[n_root_sets-1].r_start; |
224 | other -> r_end = GC_static_roots[n_root_sets-1].r_end; | |
73ffefd0 TT |
225 | n_root_sets--; |
226 | } | |
227 | } | |
228 | return; | |
229 | } | |
230 | } | |
231 | # else | |
232 | old = GC_roots_present(b); | |
233 | if (old != 0) { | |
234 | if ((ptr_t)e <= old -> r_end) /* already there */ return; | |
235 | /* else extend */ | |
236 | GC_root_size += (ptr_t)e - old -> r_end; | |
237 | old -> r_end = (ptr_t)e; | |
238 | return; | |
239 | } | |
240 | # endif | |
241 | if (n_root_sets == MAX_ROOT_SETS) { | |
242 | ABORT("Too many root sets\n"); | |
243 | } | |
20bbd3cd TT |
244 | GC_static_roots[n_root_sets].r_start = (ptr_t)b; |
245 | GC_static_roots[n_root_sets].r_end = (ptr_t)e; | |
246 | GC_static_roots[n_root_sets].r_tmp = tmp; | |
045a52c8 | 247 | # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
20bbd3cd | 248 | GC_static_roots[n_root_sets].r_next = 0; |
73ffefd0 | 249 | # endif |
20bbd3cd | 250 | add_roots_to_index(GC_static_roots + n_root_sets); |
73ffefd0 TT |
251 | GC_root_size += (ptr_t)e - (ptr_t)b; |
252 | n_root_sets++; | |
253 | } | |
254 | ||
79f777fd | 255 | static GC_bool roots_were_cleared = FALSE; |
9110a741 | 256 | |
73ffefd0 TT |
257 | void GC_clear_roots GC_PROTO((void)) |
258 | { | |
259 | DCL_LOCK_STATE; | |
260 | ||
261 | DISABLE_SIGNALS(); | |
262 | LOCK(); | |
9110a741 | 263 | roots_were_cleared = TRUE; |
73ffefd0 TT |
264 | n_root_sets = 0; |
265 | GC_root_size = 0; | |
045a52c8 | 266 | # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
73ffefd0 TT |
267 | { |
268 | register int i; | |
269 | ||
20bbd3cd | 270 | for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0; |
73ffefd0 TT |
271 | } |
272 | # endif | |
273 | UNLOCK(); | |
274 | ENABLE_SIGNALS(); | |
275 | } | |
276 | ||
30c3de1f JS |
277 | /* Internal use only; lock held. */ |
278 | static void GC_remove_root_at_pos(i) | |
279 | int i; | |
280 | { | |
281 | GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start); | |
282 | GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start; | |
283 | GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end; | |
284 | GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp; | |
285 | n_root_sets--; | |
286 | } | |
287 | ||
045a52c8 | 288 | #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
30c3de1f JS |
289 | static void GC_rebuild_root_index() |
290 | { | |
291 | register int i; | |
292 | ||
293 | for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0; | |
294 | for (i = 0; i < n_root_sets; i++) | |
295 | add_roots_to_index(GC_static_roots + i); | |
296 | } | |
297 | #endif | |
298 | ||
73ffefd0 TT |
299 | /* Internal use only; lock held. */ |
300 | void GC_remove_tmp_roots() | |
301 | { | |
302 | register int i; | |
303 | ||
304 | for (i = 0; i < n_root_sets; ) { | |
20bbd3cd | 305 | if (GC_static_roots[i].r_tmp) { |
30c3de1f | 306 | GC_remove_root_at_pos(i); |
73ffefd0 TT |
307 | } else { |
308 | i++; | |
73ffefd0 | 309 | } |
73ffefd0 | 310 | } |
045a52c8 | 311 | #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
30c3de1f JS |
312 | GC_rebuild_root_index(); |
313 | #endif | |
314 | } | |
315 | ||
045a52c8 | 316 | #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) |
30c3de1f JS |
317 | void GC_remove_roots(b, e) |
318 | char * b; char * e; | |
319 | { | |
320 | DCL_LOCK_STATE; | |
73ffefd0 | 321 | |
30c3de1f JS |
322 | DISABLE_SIGNALS(); |
323 | LOCK(); | |
324 | GC_remove_roots_inner(b, e); | |
325 | UNLOCK(); | |
326 | ENABLE_SIGNALS(); | |
327 | } | |
328 | ||
329 | /* Should only be called when the lock is held */ | |
330 | void GC_remove_roots_inner(b,e) | |
331 | char * b; char * e; | |
332 | { | |
333 | int i; | |
334 | for (i = 0; i < n_root_sets; ) { | |
335 | if (GC_static_roots[i].r_start >= (ptr_t)b && GC_static_roots[i].r_end <= (ptr_t)e) { | |
336 | GC_remove_root_at_pos(i); | |
337 | } else { | |
338 | i++; | |
339 | } | |
340 | } | |
341 | GC_rebuild_root_index(); | |
73ffefd0 | 342 | } |
045a52c8 | 343 | #endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */ |
73ffefd0 | 344 | |
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION) || defined(CYGWIN32)
/* Workaround for the OS mapping and unmapping behind our back:		*/
/* Is the address p in one of the temporary static root sections?	*/
GC_bool GC_is_tmp_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    	/* Same one-entry result cache as GC_is_static_root. */
    register int i;
    
    if (last_root_set < n_root_sets
	&& p >= GC_static_roots[last_root_set].r_start
	&& p < GC_static_roots[last_root_set].r_end)
	return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
    	if (p >= GC_static_roots[i].r_start
	    && p < GC_static_roots[i].r_end) {
	    last_root_set = i;
	    return GC_static_roots[i].r_tmp;
	}
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION || CYGWIN32 */
9110a741 | 368 | |
73ffefd0 TT |
369 | ptr_t GC_approx_sp() |
370 | { | |
328d1d4c | 371 | VOLATILE word dummy; |
9110a741 | 372 | |
328d1d4c HB |
373 | dummy = 42; /* Force stack to grow if necessary. Otherwise the */ |
374 | /* later accesses might cause the kernel to think we're */ | |
375 | /* doing something wrong. */ | |
9110a741 BM |
376 | # ifdef _MSC_VER |
377 | # pragma warning(disable:4172) | |
378 | # endif | |
2fc5ecb5 BE |
379 | #ifdef __GNUC__ |
380 | /* Eliminate a warning from GCC about taking the address of a | |
381 | local variable. */ | |
382 | return __builtin_frame_address (0); | |
383 | #else | |
384 | return ((ptr_t)(&dummy)); | |
385 | #endif /* __GNUC__ */ | |
9110a741 BM |
386 | # ifdef _MSC_VER |
387 | # pragma warning(default:4172) | |
388 | # endif | |
73ffefd0 TT |
389 | } |
390 | ||
391 | /* | |
392 | * Data structure for excluded static roots. | |
20bbd3cd TT |
393 | * Real declaration is in gc_priv.h. |
394 | ||
73ffefd0 TT |
395 | struct exclusion { |
396 | ptr_t e_start; | |
397 | ptr_t e_end; | |
398 | }; | |
399 | ||
20bbd3cd TT |
400 | struct exclusion GC_excl_table[MAX_EXCLUSIONS]; |
401 | -- Array of exclusions, ascending | |
402 | -- address order. | |
403 | */ | |
404 | ||
405 | size_t GC_excl_table_entries = 0; /* Number of entries in use. */ | |
73ffefd0 TT |
406 | |
407 | /* Return the first exclusion range that includes an address >= start_addr */ | |
408 | /* Assumes the exclusion table contains at least one entry (namely the */ | |
409 | /* GC data structures). */ | |
410 | struct exclusion * GC_next_exclusion(start_addr) | |
411 | ptr_t start_addr; | |
412 | { | |
413 | size_t low = 0; | |
20bbd3cd | 414 | size_t high = GC_excl_table_entries - 1; |
73ffefd0 TT |
415 | size_t mid; |
416 | ||
417 | while (high > low) { | |
418 | mid = (low + high) >> 1; | |
419 | /* low <= mid < high */ | |
20bbd3cd | 420 | if ((word) GC_excl_table[mid].e_end <= (word) start_addr) { |
73ffefd0 TT |
421 | low = mid + 1; |
422 | } else { | |
423 | high = mid; | |
424 | } | |
425 | } | |
20bbd3cd TT |
426 | if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0; |
427 | return GC_excl_table + low; | |
73ffefd0 TT |
428 | } |
429 | ||
430 | void GC_exclude_static_roots(start, finish) | |
431 | GC_PTR start; | |
432 | GC_PTR finish; | |
433 | { | |
434 | struct exclusion * next; | |
435 | size_t next_index, i; | |
436 | ||
20bbd3cd | 437 | if (0 == GC_excl_table_entries) { |
73ffefd0 TT |
438 | next = 0; |
439 | } else { | |
440 | next = GC_next_exclusion(start); | |
441 | } | |
442 | if (0 != next) { | |
443 | if ((word)(next -> e_start) < (word) finish) { | |
444 | /* incomplete error check. */ | |
445 | ABORT("exclusion ranges overlap"); | |
446 | } | |
447 | if ((word)(next -> e_start) == (word) finish) { | |
448 | /* extend old range backwards */ | |
449 | next -> e_start = (ptr_t)start; | |
450 | return; | |
451 | } | |
20bbd3cd TT |
452 | next_index = next - GC_excl_table; |
453 | for (i = GC_excl_table_entries; i > next_index; --i) { | |
454 | GC_excl_table[i] = GC_excl_table[i-1]; | |
73ffefd0 TT |
455 | } |
456 | } else { | |
20bbd3cd | 457 | next_index = GC_excl_table_entries; |
73ffefd0 | 458 | } |
20bbd3cd TT |
459 | if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions"); |
460 | GC_excl_table[next_index].e_start = (ptr_t)start; | |
461 | GC_excl_table[next_index].e_end = (ptr_t)finish; | |
462 | ++GC_excl_table_entries; | |
73ffefd0 TT |
463 | } |
464 | ||
465 | /* Invoke push_conditional on ranges that are not excluded. */ | |
466 | void GC_push_conditional_with_exclusions(bottom, top, all) | |
467 | ptr_t bottom; | |
468 | ptr_t top; | |
469 | int all; | |
470 | { | |
471 | struct exclusion * next; | |
472 | ptr_t excl_start; | |
473 | ||
474 | while (bottom < top) { | |
475 | next = GC_next_exclusion(bottom); | |
476 | if (0 == next || (excl_start = next -> e_start) >= top) { | |
477 | GC_push_conditional(bottom, top, all); | |
478 | return; | |
479 | } | |
480 | if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all); | |
481 | bottom = next -> e_end; | |
482 | } | |
483 | } | |
484 | ||
20bbd3cd TT |
485 | /* |
486 | * In the absence of threads, push the stack contents. | |
487 | * In the presence of threads, push enough of the current stack | |
488 | * to ensure that callee-save registers saved in collector frames have been | |
489 | * seen. | |
490 | */ | |
491 | void GC_push_current_stack(cold_gc_frame) | |
492 | ptr_t cold_gc_frame; | |
493 | { | |
494 | # if defined(THREADS) | |
495 | if (0 == cold_gc_frame) return; | |
496 | # ifdef STACK_GROWS_DOWN | |
497 | GC_push_all_eager(GC_approx_sp(), cold_gc_frame); | |
93002327 BM |
498 | /* For IA64, the register stack backing store is handled */ |
499 | /* in the thread-specific code. */ | |
20bbd3cd TT |
500 | # else |
501 | GC_push_all_eager( cold_gc_frame, GC_approx_sp() ); | |
502 | # endif | |
503 | # else | |
504 | # ifdef STACK_GROWS_DOWN | |
505 | GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom, | |
506 | cold_gc_frame ); | |
507 | # ifdef IA64 | |
508 | /* We also need to push the register stack backing store. */ | |
509 | /* This should really be done in the same way as the */ | |
510 | /* regular stack. For now we fudge it a bit. */ | |
511 | /* Note that the backing store grows up, so we can't use */ | |
512 | /* GC_push_all_stack_partially_eager. */ | |
513 | { | |
514 | extern word GC_save_regs_ret_val; | |
515 | /* Previously set to backing store pointer. */ | |
516 | ptr_t bsp = (ptr_t) GC_save_regs_ret_val; | |
517 | ptr_t cold_gc_bs_pointer; | |
9110a741 | 518 | if (GC_all_interior_pointers) { |
20bbd3cd TT |
519 | cold_gc_bs_pointer = bsp - 2048; |
520 | if (cold_gc_bs_pointer < BACKING_STORE_BASE) { | |
521 | cold_gc_bs_pointer = BACKING_STORE_BASE; | |
5a2586cf TT |
522 | } else { |
523 | GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer); | |
20bbd3cd | 524 | } |
9110a741 | 525 | } else { |
20bbd3cd | 526 | cold_gc_bs_pointer = BACKING_STORE_BASE; |
9110a741 | 527 | } |
20bbd3cd TT |
528 | GC_push_all_eager(cold_gc_bs_pointer, bsp); |
529 | /* All values should be sufficiently aligned that we */ | |
530 | /* dont have to worry about the boundary. */ | |
531 | } | |
532 | # endif | |
533 | # else | |
534 | GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(), | |
535 | cold_gc_frame ); | |
536 | # endif | |
537 | # endif /* !THREADS */ | |
538 | } | |
539 | ||
9110a741 BM |
540 | /* |
541 | * Push GC internal roots. Only called if there is some reason to believe | |
542 | * these would not otherwise get registered. | |
543 | */ | |
544 | void GC_push_gc_structures GC_PROTO((void)) | |
545 | { | |
546 | GC_push_finalizer_structures(); | |
547 | GC_push_stubborn_structures(); | |
548 | # if defined(THREADS) | |
549 | GC_push_thread_structures(); | |
550 | # endif | |
551 | } | |
552 | ||
5a2586cf TT |
553 | #ifdef THREAD_LOCAL_ALLOC |
554 | void GC_mark_thread_local_free_lists(); | |
555 | #endif | |
556 | ||
ebcc6a7e HB |
557 | void GC_cond_register_dynamic_libraries() |
558 | { | |
559 | # if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \ | |
045a52c8 | 560 | || defined(CYGWIN32) || defined(PCR)) && !defined(SRC_M3) |
ebcc6a7e HB |
561 | GC_remove_tmp_roots(); |
562 | if (!GC_no_dls) GC_register_dynamic_libraries(); | |
563 | # else | |
564 | GC_no_dls = TRUE; | |
565 | # endif | |
566 | } | |
567 | ||
73ffefd0 TT |
568 | /* |
569 | * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional | |
570 | * on groups of pointers) on every top level accessible pointer. | |
571 | * If all is FALSE, arrange to push only possibly altered values. | |
20bbd3cd TT |
572 | * Cold_gc_frame is an address inside a GC frame that |
573 | * remains valid until all marking is complete. | |
574 | * A zero value indicates that it's OK to miss some | |
575 | * register values. | |
73ffefd0 | 576 | */ |
20bbd3cd | 577 | void GC_push_roots(all, cold_gc_frame) |
73ffefd0 | 578 | GC_bool all; |
20bbd3cd | 579 | ptr_t cold_gc_frame; |
73ffefd0 | 580 | { |
ebcc6a7e HB |
581 | int i; |
582 | int kind; | |
73ffefd0 | 583 | |
73ffefd0 TT |
584 | /* |
585 | * Next push static data. This must happen early on, since it's | |
586 | * not robust against mark stack overflow. | |
587 | */ | |
ebcc6a7e HB |
588 | /* Reregister dynamic libraries, in case one got added. */ |
589 | /* There is some argument for doing this as late as possible, */ | |
590 | /* especially on win32, where it can change asynchronously. */ | |
591 | /* In those cases, we do it here. But on other platforms, it's */ | |
592 | /* not safe with the world stopped, so we do it earlier. */ | |
593 | # if !defined(REGISTER_LIBRARIES_EARLY) | |
594 | GC_cond_register_dynamic_libraries(); | |
73ffefd0 | 595 | # endif |
9110a741 | 596 | |
73ffefd0 TT |
597 | /* Mark everything in static data areas */ |
598 | for (i = 0; i < n_root_sets; i++) { | |
599 | GC_push_conditional_with_exclusions( | |
20bbd3cd TT |
600 | GC_static_roots[i].r_start, |
601 | GC_static_roots[i].r_end, all); | |
73ffefd0 TT |
602 | } |
603 | ||
ebcc6a7e HB |
604 | /* Mark all free list header blocks, if those were allocated from */ |
605 | /* the garbage collected heap. This makes sure they don't */ | |
606 | /* disappear if we are not marking from static data. It also */ | |
607 | /* saves us the trouble of scanning them, and possibly that of */ | |
608 | /* marking the freelists. */ | |
609 | for (kind = 0; kind < GC_n_kinds; kind++) { | |
610 | GC_PTR base = GC_base(GC_obj_kinds[kind].ok_freelist); | |
611 | if (0 != base) { | |
612 | GC_set_mark_bit(base); | |
613 | } | |
614 | } | |
615 | ||
9110a741 BM |
616 | /* Mark from GC internal roots if those might otherwise have */ |
617 | /* been excluded. */ | |
618 | if (GC_no_dls || roots_were_cleared) { | |
619 | GC_push_gc_structures(); | |
620 | } | |
621 | ||
5a2586cf TT |
622 | /* Mark thread local free lists, even if their mark */ |
623 | /* descriptor excludes the link field. */ | |
30c3de1f JS |
624 | /* If the world is not stopped, this is unsafe. It is */ |
625 | /* also unnecessary, since we will do this again with the */ | |
626 | /* world stopped. */ | |
5a2586cf | 627 | # ifdef THREAD_LOCAL_ALLOC |
30c3de1f | 628 | if (GC_world_stopped) GC_mark_thread_local_free_lists(); |
5a2586cf TT |
629 | # endif |
630 | ||
73ffefd0 | 631 | /* |
79f777fd BM |
632 | * Now traverse stacks, and mark from register contents. |
633 | * These must be done last, since they can legitimately overflow | |
634 | * the mark stack. | |
73ffefd0 | 635 | */ |
79f777fd BM |
636 | # ifdef USE_GENERIC_PUSH_REGS |
637 | GC_generic_push_regs(cold_gc_frame); | |
638 | /* Also pushes stack, so that we catch callee-save registers */ | |
639 | /* saved inside the GC_push_regs frame. */ | |
640 | # else | |
641 | /* | |
642 | * push registers - i.e., call GC_push_one(r) for each | |
643 | * register contents r. | |
644 | */ | |
645 | GC_push_regs(); /* usually defined in machine_dep.c */ | |
20bbd3cd | 646 | GC_push_current_stack(cold_gc_frame); |
4c7726b1 | 647 | /* In the threads case, this only pushes collector frames. */ |
4c7726b1 | 648 | /* In the case of linux threads on IA64, the hot section of */ |
93002327 BM |
649 | /* the main stack is marked here, but the register stack */ |
650 | /* backing store is handled in the threads-specific code. */ | |
73ffefd0 TT |
651 | # endif |
652 | if (GC_push_other_roots != 0) (*GC_push_other_roots)(); | |
653 | /* In the threads case, this also pushes thread stacks. */ | |
79f777fd BM |
654 | /* Note that without interior pointer recognition lots */ |
655 | /* of stuff may have been pushed already, and this */ | |
656 | /* should be careful about mark stack overflows. */ | |
73ffefd0 TT |
657 | } |
658 |