/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity     */
void GC_extend_size_map();      /* in misc.c.                           */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c                  */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
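
/* A rough sketch (illustrative only, never compiled) of how an inlined */
/* allocator might use these pointers: pop the head of the free list    */
/* for an LW-word NORMAL object, refilling via                          */
/* GC_generic_malloc_words_small() when the list is empty.  LW is a     */
/* placeholder, and locking plus GC_words_allocd accounting are elided  */
/* here; a real inlined allocator (compare gc_inline.h) must handle     */
/* both.                                                                 */
#if 0
    ptr_t op;
    ptr_t *opp = &(GC_objfreelist_ptr[LW]);  /* LW = object size in words */

    if ((op = *opp) != 0) {
        *opp = obj_link(op);    /* unlink the first object      */
        obj_link(op) = 0;       /* clear the link word          */
    } else {
        op = GC_generic_malloc_words_small(LW, NORMAL);
    }
#endif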


GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes        */
register word orig_sz;   /* Original sz in bytes         */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
              /* Clear unneeded part of object to avoid bogus pointer */
              /* tracing.                                             */
              /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this      */
                  /* gives the client warning of imminent disaster.    */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
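
/* Usage note (illustrative only, never compiled): the new object keeps */
/* the kind of the old one, so an atomic (pointer-free) object stays    */
/* pointer-free, and the original pointer must not be used afterwards,  */
/* since the object may have moved.  'buf' is a hypothetical client     */
/* variable.                                                             */
#if 0
    char *buf = (char *)GC_malloc_atomic(64);
    buf = (char *)GC_realloc((GC_PTR)buf, 256);  /* still pointer-free */
#endif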

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */


/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                      */
/* We avoid holding allocation lock while we clear memory.      */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
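
/* Usage note (illustrative only, never compiled): an object allocated  */
/* with the ignore_off_page variants is only guaranteed to be retained  */
/* while a pointer to (or near) its beginning is visible to the         */
/* collector; interior pointers alone do not suffice.  'big' and 'mid'  */
/* are hypothetical client variables.                                    */
#if 0
    char *big = (char *)GC_malloc_ignore_off_page(1 << 20);
    char *mid = big + (1 << 19);  /* fine only while 'big' stays live */
#endif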

/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
#ifdef __STDC__
  ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
  ptr_t GC_generic_malloc_words_small(lw, k)
  register word lw;
  register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                        */
/* We return the free list by assigning it to *result, since it is      */
/* not safe to return, e.g., a linked list of pointer-free objects:     */
/* the collector would not retain the entire list if it were invoked    */
/* just as we were returning.                                            */
/* Note that the client should usually clear the link field.             */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                  */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#               ifdef NEED_TO_COUNT
                  /* We are neither gathering statistics, nor marking in */
                  /* parallel.  Thus GC_reclaim_generic doesn't count    */
                  /* for us.                                             */
                  for (p = op; p != 0; p = obj_link(p)) {
                    my_words_allocd += lw;
                  }
#               endif
#               if defined(GATHERSTATS)
                  /* We also reclaimed memory, so we need to adjust      */
                  /* that count.                                         */
                  /* This ought to be done atomically; since it is not,  */
                  /* the count may be slightly inaccurate.               */
                  GC_mem_found += my_words_allocd;
#               endif
#               ifdef PARALLEL_MARK
                  *result = op;
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(my_words_allocd));
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
                  GC_release_mark_lock();
                  (void) GC_clear_stack(0);
                  return;
#               else
                  GC_words_allocd += my_words_allocd;
                  goto out;
#               endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We         */
              /* must decrement fl_builder_count before reacquiring GC  */
              /* lock.  Hopefully this path is rare.                    */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                           */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
# endif
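
/* Illustrative client code (not part of the collector, never compiled): */
/* replenish a thread-local free list with GC_malloc_many and hand out   */
/* objects one at a time.  GC_NEXT (assumed to be the gc.h macro that    */
/* reads the link stored in the first word of each object) is used to    */
/* walk and clear the links; 'my_free_list' and 'my_alloc_16' are        */
/* hypothetical names.                                                    */
#if 0
    static GC_PTR my_free_list = 0;     /* one per thread in real code */

    GC_PTR my_alloc_16(void)
    {
        GC_PTR op = my_free_list;

        if (0 == op) {
            my_free_list = GC_malloc_many(16);
            op = my_free_list;
            if (0 == op) return 0;      /* out of memory */
        }
        my_free_list = GC_NEXT(op);     /* unlink the first object */
        GC_NEXT(op) = 0;                /* clear the link field    */
        return op;
    }
#endif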

/* Allocate lb bytes of pointerful, traced, but not collectable data   */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
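
/* Usage note (illustrative only, never compiled): uncollectable objects */
/* are never reclaimed automatically; the client must release them       */
/* explicitly with GC_free when done.  'tbl' is a hypothetical client    */
/* variable.                                                              */
#if 0
    char *tbl = (char *)GC_malloc_uncollectable(1024);
    /* tbl is scanned for pointers, so collectable objects it refers */
    /* to stay live, but tbl itself is only released by GC_free:     */
    GC_free((GC_PTR)tbl);
#endif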

#ifdef __STDC__
/* Not well tested or integrated.                       */
/* Debug version is tricky and currently missing.       */
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
#endif
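
/* Illustrative use only (never compiled): request a 64-byte-aligned    */
/* buffer.  Alignments above HBLKSIZE cannot be satisfied and invoke    */
/* the out-of-memory handler instead.  'v' is a hypothetical client     */
/* variable.                                                              */
#if 0
    double *v = (double *)GC_memalign(64, 1000 * sizeof(double));
#endif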

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */