/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

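/*
 * Illustrative sketch (not part of the library): an inlined allocator
 * built on these pointers pops an object of lw words roughly as follows,
 * while holding the allocation lock.  Details (locking, initialization,
 * other kinds) are simplified here; see gc_inline.h for the real macros.
 *
 *      ptr_t op;
 *      ptr_t *flh = GC_objfreelist_ptr + lw;      -- NORMAL kind
 *      if ((op = *flh) != 0) {
 *          *flh = *(ptr_t *)op;       -- link is stored in the first word
 *          *(ptr_t *)op = 0;
 *          GC_incr_words_allocd(lw);  -- bookkeeping helper defined below
 *      } else {
 *          op = GC_generic_malloc_words_small_inner(lw, NORMAL);
 *      }
 */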

GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}

/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz;        /* Current size in bytes       */
register word orig_sz;   /* Original sz in bytes        */
int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this      */
                  /* gives the client warning of imminent disaster.    */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}

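/*
 * Usage sketch (illustrative only): as with ANSI realloc, the old pointer
 * must not be used after a successful call, since the object may have
 * moved.  On failure 0 is returned and the original object has not been
 * freed, so the safe pattern is:
 *
 *      char *buf = (char *)GC_malloc(100);
 *      char *tmp = (char *)GC_realloc(buf, 200);
 *      if (tmp == 0) {
 *          ... out of memory; buf is still valid ...
 *      } else {
 *          buf = tmp;
 *      }
 */
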
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
#   ifdef __STDC__
      GC_PTR realloc(GC_PTR p, size_t lb)
#   else
      GC_PTR realloc(p,lb)
      GC_PTR p;
      size_t lb;
#   endif
    {
      return(REDIRECT_REALLOC(p, lb));
    }
# endif /* REDIRECT_REALLOC */
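
/*
 * Build-time sketch (hypothetical flags; adapt to the actual Makefile):
 * a collector built with, e.g.,
 *
 *      -DREDIRECT_MALLOC=GC_malloc -DREDIRECT_REALLOC=GC_realloc
 *
 * services the application's plain malloc()/realloc() calls.  Defining
 * REDIRECT_MALLOC alone defaults REDIRECT_REALLOC to GC_realloc, as the
 * conditional above shows.
 */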

/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}

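/*
 * Usage sketch (illustrative only): the _ignore_off_page variants are
 * intended for large objects for which the client promises to keep a
 * pointer to, or near, the beginning of the object while it is live,
 * which lets the collector ignore interior pointers far into the block:
 *
 *      word *big = (word *)GC_malloc_ignore_off_page(1000000);
 *      -- "big" itself must remain visible to the collector.
 */
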
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed.                           */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
register ptr_t op;
register ptr_t *opp;
register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.                */
#ifdef __STDC__
     ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
     ptr_t GC_generic_malloc_words_small(lw, k)
     register word lw;
     register int k;
#endif
{
register ptr_t op;
DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
        /* Number of words of memory allocated since            */
        /* we released the GC lock.  Instead of                 */
        /* reacquiring the GC lock just to add this in,         */
        /* we add it in the next time we reacquire              */
        /* the lock.  (Atomically adding it doesn't             */
        /* work, since we would have to atomically              */
        /* update it in GC_malloc, which is too                 */
        /* expensive.)                                          */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would */
/* keep its own free list in thread-local storage, and call            */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We return the free-list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
ptr_t op;
ptr_t p;
ptr_t *opp;
word lw;
word my_words_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
      }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                       */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
                {
                  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                  GC_ASSERT(my_words_allocd_tmp >= 0);
                  /* We only decrement it while holding the GC lock.   */
                  /* Thus we can't accidentally adjust it down in more */
                  /* than one thread simultaneously.                   */
                  if (my_words_allocd_tmp != 0) {
                    (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(-my_words_allocd_tmp));
                    GC_words_allocd += my_words_allocd_tmp;
                  }
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                ENABLE_SIGNALS();
                GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count    */
                /* for us.                                              */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust      */
                /* that count.                                          */
                /* This update should really be atomic; since it is    */
                /* not, the results may be inaccurate.                  */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                                (volatile GC_word *)(&GC_words_allocd_tmp),
                                (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                           */
      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
      if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
          my_words_allocd += lw;
          if (my_words_allocd >= BODY_SZ) {
            *opp = obj_link(p);
            obj_link(p) = 0;
            break;
          }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
      }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
          if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
          GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                               - BYTES_TO_WORDS(HBLKSIZE) % lw;
#         ifdef PARALLEL_MARK
            GC_acquire_mark_lock();
            ++ GC_fl_builder_count;
            UNLOCK();
            ENABLE_SIGNALS();
            GC_release_mark_lock();
#         endif

          op = GC_build_fl(h, lw, ok -> ok_init, 0);
#         ifdef PARALLEL_MARK
            *result = op;
            GC_acquire_mark_lock();
            -- GC_fl_builder_count;
            if (GC_fl_builder_count == 0) GC_notify_all_builder();
            GC_release_mark_lock();
            (void) GC_clear_stack(0);
            return;
#         else
            goto out;
#         endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
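
/*
 * Usage sketch (illustrative only): a thread keeps a private free list
 * and refills it with a single locked call.  Objects are linked through
 * their first word; gc.h provides GC_NEXT for following the link.
 *
 *      GC_PTR list = GC_malloc_many(16);
 *      while (list != 0) {
 *          GC_PTR p = list;
 *          list = GC_NEXT(p);
 *          GC_NEXT(p) = 0;            -- clear the link field
 *          ... use p as a 16 byte object ...
 *      }
 */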
# endif

/* Allocate lb bytes of pointerful, traced, but not collectable data   */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                       */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

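/*
 * Usage sketch (illustrative only): uncollectable objects are scanned
 * for pointers but never reclaimed automatically, so they behave as
 * roots and must be released explicitly:
 *
 *      struct tbl *t = (struct tbl *)GC_malloc_uncollectable(sizeof *t);
 *      ...
 *      GC_free(t);     -- required; the collector will not reclaim it
 */
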
#ifdef __STDC__
/* Not well tested nor integrated.      */
/* Debug version is tricky and currently missing.      */
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
#endif

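/*
 * Usage sketch (illustrative only): request a 64-byte-aligned
 * collectable object.  The result may point into a slightly larger
 * object, which is why the displacement is registered above:
 *
 *      void *p = GC_memalign(64, 1000);
 *      GC_ASSERT((word)p % 64 == 0);
 */
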
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                  */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                       */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */