/* Alternative malloc implementation for multiple threads without
lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas

Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
/* Enable full aliasing on MSVC */
/*#pragma optimize("a", on)*/

#pragma GCC diagnostic ignored "-Wunused-parameter"

/*#define FULLSANITYCHECKS*/

#include "nedmalloc.h"

#define ONLY_MSPACES 1
#define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */
#undef DEBUG      /* dlmalloc wants DEBUG either 0 or 1 */
#ifdef NDEBUG     /* Disable assert checking on release builds */

/* The default of 64Kb means we spend too much time kernel-side */
#ifndef DEFAULT_GRANULARITY
#define DEFAULT_GRANULARITY (1*1024*1024)
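/* DEFAULT_GRANULARITY is the unit by which each mspace grows its footprint from
   the system, so 1Mb here means roughly sixteen times fewer kernel transitions
   than the 64Kb default whenever a pool has to expand. */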
/*#define USE_SPIN_LOCKS 0*/

/*#define FORCEINLINE*/

#ifdef NDEBUG     /* Disable assert checking on release builds */

/* The maximum concurrent threads in a pool possible */
#ifndef MAXTHREADSINPOOL
#define MAXTHREADSINPOOL 16

/* The maximum number of threadcaches which can be allocated */
#ifndef THREADCACHEMAXCACHES
#define THREADCACHEMAXCACHES 256

/* The maximum size to be allocated from the thread cache */
#ifndef THREADCACHEMAX
#define THREADCACHEMAX 8192
#ifdef FINEGRAINEDBINS
/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
#define THREADCACHEMAXBINS ((13-4)*2)
#else
/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
#define THREADCACHEMAXBINS (13-4)
#endif
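/* Worked through with the defaults: THREADCACHEMAX is 8192 and topbitpos(8192)==13,
   so THREADCACHEMAXBINS evaluates to 9, or 18 when FINEGRAINEDBINS splits each
   power-of-two size class in two. Every bin keeps a head and a tail pointer, which
   is why threadcache_t below declares bins[(THREADCACHEMAXBINS+1)*2]. */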
/* Point at which the free space in a thread cache is garbage collected */
#ifndef THREADCACHEMAXFREESPACE
#define THREADCACHEMAXFREESPACE (512*1024)

#define TLSALLOC(k)  (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
#define TLSFREE(k)   (!TlsFree(k))
#define TLSGET(k)    TlsGetValue(k)
#define TLSSET(k, a) (!TlsSetValue(k, a))
static LPVOID ChkedTlsGetValue(DWORD idx)
{
  LPVOID ret=TlsGetValue(idx);
  assert(S_OK==GetLastError());
  return ret;
}
#define TLSGET(k) ChkedTlsGetValue(k)
#define TLSVAR       pthread_key_t
#define TLSALLOC(k)  pthread_key_create(k, 0)
#define TLSFREE(k)   pthread_key_delete(k)
#define TLSGET(k)    pthread_getspecific(k)
#define TLSSET(k, a) pthread_setspecific(k, a)
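/* Both the Win32 and pthreads variants of these wrappers follow the same convention:
   TLSALLOC/TLSFREE/TLSSET evaluate to zero on success and non-zero on failure, and
   TLSGET returns the stored pointer (or 0 if nothing has been set yet). That is why
   call sites below read `if(TLSSET(...)) abort();`. */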
/* Only enable if testing with valgrind. Causes misoperation */
#define mspace_malloc(p, s) malloc(s)
#define mspace_realloc(p, m, s) realloc(m, s)
#define mspace_calloc(p, n, s) calloc(n, s)
#define mspace_free(p, m) free(m)
#if defined(__cplusplus)
#if !defined(NO_NED_NAMESPACE)

size_t nedblksize(void *mem) THROWSPEC
  /* Only enable if testing with valgrind. Causes misoperation */
  return THREADCACHEMAX;

  mchunkptr p=mem2chunk(mem);
  assert(cinuse(p)); /* If this fails, someone tried to free a block twice */
  return chunksize(p)-overhead_for(p);
void   nedsetvalue(void *v) THROWSPEC                         { nedpsetvalue(0, v); }
void * nedmalloc(size_t size) THROWSPEC                       { return nedpmalloc(0, size); }
void * nedcalloc(size_t no, size_t size) THROWSPEC            { return nedpcalloc(0, no, size); }
void * nedrealloc(void *mem, size_t size) THROWSPEC           { return nedprealloc(0, mem, size); }
void   nedfree(void *mem) THROWSPEC                           { nedpfree(0, mem); }
void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC  { return nedpmemalign(0, alignment, bytes); }

struct mallinfo nedmallinfo(void) THROWSPEC                   { return nedpmallinfo(0); }

int    nedmallopt(int parno, int value) THROWSPEC             { return nedpmallopt(0, parno, value); }
int    nedmalloc_trim(size_t pad) THROWSPEC                   { return nedpmalloc_trim(0, pad); }
void   nedmalloc_stats(void) THROWSPEC                        { nedpmalloc_stats(0); }
size_t nedmalloc_footprint(void) THROWSPEC                    { return nedpmalloc_footprint(0); }
void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); }
void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC   { return nedpindependent_comalloc(0, elems, sizes, chunks); }
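/* These thin wrappers simply forward to the pool-aware nedp*() calls with a null pool,
   which selects the built-in system pool. A minimal, illustrative use (assuming only
   the declarations from nedmalloc.h above):

     void *buf = nedmalloc(256);        // served from syspool via this thread's cache
     buf = nedrealloc(buf, 1024);
     size_t usable = nedblksize(buf);   // usable size may exceed the requested size
     nedfree(buf);
*/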
struct threadcacheblk_t;
typedef struct threadcacheblk_t threadcacheblk;
struct threadcacheblk_t
{ /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */
#ifdef FULLSANITYCHECKS
  unsigned int lastUsed, size;
  threadcacheblk *next, *prev;
typedef struct threadcache_t
#ifdef FULLSANITYCHECKS
  int mymspace;                  /* Last mspace entry this thread used */
  unsigned int mallocs, frees, successes;
  size_t freeInCache;            /* How much free space is stored in this cache */
  threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2];
#ifdef FULLSANITYCHECKS

  int threads;                   /* Max entries in m to use */
  threadcache *caches[THREADCACHEMAXCACHES];
  TLSVAR mycache;                /* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
  mstate m[MAXTHREADSINPOOL+1];  /* mspace entries for this pool */

static nedpool syspool;
static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC
{ /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */
  unsigned int topbit, size=(unsigned int)(_size>>4);
  /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */

#if defined(__GNUC__)
  topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
#elif defined(_MSC_VER) && _MSC_VER>=1300
  unsigned long bsrTopBit;
  _BitScanReverse(&bsrTopBit, size);

  asDouble = (double)size + 0.5;
  topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;

  x = x - ((x >> 1) & 0x55555555);
  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
  x = (x + (x >> 4)) & 0x0F0F0F0F;
  topbit = 31 - (x >> 24);
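/* Worked example of the topbit step above: a request of _size==96 gives
   size==96>>4==6 (binary 110), whose highest set bit is bit 2, so topbit==2;
   _size==16 gives size==1 and topbit==0. The elided tail of this function maps
   topbit onto the thread-cache bin index used by the routines below. */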
#ifdef FULLSANITYCHECKS
static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC
  assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
    assert(nedblksize(ptr[0])>=sizeof(threadcacheblk));
    assert(nedblksize(ptr[1])>=sizeof(threadcacheblk));
    assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
    assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
    assert(!ptr[0]->prev);
    assert(!ptr[1]->next);
      assert(!ptr[0]->next);
      assert(!ptr[1]->prev);

static void tcfullsanitycheck(threadcache *tc) THROWSPEC
  threadcacheblk **tcbptr=tc->bins;
  for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
    threadcacheblk *b, *ob=0;
    tcsanitycheck(tcbptr);
    for(b=tcbptr[0]; b; ob=b, b=b->next)
      assert(*(unsigned int *) "NEDN"==b->magic);
      assert(!ob || ob->next==b);
      assert(!ob || b->prev==ob);
static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC
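/* tc->frees acts as a logical clock (it is incremented on every threadcache_free and
   stamped into each block's lastUsed field), so the loop below evicts any cached block
   that has sat unused for at least `age` frees; age==0 therefore empties the cache. */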
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);

  threadcacheblk **tcbptr=tc->bins;
  for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
    threadcacheblk **tcb=tcbptr+1; /* come from oldest end of list */
    /*tcsanitycheck(tcbptr);*/
    for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
      threadcacheblk *f=*tcb;
      size_t blksize=f->size; /*nedblksize(f);*/
      assert(blksize<=nedblksize(f));
#ifdef FULLSANITYCHECKS
      assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
      tc->freeInCache-=blksize;
      assert((long) tc->freeInCache>=0);
    /*tcsanitycheck(tcbptr);*/
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);
static void DestroyCaches(nedpool *p) THROWSPEC
  for(n=0; n<THREADCACHEMAXCACHES; n++)
    if((tc=p->caches[n]))
      RemoveCacheEntries(p, tc, 0);
      assert(!tc->freeInCache);
static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC
  ACQUIRE_LOCK(&p->mutex);
  for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
  if(THREADCACHEMAXCACHES==n)
  { /* List exhausted, so disable for this thread */
    RELEASE_LOCK(&p->mutex);

  tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache));
    RELEASE_LOCK(&p->mutex);
#ifdef FULLSANITYCHECKS
  tc->magic1=*(unsigned int *)"NEDMALC1";
  tc->magic2=*(unsigned int *)"NEDMALC2";
  tc->threadid=(long)(size_t)CURRENT_THREAD;
  for(end=0; p->m[end]; end++);
  tc->mymspace=tc->threadid % end;
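/* threadid % end picks this thread's default mspace by hashing its thread id over the
   mspaces that currently exist, so different threads tend to start out on different
   mspaces and contend less for the same allocator lock. */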
  RELEASE_LOCK(&p->mutex);
  if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC
  unsigned int bestsize;
  unsigned int idx=size2binidx(*size);
  threadcacheblk *blk, **binsptr;
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);

  /* Calculate best fit bin size */
    /* Finer grained bin fit */
      bestsize+=bestsize>>1;
    bestsize=1<<(4+(idx>>1));
  assert(bestsize>=*size);
  if(*size<bestsize) *size=bestsize;
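/* The request is rounded up to the bin's nominal size here so that every block stored
   in a given bin is interchangeable: a later malloc of any size mapping to this bin
   can safely be satisfied by any block on that bin's list. */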
  assert(*size<=THREADCACHEMAX);
  assert(idx<=THREADCACHEMAXBINS);
  binsptr=&tc->bins[idx*2];
  /* Try to match close, but move up a bin if necessary */
  if(!blk || blk->size<*size)
  { /* Bump it up a bin */
    if(idx<THREADCACHEMAXBINS)

    blksize=blk->size; /*nedblksize(blk);*/
    assert(nedblksize(blk)>=blksize);
    assert(blksize>=*size);
#ifdef FULLSANITYCHECKS
    assert(binsptr[0]!=blk && binsptr[1]!=blk);
    assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD);
    /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/
    assert(blksize>=*size);
    tc->freeInCache-=blksize;
    assert((long) tc->freeInCache>=0);
#if defined(DEBUG) && 0
  if(!(tc->mallocs & 0xfff))
    printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
           (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);
static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC
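/* Garbage collects a cache that has grown past THREADCACHEMAXFREESPACE. The starting
   age of THREADCACHEMAXFREESPACE/8192 (64 with the defaults) only evicts long-idle
   blocks; the elided loop body presumably lowers age on each pass, so eviction becomes
   progressively more aggressive until the cache drops back under the limit. */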
  unsigned int age=THREADCACHEMAXFREESPACE/8192;
  /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
  while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
    RemoveCacheEntries(p, tc, age);
    /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
  /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC
  unsigned int bestsize;
  unsigned int idx=size2binidx(size);
  threadcacheblk **binsptr, *tck=(threadcacheblk *) mem;
  assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
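/* The freed block's own storage is reused as its list node: tck points at mem and the
   lastUsed/size/next/prev fields of threadcacheblk are written into it below, which is
   why nothing smaller than sizeof(threadcacheblk) is ever allowed into the cache. */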
  { /* Make sure this is a valid memory block */
    mchunkptr p = mem2chunk(mem);
    mstate fm = get_mstate_for(p);
      USAGE_ERROR_ACTION(fm, p);
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);
  /* Calculate best fit bin size */
    /* Finer grained bin fit */
    unsigned int biggerbestsize=bestsize+bestsize<<1;
    if(size>=biggerbestsize)
      bestsize=biggerbestsize;
  if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */
  binsptr=&tc->bins[idx*2];
  assert(idx<=THREADCACHEMAXBINS);
    fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void *)tck);
#ifdef FULLSANITYCHECKS
  tck->magic=*(unsigned int *) "NEDN";
  tck->lastUsed=++tc->frees;
  tck->size=(unsigned int) size;
  assert(!*binsptr || (*binsptr)->size==tck->size);
  assert(tck==tc->bins[idx*2]);
  assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck);
  /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/
  tc->freeInCache+=size;
#ifdef FULLSANITYCHECKS
  tcfullsanitycheck(tc);
  if(tc->freeInCache>=THREADCACHEMAXFREESPACE)
    ReleaseFreeInCache(p, tc, mymspace);
static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC
{ /* threads is -1 for system pool */
  ensure_initialization();
  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if(p->threads) goto done;
  if(INITIAL_LOCK(&p->mutex)) goto err;
  if(TLSALLOC(&p->mycache)) goto err;
  if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err;
  p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads;
  RELEASE_MALLOC_GLOBAL_LOCK();
    abort(); /* If you can't allocate for system pool, we're screwed */
  destroy_mspace(p->m[0]);
  if(TLSFREE(p->mycache)) abort();
  RELEASE_MALLOC_GLOBAL_LOCK();
static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC
{ /* Gets called when thread's last used mspace is in use. The strategy
     is to run through the list of all available mspaces looking for an
     unlocked one and if we fail, we create a new one so long as we don't
     exceed the pool's thread limit. */
  for(n=end=*lastUsed+1; p->m[n]; end=++n)
    if(TRY_LOCK(&p->m[n]->mutex)) goto found;
  for(n=0; n<*lastUsed && p->m[n]; n++)
    if(TRY_LOCK(&p->m[n]->mutex)) goto found;

  if(!(temp=(mstate) create_mspace(size, 1)))

  /* Now we're ready to modify the lists, we lock */
  ACQUIRE_LOCK(&p->mutex);
  while(p->m[end] && end<p->threads)
  { /* Drat, must destroy it now */
    RELEASE_LOCK(&p->mutex);
    destroy_mspace((mspace) temp);

  /* We really want to make sure this goes into memory now but we
     have to be careful of breaking aliasing rules, so write it twice */
  volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end];
  *_m=(p->m[end]=temp);
  ACQUIRE_LOCK(&p->m[end]->mutex);
  /*printf("Created mspace idx %d\n", end);*/
  RELEASE_LOCK(&p->mutex);

  /* Let it lock on the last one it used */
  ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
  return p->m[*lastUsed];

  if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
  if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0;
  if(!InitPool(ret, capacity, threads))
void neddestroypool(nedpool *p) THROWSPEC
  ACQUIRE_LOCK(&p->mutex);
  for(n=0; p->m[n]; n++)
    destroy_mspace(p->m[n]);
  RELEASE_LOCK(&p->mutex);
  if(TLSFREE(p->mycache)) abort();
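/* Illustrative use of the pool API above (the parameter values are only examples):
   nedcreatepool(0, 4) builds a pool with the default capacity and a cap of four
   mspaces; nedpmalloc/nedpfree against that pool never contend with allocations made
   in other pools, and neddestroypool releases every mspace at once:

     nedpool *pool = nedcreatepool(0, 4);
     void *x = nedpmalloc(pool, 128);
     nedpfree(pool, x);
     neddestroypool(pool);
*/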
void nedpsetvalue(nedpool *p, void *v) THROWSPEC
  if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }

void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
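/* Before trusting mem, the checks below mirror the sanity tests dlmalloc itself applies
   to a chunk (alignment, in-use bits, linkage with the neighbouring chunks) and then
   verify the owning mstate via its magic and address range; only after all of that is
   the mstate's extp field read back as the owning nedpool to report its uservalue. */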
  mchunkptr mcp=mem2chunk(mem);
  if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0;
  if(!cinuse(mcp)) return 0;
  if(!next_pinuse(mcp)) return 0;
  if(!is_mmapped(mcp) && !pinuse(mcp))
    if(next_chunk(prev_chunk(mcp))!=mcp) return 0;
  fm=get_mstate_for(mcp);
  if(!ok_magic(fm)) return 0;
  if(!ok_address(fm, mcp)) return 0;
  if(!fm->extp) return 0;
  np=(nedpool *) fm->extp;
  return np->uservalue;
void neddisablethreadcache(nedpool *p) THROWSPEC
    if(!syspool.threads) InitPool(&syspool, 0, -1);
  mycache=(int)(size_t) TLSGET(p->mycache);
  { /* Set to mspace 0 */
    if(TLSSET(p->mycache, (void *)-1)) abort();
  { /* Set to last used mspace */
    threadcache *tc=p->caches[mycache-1];
    printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
           100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
    if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
    RemoveCacheEntries(p, tc, 0);
    assert(!tc->freeInCache);
    mspace_free(0, p->caches[mycache-1]);
    p->caches[mycache-1]=0;
#define GETMSPACE(m,p,tc,ms,s,action)        \
    mstate m = GetMSpace((p),(tc),(ms),(s)); \
    RELEASE_LOCK(&m->mutex);
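/* GETMSPACE is the wrapper around GetMSpace(): it binds a locked mspace to the local
   name `m`, runs `action` (the elided middle of the macro) with that lock held, and
   releases the lock afterwards, so the callers below never touch the mutex directly. */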
static FORCEINLINE mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC
{ /* Returns a locked and ready for use mspace */
  mstate m=p->m[mymspace];
  if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
  /*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC
  if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
    if(!syspool.threads) InitPool(&syspool, 0, -1);
  mycache=(int)(size_t) TLSGET((*p)->mycache);
    *tc=(*p)->caches[mycache-1];
    *mymspace=(*tc)->mymspace;
      if(TLSSET((*p)->mycache, (void *)-1)) abort();
      *mymspace=(*tc)->mymspace;
    *mymspace=-mycache-1;
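/* Decoding the TLS value: 0 means "not initialised yet", a positive value c selects
   thread cache caches[c-1], and a negative value selects an mspace directly, so for
   example mycache==-3 yields *mymspace==-(-3)-1==2 with no thread cache at all. */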
  assert(*mymspace>=0);
  assert((long)(size_t)CURRENT_THREAD==(*tc)->threadid);
#ifdef FULLSANITYCHECKS
  if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
  GetThreadCache(&p, &tc, &mymspace, &size);
  if(tc && size<=THREADCACHEMAX)
  { /* Use the thread cache */
    ret=threadcache_malloc(p, tc, &size);
  { /* Use this thread's mspace */
    GETMSPACE(m, p, tc, mymspace, size,
              ret=mspace_malloc(m, size));
void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
  size_t rsize=size*no;
  GetThreadCache(&p, &tc, &mymspace, &rsize);
  if(tc && rsize<=THREADCACHEMAX)
  { /* Use the thread cache */
    if((ret=threadcache_malloc(p, tc, &rsize)))
      memset(ret, 0, rsize);
  { /* Use this thread's mspace */
    GETMSPACE(m, p, tc, mymspace, rsize,
              ret=mspace_calloc(m, 1, rsize));
void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
  if(!mem) return nedpmalloc(p, size);
  GetThreadCache(&p, &tc, &mymspace, &size);
  if(tc && size && size<=THREADCACHEMAX)
  { /* Use the thread cache */
    size_t memsize=nedblksize(mem);
    if((ret=threadcache_malloc(p, tc, &size)))
      memcpy(ret, mem, memsize<size ? memsize : size);
      if(memsize<=THREADCACHEMAX)
        threadcache_free(p, tc, mymspace, mem, memsize);
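/* In the cached path a realloc is a fresh cache allocation plus a copy of
   min(old size, new size); the old block then goes back into this thread's cache
   rather than to its mspace, provided it is itself small enough to be cached. */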
  { /* Reallocs always happen in the mspace they happened in, so skip
       locking the preferred mspace for this thread */
    ret=mspace_realloc(0, mem, size);
void nedpfree(nedpool *p, void *mem) THROWSPEC
{ /* Frees always happen in the mspace they happened in, so skip
     locking the preferred mspace for this thread */
  GetThreadCache(&p, &tc, &mymspace, 0);
  memsize=nedblksize(mem);
  if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
    threadcache_free(p, tc, mymspace, mem, memsize);
void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
  GetThreadCache(&p, &tc, &mymspace, &bytes);
  { /* Use this thread's mspace */
    GETMSPACE(m, p, tc, mymspace, bytes,
              ret=mspace_memalign(m, alignment, bytes));
struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC
  struct mallinfo ret={0};
  if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
  for(n=0; p->m[n]; n++)
    struct mallinfo t=mspace_mallinfo(p->m[n]);
    ret.ordblks+=t.ordblks;
    ret.hblkhd+=t.hblkhd;
    ret.usmblks+=t.usmblks;
    ret.uordblks+=t.uordblks;
    ret.fordblks+=t.fordblks;
    ret.keepcost+=t.keepcost;
int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
  return mspace_mallopt(parno, value);
int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
  if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
  for(n=0; p->m[n]; n++)
    ret+=mspace_trim(p->m[n], pad);
void nedpmalloc_stats(nedpool *p) THROWSPEC
  if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
  for(n=0; p->m[n]; n++)
    mspace_malloc_stats(p->m[n]);
size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
  if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
  for(n=0; p->m[n]; n++)
    ret+=mspace_footprint(p->m[n]);
void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
  GetThreadCache(&p, &tc, &mymspace, &elemsize);
  GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
            ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
  size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
  if(!adjustedsizes) return 0;
  for(i=0; i<elems; i++)
    adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
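/* Each requested size is bumped up to at least sizeof(threadcacheblk) for the same
   reason GetThreadCache() does it: if one of these blocks is later freed through the
   thread cache, its storage must be large enough to hold the cache's list node. */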
  GetThreadCache(&p, &tc, &mymspace, 0);
  GETMSPACE(m, p, tc, mymspace, 0,
            ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));

#if defined(__cplusplus)