#if USE_TCACHE
/* Maximum number of buckets to use. */
- int tcache_max;
+ size_t tcache_max;
/* Maximum number of chunks in each bucket. */
- int tcache_count;
+ size_t tcache_count;
#endif
};
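For context, the two limits above live in malloc_par (the global mp_) and bound the per-thread cache that the rest of the patch indexes as tcache.entries[] and tcache.counts[]. A minimal sketch of the structures involved, using the names the patch references (TCacheEntry, TCACHE_IDX, tcache.initted); the exact layout here is an assumption, as the real definitions are in an unexcerpted part of the patch:

/* Assumed layout, matching the names the patch references.  */
#define TCACHE_IDX 64                 /* compile-time bucket bound */

typedef struct _TCacheEntry
{
  struct _TCacheEntry *next;          /* per-bucket singly-linked list */
} TCacheEntry;

typedef struct
{
  char initted;                       /* 0 until first use, then 1 */
  char counts[TCACHE_IDX];            /* cached chunks per bucket */
  TCacheEntry *entries[TCACHE_IDX];   /* list heads, one per bucket */
} TCache;

static __thread TCache tcache;        /* one cache per thread */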
__MTB_TRACE_ENTRY (MALLOC, bytes, NULL);
+ void *(*hook) (size_t, const void *)
+ = atomic_forced_read (__malloc_hook);
+ if (__builtin_expect (hook != NULL, 0))
+ {
+ __MTB_TRACE_PATH (hook);
+ __MTB_THREAD_TRACE_DISABLE ();
+ victim = (*hook)(bytes, RETURN_ADDRESS (0));
+ __MTB_THREAD_TRACE_ENABLE ();
+ __MTB_TRACE_RECORD ();
+ if (victim != NULL)
+ __MTB_TRACE_SET (size3, chunksize (mem2chunk (victim)));
+ return victim;
+ }
+
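The hunk above moves the __malloc_hook dispatch ahead of the tcache fast path, so a registered hook still bypasses the cache entirely. For reference, a hook follows the classic save/restore pattern from the glibc manual; the logging body below is illustrative only:

#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t bytes, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;    /* uninstall to avoid recursion */
  result = malloc (bytes);
  old_malloc_hook = __malloc_hook;    /* malloc may have changed it */
  /* fprintf may itself call malloc, so it runs while unhooked.  */
  fprintf (stderr, "malloc (%zu) from %p returns %p\n",
           bytes, caller, result);
  __malloc_hook = my_malloc_hook;     /* reinstall ourselves */
  return result;
}

static void
install_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}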
#if USE_TCACHE
/* _int_free also calls request2size, be careful not to pad twice.  */
size_t tbytes = request2size (bytes);
- int tc_idx = size2tidx (tbytes);
+ size_t tc_idx = size2tidx (tbytes);
if (tcache.initted == 0)
{
}
if (tc_idx < mp_.tcache_max
+ && tc_idx < TCACHE_IDX /* to appease gcc */
&& tcache.entries[tc_idx] != NULL
&& tcache.initted == 1)
{
}
#endif
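The caution about padding twice is the crux of the fast path: request2size already rounds the user request up to a chunk size, and the bucket index must be derived from that padded value exactly once on both the get and put sides. A worked example with hypothetical constants (64-bit SIZE_SZ == 8, 16-byte buckets; the patch defines size2tidx elsewhere):

/* Illustrative only -- assumed bucketing, not the patch's.  */
#define size2tidx(bytes) (((bytes) + 15) >> 4)

/* malloc (24): request2size (24) == 32 (request + 8-byte header,
   rounded up), so tc_idx == size2tidx (32) == 2.  Padding again --
   request2size (32) == 48 -- would give tc_idx == 3, a different
   bucket than the one _int_free fills.  Hence: pad exactly once.  */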
- void *(*hook) (size_t, const void *)
- = atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
- {
- __MTB_TRACE_PATH (hook);
- __MTB_THREAD_TRACE_DISABLE ();
- victim = (*hook)(bytes, RETURN_ADDRESS (0));
- __MTB_THREAD_TRACE_ENABLE ();
- __MTB_TRACE_RECORD ();
- if (victim != NULL)
- __MTB_TRACE_SET (size3, chunksize (mem2chunk (victim)));
- return victim;
- }
-
#if 0 && USE_TCACHE
/* This is fast but causes internal fragmentation, as it always
   pulls large chunks but puts small chunks, leading to a large
   backlog of chunks of the wrong size.  */
int n = chunksize (chunk) / original_nb;
mchunkptr m;
TCacheEntry *e;
- int tc_idx = size2tidx (original_nb - SIZE_SZ);
- int bits = chunk->size & SIZE_BITS;
+ size_t tc_idx = size2tidx (original_nb - SIZE_SZ);
+ size_t bits = chunk->size & SIZE_BITS;
- if (tc_idx > mp_.tcache_max)
+ if (tc_idx >= mp_.tcache_max)
return chunk;
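The disabled block above splits one large chunk into n same-sized pieces to prefill the bucket, which is exactly what the fragmentation warning is about. A worked example with assumed numbers:

/* With original_nb == 32, pulling a 160-byte chunk gives
   n == 160 / 32 == 5 pieces: one is returned, four are stashed in
   the 32-byte bucket.  The 160-byte chunk is gone for good; its
   memory can now satisfy only 32-byte requests, which is the
   internal fragmentation the comment warns about.  */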
/* Given a chunk of size ACTUAL_SIZE and a user request of size
DESIRED_SIZE, compute the largest ACTUAL_SIZE that would fill the
tcache. */
-static int
+static size_t
_tcache_maxsize (INTERNAL_SIZE_T desired_size, INTERNAL_SIZE_T actual_size)
{
- if (size2tidx(desired_size-SIZE_SZ) > mp_.tcache_max)
+ if (size2tidx (desired_size - SIZE_SZ) >= mp_.tcache_max)
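Only the guard of _tcache_maxsize survives in this excerpt. The idea is to cap splitting at what the bucket can still absorb: tcache_count minus the chunks already cached, times the chunk size. A plausible completion under the structures sketched earlier, named _tcache_maxsize_sketch because the patch's actual arithmetic may differ:

static size_t
_tcache_maxsize_sketch (INTERNAL_SIZE_T desired_size,
                        INTERNAL_SIZE_T actual_size)
{
  size_t tc_idx = size2tidx (desired_size - SIZE_SZ);
  if (tc_idx >= mp_.tcache_max)
    return 0;                              /* not cacheable at all */
  /* Bytes needed to top up this bucket, capped by what we have.  */
  size_t room = (mp_.tcache_count - tcache.counts[tc_idx]) * desired_size;
  return room < actual_size ? room : actual_size;
}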
#if USE_TCACHE
/* While we're here, if we see other chunks of the same size,
   stash them in the tcache.  */
- int tc_idx = size2tidx (nb-SIZE_SZ);
+ size_t tc_idx = size2tidx (nb-SIZE_SZ);
if (tc_idx < mp_.tcache_max)
{
mchunkptr tc_victim;
#if USE_TCACHE
/* While we're here, if we see other chunks of the same size,
   stash them in the tcache.  */
- int tc_idx = size2tidx (nb-SIZE_SZ);
+ size_t tc_idx = size2tidx (nb-SIZE_SZ);
if (tc_idx < mp_.tcache_max)
{
mchunkptr tc_victim;
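Both hunks guard the same opportunistic refill: after taking the chunk that satisfies the request, the bin is walked for more chunks of the identical size, which are moved into the bucket until it is full. A sketch of the loop's shape (fastbin flavor; *fb, tc_victim, and the unlink details are assumptions about elided code):

while (tcache.counts[tc_idx] < mp_.tcache_count
       && (tc_victim = *fb) != NULL)
  {
    *fb = tc_victim->fd;                     /* unlink from the bin */
    TCacheEntry *e = (TCacheEntry *) chunk2mem (tc_victim);
    e->next = tcache.entries[tc_idx];        /* push onto the bucket */
    tcache.entries[tc_idx] = e;
    tcache.counts[tc_idx]++;
  }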
//INTERNAL_SIZE_T tcache_max = 0;
- if (size2tidx (nb-SIZE_SZ) <= mp_.tcache_max)
+ if (size2tidx (nb-SIZE_SZ) < mp_.tcache_max)
{
- //int tc_idx = size2tidx (bytes);
+ //size_t tc_idx = size2tidx (bytes);
tcache_nb = nb;
//tcache_max = nb * (mp_.tcache_count - tcache.counts[tc_idx]);
}
- int tc_idx = size2tidx (nb-SIZE_SZ);
+ size_t tc_idx = size2tidx (nb-SIZE_SZ);
int return_cached = 0;
#endif
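return_cached defers the payoff of that refill: rather than returning the first exact fit found during the unsorted-bin scan, the scan keeps stashing fits and returns a cached entry afterwards. A sketch of the consuming side; its placement and details are assumptions:

if (return_cached)
  {
    TCacheEntry *e = tcache.entries[tc_idx];
    tcache.entries[tc_idx] = e->next;        /* pop the bucket head */
    tcache.counts[tc_idx]--;
    return (void *) e;
  }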
#if USE_TCACHE
{
- int tc_idx = size2tidx (size - SIZE_SZ);
+ size_t tc_idx = size2tidx (size - SIZE_SZ);
if (tc_idx < mp_.tcache_max
&& tcache.counts[tc_idx] < mp_.tcache_count
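The condition above is the free-side gate: the chunk's bucket index must be in range and the bucket below its per-chunk cap before the chunk is cached instead of binned. The put it guards is presumably the same push as the malloc-side stash loop; a sketch under the earlier assumptions:

TCacheEntry *e = (TCacheEntry *) chunk2mem (p);
e->next = tcache.entries[tc_idx];
tcache.entries[tc_idx] = e;
tcache.counts[tc_idx]++;
return;                       /* cached; skip the normal bins */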