void *victim;
#if USE_TCACHE
- bytes = request2size(bytes);
- int tc_idx = size2tidx (bytes);
+ /* _int_free also calls request2size; be careful not to pad twice. */
+ size_t tbytes = request2size (bytes);
+ int tc_idx = size2tidx (tbytes);
if (tcache.initted == 0)
{
__MTB_TRACE_ENTRY (MALLOC, bytes, NULL);
#if USE_TCACHE
- if (bytes < MAX_TCACHE_SIZE
+ if (tbytes < MAX_TCACHE_SIZE
&& tcache.entries[tc_idx] != NULL
&& tcache.initted == 1)
{
/* This is fast but causes internal fragmentation, as it always
pulls large chunks but puts small chunks, leading to a large
backlog of small chunks. */
- if (bytes < MAX_TCACHE_SIZE
+ if (tbytes < MAX_TCACHE_SIZE
&& tcache.initted == 1)
{
void *ent;
size_t total_bytes;
int i;
- assert (tc_bytes >= bytes);
+ assert (tc_bytes >= tbytes);
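/* Below, tc_bytes is clamped to 2 * SIZE_SZ, the smallest
   payload a malloc chunk can carry.  */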
if (tc_bytes < 2 * SIZE_SZ)
tc_bytes = 2 * SIZE_SZ;
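For reference, request2size pads the user request with chunk overhead and rounds up to the allocation alignment, so applying it twice produces a strictly larger size and therefore a different tcache index than the one _int_free will compute; keeping the padded size in the separate tbytes variable avoids that. A minimal standalone sketch of the hazard follows (the constants and the simplified request2size are illustrative assumptions for an LP64 target with 16-byte alignment, not glibc's exact macros):

#include <assert.h>
#include <stdio.h>

#define SIZE_SZ            sizeof (size_t)
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE            (4 * SIZE_SZ)

/* Simplified stand-in for glibc's request2size: add header overhead,
   round up to the alignment, enforce the minimum chunk size.  */
static size_t
request2size (size_t req)
{
  size_t padded = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
  return padded < MINSIZE ? MINSIZE : padded;
}

int
main (void)
{
  size_t bytes = 24;
  size_t once = request2size (bytes);   /* 32 on LP64 */
  size_t twice = request2size (once);   /* 48: the pad is applied again */
  printf ("once=%zu twice=%zu\n", once, twice);
  assert (once != twice);  /* a double-padded size lands in a different bin */
  return 0;
}

Running this prints once=32 twice=48 on a typical 64-bit target, which is exactly the mismatch the patch guards against: if malloc padded bytes in place and _int_free padded it again, the two sides would disagree on the tcache bin.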