# define TCACHE_FILL_COUNT 7
/* Maximum chunks in tcache bins for tunables. This value must fit the range
- of tcache->counts[] entries, else they may overflow. */
+ of tcache->num_slots[] entries, else they may overflow. */
# define MAX_TCACHE_COUNT UINT16_MAX
#endif
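A hedged aside (not part of the patch): since each per-bin slot counter is a uint16_t, the constraint described in the comment above could be expressed as a compile-time check along these lines.

/* Illustrative sketch only: the tunable ceiling must be representable in
   the uint16_t per-bin counters, or they could wrap around.  */
_Static_assert (MAX_TCACHE_COUNT <= UINT16_MAX,
                "MAX_TCACHE_COUNT must fit in a uint16_t slot counter");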
/* There is one of these for each thread, which contains the
per-thread cache (hence "tcache_perthread_struct"). Keeping
- overall size low is mildly important. Note that COUNTS and ENTRIES
- are redundant (we could have just counted the linked list each
- time), this is for performance reasons. */
+ overall size low is mildly important. The 'entries' field is a linked
+ list of free blocks, while 'num_slots' contains the number of free blocks
+ that can still be added. Each bin may allow a different maximum number of
+ free blocks, and can be disabled by initializing 'num_slots' to zero. */
typedef struct tcache_perthread_struct
{
- uint16_t counts[TCACHE_MAX_BINS];
+ uint16_t num_slots[TCACHE_MAX_BINS];
tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
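A minimal standalone sketch of the inverted counting scheme the patch adopts (simplified stand-ins, not the glibc internals): each bin starts with its capacity in 'num_slots', a put consumes a slot, a get releases one, and a bin whose counter is zero is either full or disabled.

#include <stddef.h>
#include <stdint.h>

#define NBINS 64                     /* stand-in for TCACHE_MAX_BINS */

struct tiny_cache
{
  uint16_t num_slots[NBINS];         /* free slots remaining per bin */
  void *entries[NBINS];              /* singly linked list of free blocks */
};

/* Accept BLOCK into bin IDX if a slot remains; returns nonzero on success.  */
static int
tiny_put (struct tiny_cache *tc, size_t idx, void **block)
{
  if (tc->num_slots[idx] == 0)       /* bin full, or disabled at init time */
    return 0;
  *block = tc->entries[idx];         /* link block onto the free list */
  tc->entries[idx] = block;
  tc->num_slots[idx]--;              /* one slot consumed */
  return 1;
}

/* Pop a block from bin IDX, or return NULL.  Emptiness is checked via the
   list head, not the counter.  */
static void *
tiny_get (struct tiny_cache *tc, size_t idx)
{
  void **block = tc->entries[idx];
  if (block == NULL)
    return NULL;
  tc->entries[idx] = *block;         /* unlink the head block */
  tc->num_slots[idx]++;              /* a slot is available again */
  return block;
}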
e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
+ --(tcache->num_slots[tc_idx]);
}
/* Caller must ensure that we know tc_idx is valid and there's
else
*ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
- --(tcache->counts[tc_idx]);
+ ++(tcache->num_slots[tc_idx]);
e->key = 0;
return (void *) e;
}
{
if (tc_idx < mp_.tcache_bins
&& tcache != NULL
- && tcache->counts[tc_idx] > 0)
+ && tcache->entries[tc_idx] != NULL)
return true;
else
return false;
{
tcache = (tcache_perthread_struct *) victim;
memset (tcache, 0, sizeof (tcache_perthread_struct));
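+ /* Start every bin with the default capacity (the tcache_count tunable);
+    a bin left at zero slots stays disabled. */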
+ for (int i = 0; i < TCACHE_MAX_BINS; i++)
+ tcache->num_slots[i] = mp_.tcache_count;
}
}
if (__glibc_unlikely (e->key == tcache_key))
return tcache_double_free_verify (e);
- if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count))
+ if (__glibc_likely (tcache->num_slots[tc_idx] != 0))
return tcache_put (p, tc_idx);
}
#endif
mchunkptr tc_victim;
/* While bin not empty and tcache not full, copy chunks. */
- while (tcache->counts[tc_idx] < mp_.tcache_count
- && (tc_victim = *fb) != NULL)
+ while (tcache->num_slots[tc_idx] != 0 && (tc_victim = *fb) != NULL)
{
if (__glibc_unlikely (misaligned_chunk (tc_victim)))
malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
mchunkptr tc_victim;
/* While bin not empty and tcache not full, copy chunks over. */
- while (tcache->counts[tc_idx] < mp_.tcache_count
+ while (tcache->num_slots[tc_idx] != 0
&& (tc_victim = last (bin)) != bin)
{
if (tc_victim != NULL)
#if USE_TCACHE
/* Fill cache first, return to user only if cache fills.
We may return one of these chunks later. */
- if (tcache_nb > 0
- && tcache->counts[tc_idx] < mp_.tcache_count)
+ if (tcache_nb > 0 && tcache->num_slots[tc_idx] != 0)
{
tcache_put (victim, tc_idx);
return_cached = 1;