# define TCACHE_IDX ((MAX_TCACHE_SIZE / MALLOC_ALIGNMENT) + 1)
# define size2tidx_(bytes) (((bytes) + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
-# define tidx2csize(idx) ((idx)*MALLOC_ALIGNMENT + SIZE_SZ)
-# define tidx2usize(idx) ((idx)*MALLOC_ALIGNMENT)
+# define tidx2csize(idx) ((idx) * MALLOC_ALIGNMENT + SIZE_SZ)
+# define tidx2usize(idx) ((idx) * MALLOC_ALIGNMENT)
/* When "x" is a user-provided size. */
-# define usize2tidx(x) size2tidx_(x)
+# define usize2tidx(x) size2tidx_ (x)
/* When "x" is from chunksize(). */
-# define csize2tidx(x) size2tidx_((x)-SIZE_SZ)
+# define csize2tidx(x) size2tidx_ ((x) - SIZE_SZ)
/* Rounds up, so...
   idx 0 bytes 0
   idx 1 bytes 1..MALLOC_ALIGNMENT
   etc.  */
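/* A worked example of the conversion macros (assuming a typical 64-bit
   build where SIZE_SZ is 8 and MALLOC_ALIGNMENT is 16): usize2tidx (24)
   and csize2tidx (40) both yield index 2, whose bin corresponds to
   tidx2usize (2) == 32 usable bytes, i.e. tidx2csize (2) == 40-byte
   chunks.  */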
static __thread char tcache_shutting_down = 0;
static __thread TCache *tcache = NULL;
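+
+/* Link the newly freed CHUNK into the head of the tcache bin for
+   TC_IDX and bump that bin's count.  Callers check the bin count
+   against mp_.tcache_count before calling.  */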
+static void
+tcache_put (mchunkptr chunk, size_t tc_idx)
+{
+ TCacheEntry *e = (TCacheEntry *) chunk2mem (chunk);
+ e->next = tcache->entries[tc_idx];
+ tcache->entries[tc_idx] = e;
+ ++(tcache->counts[tc_idx]);
+}
+
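+/* Unlink and return the first entry of the tcache bin for TC_IDX,
+   decrementing that bin's count.  Callers check that the bin is
+   non-empty before calling.  */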
+static void *
+tcache_get (size_t tc_idx)
+{
+ TCacheEntry *e = tcache->entries[tc_idx];
+ tcache->entries[tc_idx] = e->next;
+ --(tcache->counts[tc_idx]);
+ return (void *) e;
+}
+
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
tcache_thread_freeres (void)
{
tcache = NULL;
- for (i=0; i<TCACHE_IDX; i++) {
+ for (i = 0; i < TCACHE_IDX; ++i) {
while (tcache_tmp->entries[i])
{
TCacheEntry *e = tcache_tmp->entries[i];
if (__glibc_unlikely (tcache == NULL)) \
tcache_init();
+#else
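+/* Without tcache there is nothing to initialize, so the macro is
+   empty.  */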
+#define MAYBE_INIT_TCACHE()
#endif
void *
&& tcache
&& tcache->entries[tc_idx] != NULL)
{
- TCacheEntry *e = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e->next;
- --(tcache->counts[tc_idx]);
- return (void *) e;
+ return tcache_get (tc_idx);
}
#endif
return;
}
-#if USE_TCACHE
MAYBE_INIT_TCACHE ();
-#endif
ar_ptr = arena_for_chunk (p);
_int_free (ar_ptr, p, 0);
ar_ptr = NULL;
else
{
-#if USE_TCACHE
MAYBE_INIT_TCACHE ();
-#endif
ar_ptr = arena_for_chunk (oldp);
}
sz = bytes;
-#if USE_TCACHE
MAYBE_INIT_TCACHE ();
-#endif
arena_get (av, sz);
if (av)
!= tc_victim);
if (tc_victim != 0)
{
- TCacheEntry *e = (TCacheEntry *) chunk2mem (tc_victim);
- e->next = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
+ tcache_put (tc_victim, tc_idx);
++found;
}
}
bin->bk = bck;
bck->fd = bin;
- TCacheEntry *e = (TCacheEntry *) chunk2mem (tc_victim);
- e->next = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
+ tcache_put (tc_victim, tc_idx);
++found;
}
}
if (tcache_nb
&& tcache->counts[tc_idx] < mp_.tcache_count)
{
- TCacheEntry *e = (TCacheEntry *) chunk2mem (victim);
- e->next = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
+ tcache_put (victim, tc_idx);
return_cached = 1;
continue;
}
&& mp_.tcache_unsorted_limit > 0
&& tcache_unsorted_count > mp_.tcache_unsorted_limit)
{
- TCacheEntry *e = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e->next;
- --(tcache->counts[tc_idx]);
- return (void *) e;
+ return tcache_get (tc_idx);
}
#endif
/* If all the small chunks we found ended up cached, return one now. */
if (return_cached)
{
- TCacheEntry *e = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e->next;
- --(tcache->counts[tc_idx]);
- return (void *) e;
+ return tcache_get (tc_idx);
}
#endif
&& tc_idx < mp_.tcache_max
&& tcache->counts[tc_idx] < mp_.tcache_count)
{
- TCacheEntry *e = (TCacheEntry *) chunk2mem (p);
- e->next = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e;
- ++(tcache->counts[tc_idx]);
+ tcache_put (p, tc_idx);
return;
}
}