mach-interface-list = @mach_interface_list@
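+# Whether the experimental malloc is built; set from
+# --enable/--disable-experimental-malloc (default yes).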
+experimental-malloc = @experimental_malloc@
+
nss-crypt = @libc_cv_nss_crypt@
# Configuration options.
build_nscd
link_obsolete_rpc
libc_cv_nss_crypt
+experimental_malloc
enable_werror
all_warnings
force_install
enable_all_warnings
enable_werror
enable_multi_arch
+enable_experimental_malloc
enable_nss_crypt
enable_obsolete_rpc
enable_systemtap
--disable-werror do not build with -Werror
--enable-multi-arch enable single DSO with optimizations for multiple
architectures
+ --disable-experimental-malloc
+ disable experimental malloc features
--enable-nss-crypt enable libcrypt to use nss
--enable-obsolete-rpc build and install the obsolete RPC code for
link-time usage
fi
+# Check whether --enable-experimental-malloc was given.
+if test "${enable_experimental_malloc+set}" = set; then :
+ enableval=$enable_experimental_malloc; experimental_malloc=$enableval
+else
+ experimental_malloc=yes
+fi
+
+
+
# Check whether --enable-nss-crypt was given.
if test "${enable_nss_crypt+set}" = set; then :
enableval=$enable_nss_crypt; nss_crypt=$enableval
[multi_arch=$enableval],
[multi_arch=default])
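+dnl The experimental malloc (per-thread cache) is built by default;
+dnl --disable-experimental-malloc turns it off.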
+AC_ARG_ENABLE([experimental-malloc],
+ AC_HELP_STRING([--disable-experimental-malloc],
+ [disable experimental malloc features]),
+ [experimental_malloc=$enableval],
+ [experimental_malloc=yes])
+AC_SUBST(experimental_malloc)
+
AC_ARG_ENABLE([nss-crypt],
AC_HELP_STRING([--enable-nss-crypt],
[enable libcrypt to use nss]),
$(objpfx)trace_dump: $(objpfx)trace_dump.o
$(LINK.o) -o $@ $(objpfx)trace_dump.o
-ifeq (${CXX},)
-CXX = g++
-endif
-
$(objpfx)trace2wl: $(objpfx)trace2wl.o
$(LINK.o) -o $@ $(objpfx)trace2wl.o
$(objpfx)tst-malloc-thread-fail: $(shared-thread-library)
$(objpfx)tst-malloc-fork-deadlock: $(shared-thread-library)
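+# Compile the per-thread cache (USE_TCACHE) into malloc.c only when
+# the experimental malloc is enabled.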
+ifeq ($(experimental-malloc),yes)
+CPPFLAGS-malloc.c += -DUSE_TCACHE
+endif
+
# Export the __malloc_initialize_hook variable to libc.so.
LDFLAGS-tst-mallocstate = -rdynamic
/* Save current position for next visit. */
*position = ++current;
+
break;
}
- *position = ++current;
+ ++current;
}
return result;
char **runp = _environ;
char *envline;
- while (*runp)
- {
- if (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
+ while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
0))
{
size_t len = strcspn (envline, "=");
if (memcmp (envline, "ARENA_TEST", 10) == 0)
__libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
}
+#if USE_TCACHE
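+ /* The tcache limits can be tuned from the environment, but only when the
+    per-thread cache is compiled in and never in secure (setuid) mode.  */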
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TCACHE_MAX", 10) == 0)
__libc_mallopt (M_TCACHE_MAX, atoi (&envline[11]));
}
+#endif
break;
+#if USE_TCACHE
case 12:
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TCACHE_COUNT", 12) == 0)
__libc_mallopt (M_TCACHE_COUNT, atoi (&envline[13]));
}
break;
+#endif
case 15:
if (!__builtin_expect (__libc_enable_secure, 0))
{
break;
}
}
- }
}
if (s && s[0])
{
#define TRACE_COUNT_TO_MAPPING_IDX(count) ((count) % TRACE_N_PER_MAPPING)
/* Global mutex for the trace buffer tree itself. */
-libc_lock_define_initialized (static, __malloc_trace_mutex);
+__libc_lock_define_initialized (static, __malloc_trace_mutex);
/* Global counter, "full" when equal to TRACE_MAX_COUNT. Points to
the next available slot, so POST-INCREMENT it. */
{
if (tcache.initted == 1)
{
- libc_lock_lock (tcache_mutex);
+ __libc_lock_lock (tcache_mutex);
tcache.initted = 2;
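/* This thread is exiting; unlink its tcache from the global tcache_list.  */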
if (tcache.next)
tcache.next->prev = tcache.prev;
if (tcache.prev)
tcache.prev->next = tcache.next;
else
tcache_list = tcache.next;
- libc_lock_unlock (tcache_mutex);
+ __libc_lock_unlock (tcache_mutex);
}
}
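/* Run tcache_thread_freeres as part of each thread's libc cleanup.  */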
text_set_element (__libc_thread_subfreeres, tcache_thread_freeres);
if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
{
-
idx = fastbin_index (nb);
mfastbinptr *fb = &fastbin (av, idx);
mchunkptr pp = *fb;