static pthread_barrier_t b;
+static int expected_n_arenas;
+
static void *
tf (void *closure)
{
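+ /* Allocate from this thread so it gets attached to a malloc arena, which
+    may create additional arenas up to the configured arena limit.  */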
+ void *p = xmalloc (1024);
+
/* Wait for thread startup, so the thread stack is allocated. */
xpthread_barrier_wait (&b);
/* Wait for the test to read the process mappings. */
xpthread_barrier_wait (&b);
+ free (p);
+
return NULL;
}
{
int n_def_threads;
int n_user_threads;
+ int n_arenas;
+ int n_malloc_mmap;
+ int n_loader_malloc_mmap;
};
static struct proc_maps_t
r.n_def_threads++;
else if (strstr (line, "[anon: glibc: pthread user stack:") != NULL)
r.n_user_threads++;
+ else if (strstr (line, "[anon: glibc: malloc arena]") != NULL)
+ r.n_arenas++;
+ else if (strstr (line, "[anon: glibc: malloc]") != NULL)
+ r.n_malloc_mmap++;
+ else if (strstr (line, "[anon: glibc: loader malloc]") != NULL)
+ r.n_loader_malloc_mmap++;
}
free (line);
xfclose (f);
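
(For reference, the decorated mappings matched above appear in /proc/self/maps
roughly as follows; the addresses and sizes are illustrative, not taken from a
real run:

  7f2ab0000000-7f2ab0021000 rw-p 00000000 00:00 0    [anon: glibc: malloc arena]
  7f2ab8021000-7f2ab8062000 rw-p 00000000 00:00 0    [anon: glibc: malloc]
  7f2ab9a4c000-7f2ab9a50000 rw-p 00000000 00:00 0    [anon: glibc: loader malloc]

The kernel prints a named anonymous mapping as "[anon:<name>]", which is
presumably why the names set by this patch start with a space.)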
xpthread_barrier_init (&b, NULL, num_threads + 1);
+ /* Issue a large malloc to trigger a mmap call. */
+ void *p = xmalloc (256 * 1024);
+
pthread_t thr[num_threads];
{
int i = 0;
struct proc_maps_t r = read_proc_maps ();
TEST_COMPARE (r.n_def_threads, num_def_threads);
TEST_COMPARE (r.n_user_threads, num_user_threads);
+ TEST_COMPARE (r.n_arenas, expected_n_arenas);
+ TEST_COMPARE (r.n_malloc_mmap, 1);
+ /* On some architectures the loader might use more than one page. */
+ TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
}
/* Let the threads finish. */
struct proc_maps_t r = read_proc_maps ();
TEST_COMPARE (r.n_def_threads, 0);
TEST_COMPARE (r.n_user_threads, 0);
+ TEST_COMPARE (r.n_arenas, expected_n_arenas);
+ TEST_COMPARE (r.n_malloc_mmap, 1);
+ TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
}
+
+ free (p);
+}
+
+static void
+do_prepare (int argc, char *argv[])
+{
+ TEST_VERIFY_EXIT (argc == 2);
+ expected_n_arenas = strtol (argv[1], NULL, 10);
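+ /* The main arena is not created with mmap and therefore is not decorated,
+    so one fewer "[anon: glibc: malloc arena]" mapping is expected than the
+    number of arenas passed on the command line.  */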
+ expected_n_arenas = expected_n_arenas - 1;
}
+#define PREPARE do_prepare
static int
do_test (void)
#include <sys/sysinfo.h>
#include <ldsodefs.h>
+#include <setvmaname.h>
#include <unistd.h>
#include <stdio.h> /* needed for malloc_stats */
madvise_thp (mm, size);
#endif
+ __set_vma_name (mm, size, " glibc: malloc");
+
/*
The offset to the start of the mmapped region is stored in the prev_size
field of the chunk. This allows us to adjust returned start address to
madvise_thp (mbrk, size);
#endif
+ __set_vma_name (mbrk, size, " glibc: malloc");
+
/* Record that we no longer have a contiguous sbrk region. After the first
time mmap is used as backup, we do not ever rely on contiguous space
since this could incorrectly bridge regions. */
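
As background for the __set_vma_name calls above: glibc's internal helper
(declared in setvmaname.h) wraps the Linux anonymous-VMA naming facility.
The following standalone sketch is not glibc's implementation; it only shows
the underlying mechanism, prctl with PR_SET_VMA_ANON_NAME (Linux 5.17+ built
with CONFIG_ANON_VMA_NAME). The mapping size and the "demo" name are made up
for illustration:

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
# define PR_SET_VMA 0x53564d41
#endif
#ifndef PR_SET_VMA_ANON_NAME
# define PR_SET_VMA_ANON_NAME 0
#endif

int
main (void)
{
  /* Create an anonymous mapping to decorate.  */
  size_t size = 1 << 20;
  void *p = mmap (NULL, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  /* Best effort: this fails with EINVAL on kernels without
     CONFIG_ANON_VMA_NAME, in which case the mapping simply stays unnamed.  */
  if (prctl (PR_SET_VMA, PR_SET_VMA_ANON_NAME,
             (unsigned long) p, size, (unsigned long) "demo") != 0)
    perror ("prctl (PR_SET_VMA)");

  /* Show the decorated entry; it is listed as "[anon:demo]".  */
  FILE *f = fopen ("/proc/self/maps", "r");
  char line[512];
  while (f != NULL && fgets (line, sizeof line, f) != NULL)
    if (strstr (line, "[anon:demo]") != NULL)
      fputs (line, stdout);
  if (f != NULL)
    fclose (f);

  munmap (p, size);
  return 0;
}
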
tst-setuid1-static-ENV = \
LD_LIBRARY_PATH=$(ld-library-path):$(common-objpfx)elf:$(common-objpfx)nss
+tst-pthread-proc-maps-ENV = \
+ GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
+tst-pthread-proc-maps-ARGS = 8
+
# The tests here had better not run in parallel.
ifeq ($(run-built-tests),yes)
ifneq ($(filter %tests,$(MAKECMDGOALS)),)
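
A note on the Makefile settings above: glibc.malloc.arena_max=8 caps the
number of malloc arenas, and passing the same value as the test argument
(tst-pthread-proc-maps-ARGS = 8) lets do_prepare derive the expected number of
decorated arena mappings from argv[1]. glibc.malloc.mmap_threshold=1024 lowers
the request size at which allocations are serviced directly by mmap, which is
presumably what makes the single "[anon: glibc: malloc]" mapping checked by
the test appear deterministically.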