/* Emulate Emacs heap dumping to test malloc_set_state.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <libc-symbols.h>
#include <shlib-compat.h>
#include <support/check.h>
#include <support/support.h>
#include <support/test-driver.h>

#include "malloc.h"

05b38d64
SE
32#if TEST_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
33
e863cce5
FW
34/* Make the compatibility symbols availabile to this test case. */
35void *malloc_get_state (void);
36compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
37int malloc_set_state (void *);
38compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
39
e863cce5
FW
40/* Maximum object size in the fake heap. */
41enum { max_size = 64 };
42
43/* Allocation actions. These are randomized actions executed on the
44 dumped heap (see allocation_tasks below). They are interspersed
45 with operations on the new heap (see heap_activity). */
46enum allocation_action
47 {
48 action_free, /* Dumped and freed. */
49 action_realloc, /* Dumped and realloc'ed. */
50 action_realloc_same, /* Dumped and realloc'ed, same size. */
51 action_realloc_smaller, /* Dumped and realloc'ed, shrinked. */
52 action_count
53 };
54
55/* Dumped heap. Initialize it, so that the object is placed into the
56 .data section, for increased realism. The size is an upper bound;
57 we use about half of the space. */
58static size_t dumped_heap[action_count * max_size * max_size
59 / sizeof (size_t)] = {1};
60
61/* Next free space in the dumped heap. Also top of the heap at the
62 end of the initialization procedure. */
63static size_t *next_heap_chunk;
64
65/* Copied from malloc.c and hooks.c. The version is deliberately
66 lower than the final version of malloc_set_state. */
05b38d64
SE
67# define NBINS 128
68# define MALLOC_STATE_MAGIC 0x444c4541l
69# define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
e863cce5
FW
70static struct
71{
72 long magic;
73 long version;
74 void *av[NBINS * 2 + 2];
75 char *sbrk_base;
76 int sbrked_mem_bytes;
77 unsigned long trim_threshold;
78 unsigned long top_pad;
79 unsigned int n_mmaps_max;
80 unsigned long mmap_threshold;
81 int check_action;
82 unsigned long max_sbrked_mem;
83 unsigned long max_total_mem;
84 unsigned int n_mmaps;
85 unsigned int max_n_mmaps;
86 unsigned long mmapped_mem;
87 unsigned long max_mmapped_mem;
88 int using_malloc_checking;
89 unsigned long max_fast;
90 unsigned long arena_test;
91 unsigned long arena_max;
92 unsigned long narenas;
93} save_state =
94 {
95 .magic = MALLOC_STATE_MAGIC,
96 .version = MALLOC_STATE_VERSION,
97 };
98
99/* Allocate a blob in the fake heap. */
100static void *
101dumped_heap_alloc (size_t length)
102{
103 /* malloc needs three state bits in the size field, so the minimum
104 alignment is 8 even on 32-bit architectures. malloc_set_state
105 should be compatible with such heaps even if it currently
106 provides more alignment to applications. */
107 enum
108 {
109 heap_alignment = 8,
110 heap_alignment_mask = heap_alignment - 1
111 };
112 _Static_assert (sizeof (size_t) <= heap_alignment,
113 "size_t compatible with heap alignment");
114
115 /* Need at least this many bytes for metadata and application
116 data. */
117 size_t chunk_size = sizeof (size_t) + length;
118 /* Round up the allocation size to the heap alignment. */
119 chunk_size += heap_alignment_mask;
120 chunk_size &= ~heap_alignment_mask;
05b38d64 121 TEST_VERIFY_EXIT ((chunk_size & 3) == 0);
e863cce5
FW
122 if (next_heap_chunk == NULL)
123 /* Initialize the top of the heap. Add one word of zero padding,
124 to match existing practice. */
125 {
126 dumped_heap[0] = 0;
127 next_heap_chunk = dumped_heap + 1;
128 }
129 else
130 /* The previous chunk is allocated. */
131 chunk_size |= 1;
132 *next_heap_chunk = chunk_size;
133
134 /* User data starts after the chunk header. */
135 void *result = next_heap_chunk + 1;
136 next_heap_chunk += chunk_size / sizeof (size_t);
137
138 /* Mark the previous chunk as used. */
139 *next_heap_chunk = 1;
140 return result;
141}
142
143/* Global seed variable for the random number generator. */
144static unsigned long long global_seed;
145
146/* Simple random number generator. The numbers are in the range from
147 0 to UINT_MAX (inclusive). */
148static unsigned int
149rand_next (unsigned long long *seed)
150{
151 /* Linear congruential generated as used for MMIX. */
152 *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
153 return *seed >> 32;
154}
155
156/* Fill LENGTH bytes at BUFFER with random contents, as determined by
157 SEED. */
158static void
159randomize_buffer (unsigned char *buffer, size_t length,
160 unsigned long long seed)
161{
162 for (size_t i = 0; i < length; ++i)
163 buffer[i] = rand_next (&seed);
164}
fa8d436c 165
e863cce5 166/* Dumps the buffer to standard output, in hexadecimal. */
fa8d436c 167static void
e863cce5 168dump_hex (unsigned char *buffer, size_t length)
fa8d436c 169{
e863cce5
FW
170 for (int i = 0; i < length; ++i)
171 printf (" %02X", buffer[i]);
172}
173
174/* Set to true if an error is encountered. */
175static bool errors = false;
176
177/* Keep track of object allocations. */
178struct allocation
179{
180 unsigned char *data;
181 unsigned int size;
182 unsigned int seed;
183};
184
185/* Check that the allocation task allocation has the expected
186 contents. */
187static void
188check_allocation (const struct allocation *alloc, int index)
189{
190 size_t size = alloc->size;
191 if (alloc->data == NULL)
192 {
193 printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
194 size, index, alloc->seed);
195 errors = true;
196 return;
197 }
198
199 unsigned char expected[4096];
200 if (size > sizeof (expected))
201 {
202 printf ("error: invalid allocation size %zu at %d, seed %u\n",
203 size, index, alloc->seed);
204 errors = true;
205 return;
206 }
207 randomize_buffer (expected, size, alloc->seed);
208 if (memcmp (alloc->data, expected, size) != 0)
209 {
210 printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
211 index, size, alloc->seed);
212 printf (" expected:");
213 dump_hex (expected, size);
214 putc ('\n', stdout);
215 printf (" actual:");
216 dump_hex (alloc->data, size);
217 putc ('\n', stdout);
218 errors = true;
219 }
220}
221
222/* A heap allocation combined with pending actions on it. */
223struct allocation_task
224{
225 struct allocation allocation;
226 enum allocation_action action;
227};
228
229/* Allocation tasks. Initialized by init_allocation_tasks and used by
230 perform_allocations. */
231enum { allocation_task_count = action_count * max_size };
232static struct allocation_task allocation_tasks[allocation_task_count];
233
234/* Fisher-Yates shuffle of allocation_tasks. */
235static void
236shuffle_allocation_tasks (void)
237{
238 for (int i = 0; i < allocation_task_count - 1; ++i)
239 {
240 /* Pick pair in the tail of the array. */
241 int j = i + (rand_next (&global_seed)
242 % ((unsigned) (allocation_task_count - i)));
05b38d64 243 TEST_VERIFY_EXIT (j >= 0 && j < allocation_task_count);
e863cce5
FW
244 /* Exchange. */
245 struct allocation_task tmp = allocation_tasks[i];
246 allocation_tasks[i] = allocation_tasks[j];
247 allocation_tasks[j] = tmp;
248 }
249}
250
251/* Set up the allocation tasks and the dumped heap. */
252static void
253initial_allocations (void)
254{
255 /* Initialize in a position-dependent way. */
256 for (int i = 0; i < allocation_task_count; ++i)
257 allocation_tasks[i] = (struct allocation_task)
258 {
259 .allocation =
260 {
261 .size = 1 + (i / action_count),
262 .seed = i,
263 },
264 .action = i % action_count
265 };
266
267 /* Execute the tasks in a random order. */
268 shuffle_allocation_tasks ();
269
270 /* Initialize the contents of the dumped heap. */
271 for (int i = 0; i < allocation_task_count; ++i)
272 {
273 struct allocation_task *task = allocation_tasks + i;
274 task->allocation.data = dumped_heap_alloc (task->allocation.size);
275 randomize_buffer (task->allocation.data, task->allocation.size,
276 task->allocation.seed);
277 }
278
279 for (int i = 0; i < allocation_task_count; ++i)
280 check_allocation (&allocation_tasks[i].allocation, i);
281}
282
283/* Indicates whether init_heap has run. This variable needs to be
284 volatile because malloc is declared __THROW, which implies it is a
285 leaf function, but we expect it to run our hooks. */
286static volatile bool heap_initialized;
287
288/* Executed by glibc malloc, through __malloc_initialize_hook
289 below. */
290static void
291init_heap (void)
292{
05b38d64
SE
293 if (test_verbose)
294 printf ("info: performing heap initialization\n");
e863cce5
FW
295 heap_initialized = true;
296
297 /* Populate the dumped heap. */
298 initial_allocations ();
299
300 /* Complete initialization of the saved heap data structure. */
301 save_state.sbrk_base = (void *) dumped_heap;
302 save_state.sbrked_mem_bytes = sizeof (dumped_heap);
303 /* Top pointer. Adjust so that it points to the start of struct
304 malloc_chunk. */
305 save_state.av[2] = (void *) (next_heap_chunk - 1);
306
307 /* Integrate the dumped heap into the process heap. */
05b38d64 308 TEST_VERIFY_EXIT (malloc_set_state (&save_state) == 0);
e863cce5
FW
309}
310
311/* Interpose the initialization callback. */
312void (*volatile __malloc_initialize_hook) (void) = init_heap;
313
314/* Simulate occasional unrelated heap activity in the non-dumped
315 heap. */
316enum { heap_activity_allocations_count = 32 };
317static struct allocation heap_activity_allocations
318 [heap_activity_allocations_count] = {};
319static int heap_activity_seed_counter = 1000 * 1000;
320
321static void
322heap_activity (void)
323{
324 /* Only do this from time to time. */
325 if ((rand_next (&global_seed) % 4) == 0)
326 {
327 int slot = rand_next (&global_seed) % heap_activity_allocations_count;
328 struct allocation *alloc = heap_activity_allocations + slot;
329 if (alloc->data == NULL)
330 {
331 alloc->size = rand_next (&global_seed) % (4096U + 1);
332 alloc->data = xmalloc (alloc->size);
333 alloc->seed = heap_activity_seed_counter++;
334 randomize_buffer (alloc->data, alloc->size, alloc->seed);
335 check_allocation (alloc, 1000 + slot);
336 }
337 else
338 {
339 check_allocation (alloc, 1000 + slot);
340 free (alloc->data);
341 alloc->data = NULL;
342 }
343 }
344}
345
346static void
347heap_activity_deallocate (void)
348{
349 for (int i = 0; i < heap_activity_allocations_count; ++i)
350 free (heap_activity_allocations[i].data);
351}
352
353/* Perform a full heap check across the dumped heap allocation tasks,
354 and the simulated heap activity directly above. */
355static void
356full_heap_check (void)
357{
358 /* Dumped heap. */
359 for (int i = 0; i < allocation_task_count; ++i)
360 if (allocation_tasks[i].allocation.data != NULL)
361 check_allocation (&allocation_tasks[i].allocation, i);
362
363 /* Heap activity allocations. */
364 for (int i = 0; i < heap_activity_allocations_count; ++i)
365 if (heap_activity_allocations[i].data != NULL)
366 check_allocation (heap_activity_allocations + i, i);
367}
368
369/* Used as an optimization barrier to force a heap allocation. */
370__attribute__ ((noinline, noclone))
371static void
372my_free (void *ptr)
373{
374 free (ptr);
fa8d436c
UD
375}
376
29955b5d
AS
377static int
378do_test (void)
fa8d436c 379{
e863cce5 380 my_free (malloc (1));
05b38d64 381 TEST_VERIFY_EXIT (heap_initialized);
fa8d436c 382
e863cce5
FW
383 /* The first pass performs the randomly generated allocation
384 tasks. */
05b38d64
SE
385 if (test_verbose)
386 printf ("info: first pass through allocation tasks\n");
e863cce5
FW
387 full_heap_check ();
388
389 /* Execute the post-undump tasks in a random order. */
390 shuffle_allocation_tasks ();
391
392 for (int i = 0; i < allocation_task_count; ++i)
393 {
394 heap_activity ();
395 struct allocation_task *task = allocation_tasks + i;
396 switch (task->action)
397 {
398 case action_free:
399 check_allocation (&task->allocation, i);
400 free (task->allocation.data);
401 task->allocation.data = NULL;
402 break;
fa8d436c 403
e863cce5
FW
404 case action_realloc:
405 check_allocation (&task->allocation, i);
406 task->allocation.data = xrealloc
407 (task->allocation.data, task->allocation.size + max_size);
408 check_allocation (&task->allocation, i);
409 break;
fa8d436c 410
e863cce5
FW
411 case action_realloc_same:
412 check_allocation (&task->allocation, i);
413 task->allocation.data = xrealloc
414 (task->allocation.data, task->allocation.size);
415 check_allocation (&task->allocation, i);
416 break;
fa8d436c 417
e863cce5
FW
418 case action_realloc_smaller:
419 check_allocation (&task->allocation, i);
420 size_t new_size = task->allocation.size - 1;
421 task->allocation.data = xrealloc (task->allocation.data, new_size);
422 if (new_size == 0)
423 {
424 if (task->allocation.data != NULL)
425 {
426 printf ("error: realloc with size zero did not deallocate\n");
427 errors = true;
428 }
429 /* No further action on this task. */
430 task->action = action_free;
431 }
432 else
433 {
434 task->allocation.size = new_size;
435 check_allocation (&task->allocation, i);
436 }
437 break;
fa8d436c 438
e863cce5 439 case action_count:
05b38d64 440 FAIL_EXIT1 ("task->action should never be action_count");
e863cce5
FW
441 }
442 full_heap_check ();
443 }
444
445 /* The second pass frees the objects which were allocated during the
446 first pass. */
05b38d64
SE
447 if (test_verbose)
448 printf ("info: second pass through allocation tasks\n");
e863cce5
FW
449
450 shuffle_allocation_tasks ();
451 for (int i = 0; i < allocation_task_count; ++i)
fa8d436c 452 {
e863cce5
FW
453 heap_activity ();
454 struct allocation_task *task = allocation_tasks + i;
455 switch (task->action)
6c8dbf00 456 {
e863cce5
FW
457 case action_free:
458 /* Already freed, nothing to do. */
459 break;
460
461 case action_realloc:
462 case action_realloc_same:
463 case action_realloc_smaller:
464 check_allocation (&task->allocation, i);
465 free (task->allocation.data);
466 task->allocation.data = NULL;
6c8dbf00 467 break;
e863cce5
FW
468
469 case action_count:
05b38d64 470 FAIL_EXIT1 ("task->action should never be action_count");
6c8dbf00 471 }
e863cce5 472 full_heap_check ();
fa8d436c
UD
473 }
474
e863cce5 475 heap_activity_deallocate ();
fa8d436c 476
e863cce5
FW
477 /* Check that the malloc_get_state stub behaves in the intended
478 way. */
479 errno = 0;
480 if (malloc_get_state () != NULL)
481 {
482 printf ("error: malloc_get_state succeeded\n");
483 errors = true;
484 }
485 if (errno != ENOSYS)
486 {
487 printf ("error: malloc_get_state: %m\n");
488 errors = true;
489 }
29955b5d 490
e863cce5
FW
491 return errors;
492}
05b38d64
SE
493#else
494static int
495do_test (void)
496{
497 return 77;
498}
499#endif
500
501#include <support/test-driver.c>