/* Source: glibc malloc/tst-mallocstate.c, from the commit
   "malloc: Remove malloc_get_state, malloc_set_state [BZ #19473]".
   (Git-blame viewer artifacts removed.)  */
1/* Emulate Emacs heap dumping to test malloc_set_state.
2 Copyright (C) 2001-2016 Free Software Foundation, Inc.
fa8d436c
UD
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
59ba27a6
PE
17 License along with the GNU C Library; if not, see
18 <http://www.gnu.org/licenses/>. */
fa8d436c
UD
19
20#include <errno.h>
e863cce5 21#include <stdbool.h>
fa8d436c 22#include <stdio.h>
e863cce5
FW
23#include <string.h>
24#include <libc-symbols.h>
25#include <shlib-compat.h>
26
fa8d436c
UD
27#include "malloc.h"
28
e863cce5
FW
/* Make the compatibility symbols available to this test case.  The
   functions were removed from the public API [BZ #19473], so they
   must be referenced through their GLIBC_2.0 compat versions.  */
void *malloc_get_state (void);
compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int malloc_set_state (void *);
compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

static int do_test (void);
#define TEST_FUNCTION do_test ()
#include "../test-skeleton.c"
38
/* Maximum object size in the fake heap.  */
enum { max_size = 64 };

/* Allocation actions.  These are randomized actions executed on the
   dumped heap (see allocation_tasks below).  They are interspersed
   with operations on the new heap (see heap_activity).  */
enum allocation_action
  {
    action_free,                /* Dumped and freed.  */
    action_realloc,             /* Dumped and realloc'ed.  */
    action_realloc_same,        /* Dumped and realloc'ed, same size.  */
    action_realloc_smaller,     /* Dumped and realloc'ed, shrunk.  */
    action_count
  };

/* Dumped heap.  Initialize it, so that the object is placed into the
   .data section, for increased realism.  The size is an upper bound;
   we use about half of the space.  */
static size_t dumped_heap[action_count * max_size * max_size
                          / sizeof (size_t)] = {1};

/* Next free space in the dumped heap.  Also top of the heap at the
   end of the initialization procedure.  */
static size_t *next_heap_chunk;
63
/* Copied from malloc.c and hooks.c.  The version is deliberately
   lower than the final version of malloc_set_state.  The field order
   and types mirror the layout malloc_set_state expects for this
   version; do not reorder or resize the members.  */
#define NBINS 128
#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
static struct
{
  long magic;
  long version;
  void *av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
} save_state =
  {
    .magic = MALLOC_STATE_MAGIC,
    .version = MALLOC_STATE_VERSION,
    /* sbrk_base, sbrked_mem_bytes and av[2] (the top chunk) are
       filled in later by init_heap; everything else stays zero.  */
  };
97
/* Allocate a blob in the fake heap.  Returns a pointer to LENGTH
   usable bytes, preceded in memory by a malloc-style size word.
   Advances next_heap_chunk; exits the process on an internal test
   bug.  */
static void *
dumped_heap_alloc (size_t length)
{
  /* malloc needs three state bits in the size field, so the minimum
     alignment is 8 even on 32-bit architectures.  malloc_set_state
     should be compatible with such heaps even if it currently
     provides more alignment to applications.  */
  enum
  {
    heap_alignment = 8,
    heap_alignment_mask = heap_alignment - 1
  };
  _Static_assert (sizeof (size_t) <= heap_alignment,
                  "size_t compatible with heap alignment");

  /* Need at least this many bytes for metadata and application
     data.  */
  size_t chunk_size = sizeof (size_t) + length;
  /* Round up the allocation size to the heap alignment.  */
  chunk_size += heap_alignment_mask;
  chunk_size &= ~heap_alignment_mask;
  if ((chunk_size & 3) != 0)
    {
      /* The lower three bits in the chunk size have to be 0.  */
      write_message ("error: dumped_heap_alloc computed invalid chunk size\n");
      _exit (1);
    }
  if (next_heap_chunk == NULL)
    /* Initialize the top of the heap.  Add one word of zero padding,
       to match existing practice.  */
    {
      dumped_heap[0] = 0;
      next_heap_chunk = dumped_heap + 1;
    }
  else
    /* The previous chunk is allocated.  */
    chunk_size |= 1;
  *next_heap_chunk = chunk_size;

  /* User data starts after the chunk header.  */
  void *result = next_heap_chunk + 1;
  next_heap_chunk += chunk_size / sizeof (size_t);

  /* Mark the previous chunk as used.  */
  *next_heap_chunk = 1;
  return result;
}
146
/* Global seed variable for the random number generator.  Shared by
   the shuffle and heap-activity helpers below.  */
static unsigned long long global_seed;
149
/* Simple random number generator.  Advances *SEED and returns a
   value in the range from 0 to UINT_MAX (inclusive).  */
static unsigned int
rand_next (unsigned long long *seed)
{
  /* Linear congruential generator, using the constants chosen for
     MMIX.  */
  unsigned long long next
    = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
  *seed = next;
  return next >> 32;
}
159
/* Fill LENGTH bytes at BUFFER with random contents, as determined by
   SEED.  The caller's seed is taken by value, so the global random
   state is unaffected.  */
static void
randomize_buffer (unsigned char *buffer, size_t length,
                  unsigned long long seed)
{
  size_t i = 0;
  while (i < length)
    {
      buffer[i] = rand_next (&seed);
      ++i;
    }
}
fa8d436c 169
/* Dumps the buffer to standard output, in hexadecimal.  */
static void
dump_hex (unsigned char *buffer, size_t length)
{
  /* Index with size_t: LENGTH is a size_t, so an int index would mix
     signed and unsigned in the comparison and could not cover very
     large buffers.  */
  for (size_t i = 0; i < length; ++i)
    printf (" %02X", buffer[i]);
}
177
/* Set to true if an error is encountered.  Reported as the exit
   status of do_test.  */
static bool errors = false;

/* Keep track of object allocations.  */
struct allocation
{
  unsigned char *data;          /* Object contents, or NULL if freed.  */
  unsigned int size;            /* Object size in bytes.  */
  unsigned int seed;            /* Seed that generated the contents.  */
};
188
189/* Check that the allocation task allocation has the expected
190 contents. */
191static void
192check_allocation (const struct allocation *alloc, int index)
193{
194 size_t size = alloc->size;
195 if (alloc->data == NULL)
196 {
197 printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
198 size, index, alloc->seed);
199 errors = true;
200 return;
201 }
202
203 unsigned char expected[4096];
204 if (size > sizeof (expected))
205 {
206 printf ("error: invalid allocation size %zu at %d, seed %u\n",
207 size, index, alloc->seed);
208 errors = true;
209 return;
210 }
211 randomize_buffer (expected, size, alloc->seed);
212 if (memcmp (alloc->data, expected, size) != 0)
213 {
214 printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
215 index, size, alloc->seed);
216 printf (" expected:");
217 dump_hex (expected, size);
218 putc ('\n', stdout);
219 printf (" actual:");
220 dump_hex (alloc->data, size);
221 putc ('\n', stdout);
222 errors = true;
223 }
224}
225
/* A heap allocation combined with pending actions on it.  */
struct allocation_task
{
  struct allocation allocation;
  enum allocation_action action;
};

/* Allocation tasks.  Initialized by initial_allocations and consumed
   by the two passes in do_test.  */
enum { allocation_task_count = action_count * max_size };
static struct allocation_task allocation_tasks[allocation_task_count];
237
238/* Fisher-Yates shuffle of allocation_tasks. */
239static void
240shuffle_allocation_tasks (void)
241{
242 for (int i = 0; i < allocation_task_count - 1; ++i)
243 {
244 /* Pick pair in the tail of the array. */
245 int j = i + (rand_next (&global_seed)
246 % ((unsigned) (allocation_task_count - i)));
247 if (j < 0 || j >= allocation_task_count)
248 {
249 write_message ("error: test bug in shuffle\n");
250 _exit (1);
251 }
252 /* Exchange. */
253 struct allocation_task tmp = allocation_tasks[i];
254 allocation_tasks[i] = allocation_tasks[j];
255 allocation_tasks[j] = tmp;
256 }
257}
258
259/* Set up the allocation tasks and the dumped heap. */
260static void
261initial_allocations (void)
262{
263 /* Initialize in a position-dependent way. */
264 for (int i = 0; i < allocation_task_count; ++i)
265 allocation_tasks[i] = (struct allocation_task)
266 {
267 .allocation =
268 {
269 .size = 1 + (i / action_count),
270 .seed = i,
271 },
272 .action = i % action_count
273 };
274
275 /* Execute the tasks in a random order. */
276 shuffle_allocation_tasks ();
277
278 /* Initialize the contents of the dumped heap. */
279 for (int i = 0; i < allocation_task_count; ++i)
280 {
281 struct allocation_task *task = allocation_tasks + i;
282 task->allocation.data = dumped_heap_alloc (task->allocation.size);
283 randomize_buffer (task->allocation.data, task->allocation.size,
284 task->allocation.seed);
285 }
286
287 for (int i = 0; i < allocation_task_count; ++i)
288 check_allocation (&allocation_tasks[i].allocation, i);
289}
290
/* Indicates whether init_heap has run.  This variable needs to be
   volatile because malloc is declared __THROW, which implies it is a
   leaf function, but we expect it to run our hooks.  */
static volatile bool heap_initialized;

/* Executed by glibc malloc, through __malloc_initialize_hook
   below.  Builds the fake dumped heap and splices it into the
   process heap via malloc_set_state; exits on failure.  */
static void
init_heap (void)
{
  write_message ("info: performing heap initialization\n");
  heap_initialized = true;

  /* Populate the dumped heap.  */
  initial_allocations ();

  /* Complete initialization of the saved heap data structure.  */
  save_state.sbrk_base = (void *) dumped_heap;
  save_state.sbrked_mem_bytes = sizeof (dumped_heap);
  /* Top pointer.  Adjust so that it points to the start of struct
     malloc_chunk.  */
  save_state.av[2] = (void *) (next_heap_chunk - 1);

  /* Integrate the dumped heap into the process heap.  */
  if (malloc_set_state (&save_state) != 0)
    {
      write_message ("error: malloc_set_state failed\n");
      _exit (1);
    }
}

/* Interpose the initialization callback.  */
void (*volatile __malloc_initialize_hook) (void) = init_heap;
324
/* Simulate occasional unrelated heap activity in the non-dumped
   heap.  */
enum { heap_activity_allocations_count = 32 };
/* Slot table for the simulated activity; a NULL data pointer marks a
   free slot.  */
static struct allocation heap_activity_allocations
  [heap_activity_allocations_count] = {};
/* Monotonically increasing seed for activity allocations.  */
static int heap_activity_seed_counter = 1000 * 1000;
331
332static void
333heap_activity (void)
334{
335 /* Only do this from time to time. */
336 if ((rand_next (&global_seed) % 4) == 0)
337 {
338 int slot = rand_next (&global_seed) % heap_activity_allocations_count;
339 struct allocation *alloc = heap_activity_allocations + slot;
340 if (alloc->data == NULL)
341 {
342 alloc->size = rand_next (&global_seed) % (4096U + 1);
343 alloc->data = xmalloc (alloc->size);
344 alloc->seed = heap_activity_seed_counter++;
345 randomize_buffer (alloc->data, alloc->size, alloc->seed);
346 check_allocation (alloc, 1000 + slot);
347 }
348 else
349 {
350 check_allocation (alloc, 1000 + slot);
351 free (alloc->data);
352 alloc->data = NULL;
353 }
354 }
355}
356
357static void
358heap_activity_deallocate (void)
359{
360 for (int i = 0; i < heap_activity_allocations_count; ++i)
361 free (heap_activity_allocations[i].data);
362}
363
364/* Perform a full heap check across the dumped heap allocation tasks,
365 and the simulated heap activity directly above. */
366static void
367full_heap_check (void)
368{
369 /* Dumped heap. */
370 for (int i = 0; i < allocation_task_count; ++i)
371 if (allocation_tasks[i].allocation.data != NULL)
372 check_allocation (&allocation_tasks[i].allocation, i);
373
374 /* Heap activity allocations. */
375 for (int i = 0; i < heap_activity_allocations_count; ++i)
376 if (heap_activity_allocations[i].data != NULL)
377 check_allocation (heap_activity_allocations + i, i);
378}
379
/* Used as an optimization barrier to force a heap allocation.  The
   noinline/noclone attributes keep the compiler from folding the
   malloc/free pair in do_test away.  */
__attribute__ ((noinline, noclone))
static void
my_free (void *ptr)
{
  free (ptr);
}
387
/* Test driver.  Forces heap initialization (which installs the
   dumped heap via malloc_set_state), then runs two randomized passes
   over the allocation tasks with interleaved heap activity and full
   heap checks, and finally verifies the malloc_get_state stub.
   Returns nonzero if any check failed.  */
static int
do_test (void)
{
  /* Trigger malloc initialization; init_heap runs through
     __malloc_initialize_hook.  */
  my_free (malloc (1));
  if (!heap_initialized)
    {
      printf ("error: heap was not initialized by malloc\n");
      return 1;
    }

  /* The first pass performs the randomly generated allocation
     tasks.  */
  write_message ("info: first pass through allocation tasks\n");
  full_heap_check ();

  /* Execute the post-undump tasks in a random order.  */
  shuffle_allocation_tasks ();

  for (int i = 0; i < allocation_task_count; ++i)
    {
      heap_activity ();
      struct allocation_task *task = allocation_tasks + i;
      switch (task->action)
        {
        case action_free:
          check_allocation (&task->allocation, i);
          free (task->allocation.data);
          task->allocation.data = NULL;
          break;

        case action_realloc:
          /* Grow the object by max_size bytes.  */
          check_allocation (&task->allocation, i);
          task->allocation.data = xrealloc
            (task->allocation.data, task->allocation.size + max_size);
          check_allocation (&task->allocation, i);
          break;

        case action_realloc_same:
          check_allocation (&task->allocation, i);
          task->allocation.data = xrealloc
            (task->allocation.data, task->allocation.size);
          check_allocation (&task->allocation, i);
          break;

        case action_realloc_smaller:
          check_allocation (&task->allocation, i);
          size_t new_size = task->allocation.size - 1;
          task->allocation.data = xrealloc (task->allocation.data, new_size);
          if (new_size == 0)
            {
              if (task->allocation.data != NULL)
                {
                  printf ("error: realloc with size zero did not deallocate\n");
                  errors = true;
                }
              /* No further action on this task.  */
              task->action = action_free;
            }
          else
            {
              task->allocation.size = new_size;
              check_allocation (&task->allocation, i);
            }
          break;

        case action_count:
          abort ();
        }
      full_heap_check ();
    }

  /* The second pass frees the objects which were allocated during the
     first pass.  */
  write_message ("info: second pass through allocation tasks\n");

  shuffle_allocation_tasks ();
  for (int i = 0; i < allocation_task_count; ++i)
    {
      heap_activity ();
      struct allocation_task *task = allocation_tasks + i;
      switch (task->action)
        {
        case action_free:
          /* Already freed, nothing to do.  */
          break;

        case action_realloc:
        case action_realloc_same:
        case action_realloc_smaller:
          check_allocation (&task->allocation, i);
          free (task->allocation.data);
          task->allocation.data = NULL;
          break;

        case action_count:
          abort ();
        }
      full_heap_check ();
    }

  heap_activity_deallocate ();

  /* Check that the malloc_get_state stub behaves in the intended
     way: it must fail with ENOSYS.  */
  errno = 0;
  if (malloc_get_state () != NULL)
    {
      printf ("error: malloc_get_state succeeded\n");
      errors = true;
    }
  if (errno != ENOSYS)
    {
      printf ("error: malloc_get_state: %m\n");
      errors = true;
    }

  return errors;
}