From 2af141bda3cd407abd4bedf615f9e45fe79518e2 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Thu, 10 Aug 2023 19:36:56 +0200
Subject: [PATCH 08/44] malloc: Remove bin scanning from memalign (bug 30723)

On the test workload (mpv --cache=yes with VP9 video decoding), the
bin scanning has a very poor success rate (less than 2%). The tcache
scanning has about 50% success rate, so keep that.

Update comments in malloc/tst-memalign-2 to indicate the purpose
of the tests. Even with the scanning removed, the additional
merging opportunities since commit 542b1105852568c3ebc712225ae78b
("malloc: Enable merging of remainders in memalign (bug 30723)")
are sufficient to pass the existing large bins test.

Remove leftover variables from _int_free from refactoring in the
same commit.

Reviewed-by: DJ Delorie <dj@redhat.com>
(cherry picked from commit 0dc7fc1cf094406a138e4d1bcf9553e59edcf89d)
---
 NEWS                    |   1 +
 malloc/malloc.c         | 169 ++--------------------------------------
 malloc/tst-memalign-2.c |   7 +-
 3 files changed, 11 insertions(+), 166 deletions(-)

diff --git a/NEWS b/NEWS
index 872bc8907b..c339cb444e 100644
--- a/NEWS
+++ b/NEWS
@@ -132,6 +132,7 @@ The following bugs are resolved with this release:
   [30555] string: strerror can incorrectly return NULL
   [30579] malloc: trim_threshold in realloc lead to high memory usage
   [30662] nscd: Group and password cache use errno in place of errval
+  [30723] posix_memalign repeatedly scans long bin lists
 \f
 Version 2.37

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 948f9759af..d0bbbf3710 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4488,12 +4488,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 {
   INTERNAL_SIZE_T size;        /* its size */
   mfastbinptr *fb;             /* associated fastbin */
-  mchunkptr nextchunk;         /* next contiguous chunk */
-  INTERNAL_SIZE_T nextsize;    /* its size */
-  int nextinuse;               /* true if nextchunk is used */
-  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
-  mchunkptr bck;               /* misc temp for linking */
-  mchunkptr fwd;               /* misc temp for linking */

   size = chunksize (p);

@@ -5032,42 +5026,6 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
    ------------------------------ memalign ------------------------------
  */

-/* Returns 0 if the chunk is not and does not contain the requested
-   aligned sub-chunk, else returns the amount of "waste" from
-   trimming. NB is the *chunk* byte size, not the user byte
-   size. */
-static size_t
-chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t nb)
-{
-  void *m = chunk2mem (p);
-  INTERNAL_SIZE_T size = chunksize (p);
-  void *aligned_m = m;
-
-  if (__glibc_unlikely (misaligned_chunk (p)))
-    malloc_printerr ("_int_memalign(): unaligned chunk detected");
-
-  aligned_m = PTR_ALIGN_UP (m, alignment);
-
-  INTERNAL_SIZE_T front_extra = (intptr_t) aligned_m - (intptr_t) m;
-
-  /* We can't trim off the front as it's too small. */
-  if (front_extra > 0 && front_extra < MINSIZE)
-    return 0;
-
-  /* If it's a perfect fit, it's an exception to the return value rule
-     (we would return zero waste, which looks like "not usable"), so
-     handle it here by returning a small non-zero value instead. */
-  if (size == nb && front_extra == 0)
-    return 1;
-
-  /* If the block we need fits in the chunk, calculate total waste. */
-  if (size > nb + front_extra)
-    return size - nb;
-
-  /* Can't use this chunk. */
-  return 0;
-}
-
 /* BYTES is user requested bytes, not requested chunksize bytes. */
 static void *
 _int_memalign (mstate av, size_t alignment, size_t bytes)
@@ -5082,7 +5040,6 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
   mchunkptr remainder;          /* spare room at end to split off */
   unsigned long remainder_size; /* its size */
   INTERNAL_SIZE_T size;
-  mchunkptr victim;

   nb = checked_request2size (bytes);
   if (nb == 0)
@@ -5101,129 +5058,13 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
      we don't find anything in those bins, the common malloc code will
      scan starting at 2x. */

-  /* This will be set if we found a candidate chunk. */
-  victim = NULL;
-
-  /* Fast bins are singly-linked, hard to remove a chunk from the middle
-     and unlikely to meet our alignment requirements. We have not done
-     any experimentation with searching for aligned fastbins. */
-
-  if (av != NULL)
-    {
-      int first_bin_index;
-      int first_largebin_index;
-      int last_bin_index;
-
-      if (in_smallbin_range (nb))
-        first_bin_index = smallbin_index (nb);
-      else
-        first_bin_index = largebin_index (nb);
-
-      if (in_smallbin_range (nb * 2))
-        last_bin_index = smallbin_index (nb * 2);
-      else
-        last_bin_index = largebin_index (nb * 2);
-
-      first_largebin_index = largebin_index (MIN_LARGE_SIZE);
-
-      int victim_index;         /* its bin index */
-
-      for (victim_index = first_bin_index;
-           victim_index < last_bin_index;
-           victim_index ++)
-        {
-          victim = NULL;
-
-          if (victim_index < first_largebin_index)
-            {
-              /* Check small bins. Small bin chunks are doubly-linked despite
-                 being the same size. */
-
-              mchunkptr fwd;            /* misc temp for linking */
-              mchunkptr bck;            /* misc temp for linking */
-
-              bck = bin_at (av, victim_index);
-              fwd = bck->fd;
-              while (fwd != bck)
-                {
-                  if (chunk_ok_for_memalign (fwd, alignment, nb) > 0)
-                    {
-                      victim = fwd;
-
-                      /* Unlink it */
-                      victim->fd->bk = victim->bk;
-                      victim->bk->fd = victim->fd;
-                      break;
-                    }
-
-                  fwd = fwd->fd;
-                }
-            }
-          else
-            {
-              /* Check large bins. */
-              mchunkptr fwd;            /* misc temp for linking */
-              mchunkptr bck;            /* misc temp for linking */
-              mchunkptr best = NULL;
-              size_t best_size = 0;
-
-              bck = bin_at (av, victim_index);
-              fwd = bck->fd;
+  /* Call malloc with worst case padding to hit alignment. */
+  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));

-              while (fwd != bck)
-                {
-                  int extra;
-
-                  if (chunksize (fwd) < nb)
-                    break;
-                  extra = chunk_ok_for_memalign (fwd, alignment, nb);
-                  if (extra > 0
-                      && (extra <= best_size || best == NULL))
-                    {
-                      best = fwd;
-                      best_size = extra;
-                    }
+  if (m == 0)
+    return 0;           /* propagate failure */

-                  fwd = fwd->fd;
-                }
-              victim = best;
-
-              if (victim != NULL)
-                {
-                  unlink_chunk (av, victim);
-                  break;
-                }
-            }
-
-          if (victim != NULL)
-            break;
-        }
-    }
-
-  /* Strategy: find a spot within that chunk that meets the alignment
-     request, and then possibly free the leading and trailing space.
-     This strategy is incredibly costly and can lead to external
-     fragmentation if header and footer chunks are unused. */
-
-  if (victim != NULL)
-    {
-      p = victim;
-      m = chunk2mem (p);
-      set_inuse (p);
-      if (av != &main_arena)
-        set_non_main_arena (p);
-    }
-  else
-    {
-      /* Call malloc with worst case padding to hit alignment. */
-
-      m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
-
-      if (m == 0)
-        return 0; /* propagate failure */
-
-      p = mem2chunk (m);
-    }
+  p = mem2chunk (m);

   if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
     {
diff --git a/malloc/tst-memalign-2.c b/malloc/tst-memalign-2.c
index f229283dbf..ecd6fa249e 100644
--- a/malloc/tst-memalign-2.c
+++ b/malloc/tst-memalign-2.c
@@ -86,7 +86,8 @@ do_test (void)
       TEST_VERIFY (tcache_allocs[i].ptr1 == tcache_allocs[i].ptr2);
     }

-  /* Test for non-head tcache hits. */
+  /* Test for non-head tcache hits. This exercises the memalign
+     scanning code to find matching allocations. */
   for (i = 0; i < array_length (ptr); ++ i)
     {
       if (i == 4)
@@ -113,7 +114,9 @@ do_test (void)
   free (p);
   TEST_VERIFY (count > 0);

-  /* Large bins test. */
+  /* Large bins test. This verifies that the over-allocated parts
+     that memalign releases for future allocations can be reused by
+     memalign itself at least in some cases. */

   for (i = 0; i < LN; ++ i)
     {
--
2.39.2
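
The surviving fallback strategy can be illustrated in isolation. The sketch
below is a stand-alone program, not glibc code: it mimics the worst-case
padding path that this patch makes the only path in _int_memalign, using
plain malloc in place of _int_malloc. Here align_up is a hypothetical
substitute for glibc's internal PTR_ALIGN_UP, and the extra MINSIZE that
glibc adds (so the leading remainder can be split off as a separate chunk
and freed) has no public-API equivalent, so it is omitted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round P up to the next multiple of ALIGN; ALIGN must be a power of two.
   (Hypothetical stand-in for glibc's internal PTR_ALIGN_UP.)  */
static void *
align_up (void *p, size_t align)
{
  return (void *) (((uintptr_t) p + align - 1) & ~(uintptr_t) (align - 1));
}

int
main (void)
{
  size_t bytes = 1000;     /* user request */
  size_t alignment = 256;  /* must be a power of two */

  /* Worst-case padding: wherever malloc's result lands, an
     ALIGNMENT-aligned run of BYTES bytes fits inside the block.  */
  char *raw = malloc (bytes + alignment);
  if (raw == NULL)
    return 1;              /* propagate failure, as the patch does */

  char *aligned = align_up (raw, alignment);
  assert ((uintptr_t) aligned % alignment == 0);
  assert (aligned + bytes <= raw + bytes + alignment);

  printf ("raw=%p aligned=%p leading waste=%td bytes\n",
          (void *) raw, (void *) aligned, aligned - raw);

  free (raw);              /* the sketch must free the original pointer */
  return 0;
}

In glibc itself the leading padding is carved off as a separate chunk so
that free () works directly on the aligned pointer; this sketch instead
keeps the raw pointer around and frees that.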