/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2020 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

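/* Older BSD-derived systems use the name MAP_ANON rather than
   MAP_ANONYMOUS.  */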
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

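/* Some older systems do not define MAP_FAILED; mmap reports failure
   by returning (void *) -1.  */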
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};

/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

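      /* Walk the list, counting the entries and remembering the
         smallest one.  */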
      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
            ppsmall = pp;
          ++c;
        }
      if (c >= 16)
        {
          if (size <= (*ppsmall)->size)
            return;
          *ppsmall = (*ppsmall)->next;
        }

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
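      /* Round the request up to a whole number of pages; any unused
         tail is handed back via backtrace_free below.  */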
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
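      /* Choose the new allocation size: a fresh vector gets room for
         16 blocks of the requested size; a vector still smaller than
         a page doubles, capped at one page; larger vectors double and
         round up to a multiple of the page size.  */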
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (
  struct backtrace_state *state ATTRIBUTE_UNUSED,
  struct backtrace_vector *vec,
  backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
  void *data ATTRIBUTE_UNUSED)
{
  void *ret;

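  /* Return the current base and advance it past the finished bytes,
     so that the next allocation on VEC starts after the block being
     returned; the remaining free space in VEC is unchanged.  */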
  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

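  /* Free everything beyond the aligned end of the in-use data; the
     padding bytes between SIZE and ALIGNED stay with the vector.  */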
  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  if (vec->size == 0)
    vec->base = NULL;
  return 1;
}