/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */
32
33 #include "config.h"
34
35 #include <errno.h>
36 #include <string.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39 #include <sys/mman.h>
40
41 #include "backtrace.h"
42 #include "internal.h"
43
44 /* Memory allocation on systems that provide anonymous mmap. This
45 permits the backtrace functions to be invoked from a signal
46 handler, assuming that mmap is async-signal safe. */
47
48 #ifndef MAP_ANONYMOUS
49 #define MAP_ANONYMOUS MAP_ANON
50 #endif
51
52 /* A list of free memory blocks. */
53
54 struct backtrace_freelist_struct
55 {
56 /* Next on list. */
57 struct backtrace_freelist_struct *next;
58 /* Size of this block, including this structure. */
59 size_t size;
60 };
61
62 /* Free memory allocated by backtrace_alloc. */
63
64 static void
65 backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
66 {
67 /* Just leak small blocks. We don't have to be perfect. */
68 if (size >= sizeof (struct backtrace_freelist_struct))
69 {
70 struct backtrace_freelist_struct *p;
71
72 p = (struct backtrace_freelist_struct *) addr;
73 p->next = state->freelist;
74 p->size = size;
75 state->freelist = p;
76 }
77 }
78
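/* Both backtrace_alloc and backtrace_free below guard the free list
   with a non-blocking try-lock built from GCC's __sync primitives:
   instead of spinning, they fall back to mmap (or just leak) when the
   lock is contended, which keeps them usable from a signal handler.
   A minimal sketch of that pattern, compiled out; the names here are
   purely illustrative.  */

#if 0
static int example_lock;

static void
example_critical_section (void)
{
  /* __sync_lock_test_and_set atomically stores 1 and returns the
     previous value, so a return of 0 means we now hold the lock.  */
  if (__sync_lock_test_and_set (&example_lock, 1) == 0)
    {
      /* ... touch the shared state ... */
      __sync_lock_release (&example_lock);
    }
  else
    {
      /* Someone else holds the lock; take a lock-free fallback path
         rather than blocking.  */
    }
}
#endif
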
/* Allocate memory like malloc.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      /* mmap reports failure by returning MAP_FAILED, not NULL.  */
      if (page == MAP_FAILED)
        error_callback (data, "mmap", errno);
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}

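/* A usage sketch for the allocator, compiled out.  The state and
   callbacks are placeholders; real callers get the state from
   backtrace_create_state and pass through the user's error
   callback.  */

#if 0
static void
example_use_alloc (struct backtrace_state *state,
                   backtrace_error_callback error_callback, void *data)
{
  char *buf;

  buf = (char *) backtrace_alloc (state, 128, error_callback, data);
  if (buf == NULL)
    return;  /* backtrace_alloc already invoked error_callback.  */
  memset (buf, 0, 128);
  /* ... use buf ... */
  backtrace_free (state, buf, 128, error_callback, data);
}
#endif
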
/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      /* Growth policy: the first allocation is 16 times the request;
         after that, double the needed size while it is under a page,
         then round up to a multiple of the page size.  */
      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        alc = (alc + pagesize - 1) & ~ (pagesize - 1);
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          /* Free the whole old block; vec->alc only counts the
             unused tail beyond the vec->size bytes in use.  */
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

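/* As a worked example of the growth policy above, assume 4096-byte
   pages: an initial 24-byte request allocates 16 * 24 = 384 bytes; a
   later request bringing the total to 500 bytes reallocates at
   2 * 500 = 1000 bytes; once the total reaches a page, sizes are
   rounded up to a multiple of the page size.  */
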
/* Finish the current allocation on VEC.  */

void
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec)
{
  /* Make the bytes built so far permanent: advance the base past
     them, so that later grows build a new object just after this
     one.  */
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
}

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  return 1;
}
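
/* A usage sketch for the vector interface, compiled out, with
   placeholder state and callbacks as above: grow the vector to build
   an object in place, then release the unused tail; vec.base is then
   the finished object.  backtrace_vector_finish would instead be used
   to keep building further objects in the same vector.  */

#if 0
static int *
example_build_ints (struct backtrace_state *state, size_t count,
                    backtrace_error_callback error_callback, void *data)
{
  struct backtrace_vector vec;
  int *p;
  size_t i;

  memset (&vec, 0, sizeof vec);
  p = (int *) backtrace_vector_grow (state, count * sizeof (int),
                                     error_callback, data, &vec);
  if (p == NULL)
    return NULL;
  for (i = 0; i < count; ++i)
    p[i] = (int) i;

  /* Return any extra allocated space to the free list; the array
     itself stays live at vec.base.  */
  if (!backtrace_vector_release (state, &vec, error_callback, data))
    return NULL;
  return (int *) vec.base;
}
#endif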