// NOTE: this file was recovered from a git-blame web page; the page chrome
// (site banner, unrelated commit title, path breadcrumb) has been removed.
// -*- C++ -*- Allocate exception objects.
// Copyright (C) 2001-2022 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// This is derived from the C++ ABI for IA-64.  Where we diverge
// for cross-architecture compatibility are noted with "@@@".
637e3668
JW
28#include <exception> // std::exception
29#include <new> // std::terminate
30#include <cstdlib> // std::malloc, std::free, std::strtoul
31#include <climits> // INT_MAX
32#include <bits/stl_function.h> // std::less
33#include "unwind-cxx.h"
4c24b21a 34#if _GLIBCXX_HOSTED
637e3668
JW
35# include <string_view> // std::string_view
36# include <cstring> // std::strchr, std::memset
37# include <ext/concurrence.h> // __gnu_cxx::__mutex, __gnu_cxx::__scoped_lock
4c24b21a 38#endif
637e3668
JW
39
40// We use an emergency buffer used for exceptions when malloc fails.
41// If _GLIBCXX_EH_POOL_STATIC is defined (e.g. by configure) then we use
42// a fixed-size static buffer. Otherwise, allocate on startup using malloc.
43//
44// The size of the buffer is N * (S * P + R + D), where:
45// N == The number of objects to reserve space for.
46// Defaults to EMERGENCY_OBJ_COUNT, defined below.
47// S == Estimated size of exception objects to account for.
48// This size is in units of sizeof(void*) not bytes.
49// Defaults to EMERGENCY_OBJ_SIZE, defined below.
50// P == sizeof(void*).
51// R == sizeof(__cxa_refcounted_exception).
52// D == sizeof(__cxa_dependent_exception).
53//
54// This provides space for N thrown exceptions of S words each, and an
55// additional N dependent exceptions from std::rethrow_exception.
56//
57// The calculation allows values of N and S to be target-independent,
58// as the size will be scaled by the size of basic types on the target,
59// and space for the C++ exception header (__cxa_refcounted_exception)
60// is added automatically.
61//
62// For a dynamically allocated buffer, N and S can be set from the environment.
63// Setting N=0 will disable the emergency buffer.
64// The GLIBCXX_TUNABLES environment variable will be checked for the following:
65// - Tunable glibcxx.eh_pool.obj_count overrides EMERGENCY_OBJ_COUNT.
66// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
52a11cbf 67
4c24b21a
MM
68#if _GLIBCXX_HOSTED
69using std::free;
70using std::malloc;
05a79eb6 71using std::memset;
4c24b21a 72#else
3cbc7af0
BK
73// In a freestanding environment, these functions may not be available
74// -- but for now, we assume that they are.
4c24b21a
MM
75extern "C" void *malloc (std::size_t);
76extern "C" void free(void *);
656032b8 77extern "C" void *memset (void *, int, std::size_t);
4c24b21a 78#endif
52a11cbf 79
4c24b21a 80using namespace __cxxabiv1;
// Assume that 6 * sizeof(void*) is a reasonable exception object size.
// Throwing very many large objects will exhaust the pool more quickly.
// N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE 6

#ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
#else
# define EMERGENCY_OBJ_COUNT 4
# define MAX_OBJ_COUNT 64
#endif

// This can be set by configure.  Reject out-of-range values here rather
// than silently building a broken pool.
#ifdef _GLIBCXX_EH_POOL_NOBJS
# if _GLIBCXX_EH_POOL_NOBJS > MAX_OBJ_COUNT
#  warning "_GLIBCXX_EH_POOL_NOBJS value is too large; ignoring it"
# elif _GLIBCXX_EH_POOL_NOBJS < 0
#  warning "_GLIBCXX_EH_POOL_NOBJS value is negative; ignoring it"
# else
#  undef EMERGENCY_OBJ_COUNT
#  define EMERGENCY_OBJ_COUNT _GLIBCXX_EH_POOL_NOBJS
# endif
#endif

// A static arena with a configured object count of zero would be useless,
// so compile the pool out entirely in that case.
#if defined _GLIBCXX_EH_POOL_STATIC && EMERGENCY_OBJ_COUNT == 0
# define USE_POOL 0
#else
# define USE_POOL 1
#endif
117
118#if USE_POOL
00e6c25a
JW
119namespace __gnu_cxx
120{
637e3668 121 void __freeres() noexcept;
00e6c25a 122}
52a11cbf 123
cce93c76
RB
124namespace
125{
637e3668
JW
126 static constexpr std::size_t
127 buffer_size_in_bytes(std::size_t obj_count, std::size_t obj_size) noexcept
128 {
129 // N * (S * P + R + D)
130 constexpr std::size_t P = sizeof(void*);
131 constexpr std::size_t R = sizeof(__cxa_refcounted_exception);
132 constexpr std::size_t D = sizeof(__cxa_dependent_exception);
133 return obj_count * (obj_size * P + R + D);
134 }
135
cce93c76
RB
136 // A fixed-size heap, variable size object allocator
137 class pool
138 {
139 public:
637e3668 140 pool() noexcept;
cce93c76 141
637e3668
JW
142 _GLIBCXX_NODISCARD void *allocate (std::size_t) noexcept;
143 void free (void *) noexcept;
cce93c76 144
637e3668 145 bool in_pool (void *) const noexcept;
cce93c76
RB
146
147 private:
148 struct free_entry {
149 std::size_t size;
150 free_entry *next;
151 };
152 struct allocated_entry {
153 std::size_t size;
8aa33fad 154 char data[] __attribute__((aligned));
cce93c76
RB
155 };
156
23c3cbae 157#if _GLIBCXX_HOSTED
cce93c76
RB
158 // A single mutex controlling emergency allocations.
159 __gnu_cxx::__mutex emergency_mutex;
637e3668
JW
160 using __scoped_lock = __gnu_cxx::__scoped_lock;
161#else
162 int emergency_mutex = 0;
163 struct __scoped_lock { explicit __scoped_lock(int) { } };
164#endif
cce93c76
RB
165
166 // The free-list
637e3668 167 free_entry *first_free_entry = nullptr;
cce93c76
RB
168 // The arena itself - we need to keep track of these only
169 // to implement in_pool.
637e3668
JW
170#ifdef _GLIBCXX_EH_POOL_STATIC
171 static constexpr std::size_t arena_size
172 = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
0dd9dd1f 173 alignas(void*) char arena[arena_size];
637e3668
JW
174#else
175 char *arena = nullptr;
176 std::size_t arena_size = 0;
177#endif
00e6c25a 178
637e3668 179 friend void __gnu_cxx::__freeres() noexcept;
cce93c76
RB
180 };
181
637e3668 182 pool::pool() noexcept
cce93c76 183 {
637e3668
JW
184#ifndef _GLIBCXX_EH_POOL_STATIC
185 int obj_size = EMERGENCY_OBJ_SIZE;
186 int obj_count = EMERGENCY_OBJ_COUNT;
187
188#if _GLIBCXX_HOSTED
189#if _GLIBCXX_HAVE_SECURE_GETENV
190 const char* str = ::secure_getenv("GLIBCXX_TUNABLES");
191#else
192 const char* str = std::getenv("GLIBCXX_TUNABLES");
193#endif
194 const std::string_view ns_name = "glibcxx.eh_pool";
195 std::pair<std::string_view, int> tunables[]{
196 {"obj_size", 0}, {"obj_count", obj_count}
197 };
198 while (str)
199 {
200 if (*str == ':')
201 ++str;
202
203 if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
204 && str[ns_name.size()] == '.')
205 {
206 str += ns_name.size() + 1;
207 for (auto& t : tunables)
208 if (!t.first.compare(0, t.first.size(), str, t.first.size())
209 && str[t.first.size()] == '=')
210 {
211 str += t.first.size() + 1;
212 char* end;
213 unsigned long val = strtoul(str, &end, 0);
214 if ((*end == ':' || *end == '\0') && val <= INT_MAX)
215 t.second = val;
216 str = end;
217 break;
218 }
219 }
220 str = strchr(str, ':');
221 }
222 obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
223 if (tunables[0].second != 0)
224 obj_size = tunables[0].second;
225#endif // HOSTED
226
227 arena_size = buffer_size_in_bytes(obj_count, obj_size);
228 if (arena_size == 0)
229 return;
cce93c76
RB
230 arena = (char *)malloc (arena_size);
231 if (!arena)
232 {
233 // If the allocation failed go without an emergency pool.
234 arena_size = 0;
cce93c76
RB
235 return;
236 }
637e3668 237#endif // STATIC
52a11cbf 238
cce93c76
RB
239 // Populate the free-list with a single entry covering the whole arena
240 first_free_entry = reinterpret_cast <free_entry *> (arena);
241 new (first_free_entry) free_entry;
242 first_free_entry->size = arena_size;
243 first_free_entry->next = NULL;
244 }
52a11cbf 245
637e3668 246 void *pool::allocate (std::size_t size) noexcept
cce93c76 247 {
23c3cbae 248 __scoped_lock sentry(emergency_mutex);
8aa33fad
RB
249 // We need an additional size_t member plus the padding to
250 // ensure proper alignment of data.
251 size += offsetof (allocated_entry, data);
cce93c76
RB
252 // And we need to at least hand out objects of the size of
253 // a freelist entry.
254 if (size < sizeof (free_entry))
255 size = sizeof (free_entry);
8aa33fad
RB
256 // And we need to align objects we hand out to the maximum
257 // alignment required on the target (this really aligns the
cce93c76 258 // tail which will become a new freelist entry).
8aa33fad
RB
259 size = ((size + __alignof__ (allocated_entry::data) - 1)
260 & ~(__alignof__ (allocated_entry::data) - 1));
cce93c76
RB
261 // Search for an entry of proper size on the freelist.
262 free_entry **e;
263 for (e = &first_free_entry;
264 *e && (*e)->size < size;
265 e = &(*e)->next)
266 ;
267 if (!*e)
268 return NULL;
269 allocated_entry *x;
270 if ((*e)->size - size >= sizeof (free_entry))
271 {
1c26adb7 272 // Split block if it is too large.
cce93c76
RB
273 free_entry *f = reinterpret_cast <free_entry *>
274 (reinterpret_cast <char *> (*e) + size);
275 std::size_t sz = (*e)->size;
276 free_entry *next = (*e)->next;
277 new (f) free_entry;
278 f->next = next;
279 f->size = sz - size;
280 x = reinterpret_cast <allocated_entry *> (*e);
281 new (x) allocated_entry;
282 x->size = size;
283 *e = f;
284 }
285 else
286 {
287 // Exact size match or too small overhead for a free entry.
288 std::size_t sz = (*e)->size;
289 free_entry *next = (*e)->next;
290 x = reinterpret_cast <allocated_entry *> (*e);
291 new (x) allocated_entry;
292 x->size = sz;
293 *e = next;
294 }
295 return &x->data;
296 }
30a333ce 297
637e3668 298 void pool::free (void *data) noexcept
cce93c76 299 {
637e3668 300 __scoped_lock sentry(emergency_mutex);
cce93c76 301 allocated_entry *e = reinterpret_cast <allocated_entry *>
8aa33fad 302 (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
cce93c76 303 std::size_t sz = e->size;
1c26adb7
RB
304 if (!first_free_entry
305 || (reinterpret_cast <char *> (e) + sz
306 < reinterpret_cast <char *> (first_free_entry)))
cce93c76 307 {
1c26adb7
RB
308 // If the free list is empty or the entry is before the
309 // first element and cannot be merged with it add it as
310 // the first free entry.
cce93c76
RB
311 free_entry *f = reinterpret_cast <free_entry *> (e);
312 new (f) free_entry;
313 f->size = sz;
1c26adb7 314 f->next = first_free_entry;
cce93c76
RB
315 first_free_entry = f;
316 }
317 else if (reinterpret_cast <char *> (e) + sz
318 == reinterpret_cast <char *> (first_free_entry))
319 {
320 // Check if we can merge with the first free entry being right
321 // after us.
322 free_entry *f = reinterpret_cast <free_entry *> (e);
323 new (f) free_entry;
324 f->size = sz + first_free_entry->size;
325 f->next = first_free_entry->next;
326 first_free_entry = f;
327 }
328 else
329 {
330 // Else search for a free item we can merge with at its end.
331 free_entry **fe;
332 for (fe = &first_free_entry;
333 (*fe)->next
5bc2042d
KA
334 && (reinterpret_cast <char *> (e) + sz
335 > reinterpret_cast <char *> ((*fe)->next));
cce93c76
RB
336 fe = &(*fe)->next)
337 ;
1c26adb7
RB
338 // If we can merge the next block into us do so and continue
339 // with the cases below.
340 if (reinterpret_cast <char *> (e) + sz
341 == reinterpret_cast <char *> ((*fe)->next))
342 {
343 sz += (*fe)->next->size;
344 (*fe)->next = (*fe)->next->next;
345 }
cce93c76
RB
346 if (reinterpret_cast <char *> (*fe) + (*fe)->size
347 == reinterpret_cast <char *> (e))
1c26adb7 348 // Merge with the freelist entry.
cce93c76
RB
349 (*fe)->size += sz;
350 else
351 {
352 // Else put it after it which keeps the freelist sorted.
353 free_entry *f = reinterpret_cast <free_entry *> (e);
354 new (f) free_entry;
355 f->size = sz;
356 f->next = (*fe)->next;
357 (*fe)->next = f;
358 }
359 }
360 }
361
637e3668 362 inline bool pool::in_pool (void *ptr) const noexcept
cce93c76 363 {
637e3668
JW
364 std::less<const void*> less;
365 return less(ptr, arena + arena_size) && less(arena, ptr);
cce93c76
RB
366 }
367
368 pool emergency_pool;
52a11cbf 369}
52a11cbf 370
00e6c25a
JW
371namespace __gnu_cxx
372{
637e3668 373 __attribute__((cold))
00e6c25a 374 void
637e3668 375 __freeres() noexcept
00e6c25a 376 {
637e3668 377#ifndef _GLIBCXX_EH_POOL_STATIC
00e6c25a
JW
378 if (emergency_pool.arena)
379 {
380 ::free(emergency_pool.arena);
381 emergency_pool.arena = 0;
382 }
637e3668 383#endif
00e6c25a
JW
384 }
385}
0dd9dd1f 386#endif // USE_POOL
00e6c25a 387
52a11cbf 388extern "C" void *
637e3668 389__cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
52a11cbf 390{
c4bca01b 391 thrown_size += sizeof (__cxa_refcounted_exception);
637e3668
JW
392
393 void *ret = malloc (thrown_size);
52a11cbf 394
0dd9dd1f 395#if USE_POOL
cce93c76
RB
396 if (!ret)
397 ret = emergency_pool.allocate (thrown_size);
0dd9dd1f 398#endif
2e362c74 399
cce93c76
RB
400 if (!ret)
401 std::terminate ();
52a11cbf 402
c4bca01b 403 memset (ret, 0, sizeof (__cxa_refcounted_exception));
52a11cbf 404
c4bca01b 405 return (void *)((char *)ret + sizeof (__cxa_refcounted_exception));
52a11cbf
RH
406}
407
408
409extern "C" void
637e3668 410__cxxabiv1::__cxa_free_exception(void *vptr) noexcept
52a11cbf 411{
cce93c76 412 char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
0dd9dd1f 413#if USE_POOL
637e3668 414 if (emergency_pool.in_pool (ptr)) [[__unlikely__]]
cce93c76 415 emergency_pool.free (ptr);
52a11cbf 416 else
0dd9dd1f 417#endif
cce93c76 418 free (ptr);
52a11cbf 419}
30a333ce
PC
420
421
422extern "C" __cxa_dependent_exception*
637e3668 423__cxxabiv1::__cxa_allocate_dependent_exception() noexcept
30a333ce 424{
637e3668 425 void *ret = malloc (sizeof (__cxa_dependent_exception));
30a333ce 426
0dd9dd1f 427#if USE_POOL
30a333ce 428 if (!ret)
637e3668 429 ret = emergency_pool.allocate (sizeof (__cxa_dependent_exception));
0dd9dd1f 430#endif
30a333ce 431
cce93c76
RB
432 if (!ret)
433 std::terminate ();
30a333ce 434
30a333ce
PC
435 memset (ret, 0, sizeof (__cxa_dependent_exception));
436
637e3668 437 return static_cast<__cxa_dependent_exception*>(ret);
30a333ce
PC
438}
439
440
441extern "C" void
442__cxxabiv1::__cxa_free_dependent_exception
637e3668 443 (__cxa_dependent_exception *vptr) noexcept
30a333ce 444{
0dd9dd1f 445#if USE_POOL
637e3668 446 if (emergency_pool.in_pool (vptr)) [[__unlikely__]]
cce93c76 447 emergency_pool.free (vptr);
30a333ce 448 else
0dd9dd1f 449#endif
30a333ce
PC
450 free (vptr);
451}