]> git.ipfire.org Git - thirdparty/gcc.git/blame - libstdc++-v3/libsupc++/eh_alloc.cc
Update copyright years.
[thirdparty/gcc.git] / libstdc++-v3 / libsupc++ / eh_alloc.cc
CommitLineData
52a11cbf 1// -*- C++ -*- Allocate exception objects.
a945c346 2// Copyright (C) 2001-2024 Free Software Foundation, Inc.
52a11cbf 3//
cbecceb9 4// This file is part of GCC.
52a11cbf 5//
cbecceb9 6// GCC is free software; you can redistribute it and/or modify
52a11cbf 7// it under the terms of the GNU General Public License as published by
748086b7 8// the Free Software Foundation; either version 3, or (at your option)
52a11cbf
RH
9// any later version.
10//
cbecceb9 11// GCC is distributed in the hope that it will be useful,
52a11cbf
RH
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15//
748086b7
JJ
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
52a11cbf 19
748086b7
JJ
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
52a11cbf
RH
24
25// This is derived from the C++ ABI for IA-64. Where we diverge
26// for cross-architecture compatibility are noted with "@@@".
27
f505f37a
JW
28#ifndef _GNU_SOURCE
29// Cygwin needs this for secure_getenv
30# define _GNU_SOURCE 1
31#endif
32
637e3668
JW
33#include <exception> // std::exception
34#include <new> // std::terminate
35#include <cstdlib> // std::malloc, std::free, std::strtoul
36#include <climits> // INT_MAX
37#include <bits/stl_function.h> // std::less
38#include "unwind-cxx.h"
4c24b21a 39#if _GLIBCXX_HOSTED
637e3668
JW
40# include <string_view> // std::string_view
41# include <cstring> // std::strchr, std::memset
42# include <ext/concurrence.h> // __gnu_cxx::__mutex, __gnu_cxx::__scoped_lock
4c24b21a 43#endif
637e3668
JW
44
45// We use an emergency buffer used for exceptions when malloc fails.
46// If _GLIBCXX_EH_POOL_STATIC is defined (e.g. by configure) then we use
47// a fixed-size static buffer. Otherwise, allocate on startup using malloc.
48//
49// The size of the buffer is N * (S * P + R + D), where:
50// N == The number of objects to reserve space for.
51// Defaults to EMERGENCY_OBJ_COUNT, defined below.
52// S == Estimated size of exception objects to account for.
53// This size is in units of sizeof(void*) not bytes.
54// Defaults to EMERGENCY_OBJ_SIZE, defined below.
55// P == sizeof(void*).
56// R == sizeof(__cxa_refcounted_exception).
57// D == sizeof(__cxa_dependent_exception).
58//
59// This provides space for N thrown exceptions of S words each, and an
60// additional N dependent exceptions from std::rethrow_exception.
61//
62// The calculation allows values of N and S to be target-independent,
63// as the size will be scaled by the size of basic types on the target,
64// and space for the C++ exception header (__cxa_refcounted_exception)
65// is added automatically.
66//
67// For a dynamically allocated buffer, N and S can be set from the environment.
68// Setting N=0 will disable the emergency buffer.
69// The GLIBCXX_TUNABLES environment variable will be checked for the following:
70// - Tunable glibcxx.eh_pool.obj_count overrides EMERGENCY_OBJ_COUNT.
71// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
52a11cbf 72
4c24b21a
MM
73#if _GLIBCXX_HOSTED
74using std::free;
75using std::malloc;
05a79eb6 76using std::memset;
4c24b21a 77#else
3cbc7af0
BK
78// In a freestanding environment, these functions may not be available
79// -- but for now, we assume that they are.
4c24b21a
MM
80extern "C" void *malloc (std::size_t);
81extern "C" void free(void *);
656032b8 82extern "C" void *memset (void *, int, std::size_t);
4c24b21a 83#endif
52a11cbf 84
4c24b21a 85using namespace __cxxabiv1;
52a11cbf 86
637e3668
JW
87// Assume that 6 * sizeof(void*) is a reasonable exception object size.
88// Throwing very many large objects will exhaust the pool more quickly.
89// N.B. sizeof(std::bad_alloc) == sizeof(void*)
90// and sizeof(std::runtime_error) == 2 * sizeof(void*)
91// and sizeof(std::system_error) == 4 * sizeof(void*).
92#define EMERGENCY_OBJ_SIZE 6
93
94#ifdef __GTHREADS
95// Assume that the number of concurrent exception objects scales with the
96// processor word size, i.e., 16-bit systems are not likely to have hundreds
97// of threads all simultaneously throwing on OOM conditions.
98# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
99# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
52a11cbf 100#else
637e3668
JW
101# define EMERGENCY_OBJ_COUNT 4
102# define MAX_OBJ_COUNT 64
52a11cbf
RH
103#endif
104
637e3668
JW
105// This can be set by configure.
106#ifdef _GLIBCXX_EH_POOL_NOBJS
107# if _GLIBCXX_EH_POOL_NOBJS > MAX_OBJ_COUNT
108# warning "_GLIBCXX_EH_POOL_NOBJS value is too large; ignoring it"
0dd9dd1f
JW
109# elif _GLIBCXX_EH_POOL_NOBJS < 0
110# warning "_GLIBCXX_EH_POOL_NOBJS value is negative; ignoring it"
637e3668
JW
111# else
112# undef EMERGENCY_OBJ_COUNT
113# define EMERGENCY_OBJ_COUNT _GLIBCXX_EH_POOL_NOBJS
114# endif
52a11cbf
RH
115#endif
116
0dd9dd1f
JW
117#if defined _GLIBCXX_EH_POOL_STATIC && EMERGENCY_OBJ_COUNT == 0
118# define USE_POOL 0
119#else
120# define USE_POOL 1
121#endif
122
123#if USE_POOL
00e6c25a
JW
124namespace __gnu_cxx
125{
637e3668 126 void __freeres() noexcept;
00e6c25a 127}
52a11cbf 128
cce93c76
RB
129namespace
130{
637e3668
JW
131 static constexpr std::size_t
132 buffer_size_in_bytes(std::size_t obj_count, std::size_t obj_size) noexcept
133 {
134 // N * (S * P + R + D)
135 constexpr std::size_t P = sizeof(void*);
136 constexpr std::size_t R = sizeof(__cxa_refcounted_exception);
137 constexpr std::size_t D = sizeof(__cxa_dependent_exception);
138 return obj_count * (obj_size * P + R + D);
139 }
140
  // A fixed-size heap, variable size object allocator.
  // Memory is carved from a single arena using an address-ordered,
  // first-fit free-list; see allocate()/free() below.
  class pool
  {
  public:
    pool() noexcept;

    _GLIBCXX_NODISCARD void *allocate (std::size_t) noexcept;
    void free (void *) noexcept;

    // True if ptr lies inside this pool's arena.
    bool in_pool (void *) const noexcept;

  private:
    // Header of a block on the free-list. size counts the whole block,
    // including this header.
    struct free_entry {
      std::size_t size;
      free_entry *next;
    };
    // Header of a block handed out by allocate(). size counts the whole
    // block; data is the maximally-aligned storage returned to callers.
    struct allocated_entry {
      std::size_t size;
      char data[] __attribute__((aligned));
    };

#if _GLIBCXX_HOSTED
    // A single mutex controlling emergency allocations.
    __gnu_cxx::__mutex emergency_mutex;
    using __scoped_lock = __gnu_cxx::__scoped_lock;
#else
    // Freestanding build: locking is stubbed out (no mutex available).
    int emergency_mutex = 0;
    struct __scoped_lock { explicit __scoped_lock(int) { } };
#endif

    // The free-list
    free_entry *first_free_entry = nullptr;
    // The arena itself - we need to keep track of these only
    // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC
    // Static arena: size fixed at build time from the configured counts.
    static constexpr std::size_t arena_size
      = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
    alignas(void*) char arena[arena_size];
#else
    // Dynamic arena: allocated (and sized) in the constructor; may stay
    // null if malloc fails or the pool is disabled via tunables.
    char *arena = nullptr;
    std::size_t arena_size = 0;
#endif

    // __freeres needs direct access to arena to release it at shutdown.
    friend void __gnu_cxx::__freeres() noexcept;
  };
186
  // Construct the emergency pool. For a dynamic arena, the object count
  // and size may be overridden via the GLIBCXX_TUNABLES environment
  // variable (glibcxx.eh_pool.obj_count / glibcxx.eh_pool.obj_size);
  // the whole arena is then placed on the free-list as one entry.
  pool::pool() noexcept
  {
#ifndef _GLIBCXX_EH_POOL_STATIC
    int obj_size = EMERGENCY_OBJ_SIZE;
    int obj_count = EMERGENCY_OBJ_COUNT;

#if _GLIBCXX_HOSTED
#if _GLIBCXX_HAVE_SECURE_GETENV
    // Prefer secure_getenv so setuid programs ignore the tunables.
    const char* str = ::secure_getenv("GLIBCXX_TUNABLES");
#else
    const char* str = std::getenv("GLIBCXX_TUNABLES");
#endif
    // Tunables are colon-separated "glibcxx.eh_pool.NAME=VALUE" entries.
    const std::string_view ns_name = "glibcxx.eh_pool";
    std::pair<std::string_view, int> tunables[]{
      {"obj_size", 0}, {"obj_count", obj_count}
    };
    while (str)
      {
	if (*str == ':')
	  ++str;

	// NOTE(review): compare reads ns_name.size() bytes from str;
	// presumably the NUL terminator compares unequal before any
	// overrun matters — confirm for env values shorter than the name.
	if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
	      && str[ns_name.size()] == '.')
	  {
	    str += ns_name.size() + 1;
	    for (auto& t : tunables)
	      if (!t.first.compare(0, t.first.size(), str, t.first.size())
		    && str[t.first.size()] == '=')
		{
		  str += t.first.size() + 1;
		  char* end;
		  // Base 0: accepts decimal, hex (0x) and octal values.
		  unsigned long val = strtoul(str, &end, 0);
		  // Only accept a value followed by ':' or end-of-string.
		  if ((*end == ':' || *end == '\0') && val <= INT_MAX)
		    t.second = val;
		  str = end;
		  break;
		}
	  }
	// Skip to the next colon-separated tunable (or terminate loop).
	str = strchr(str, ':');
      }
    obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
    if (tunables[0].second != 0)
      obj_size = tunables[0].second;
#endif // HOSTED

    arena_size = buffer_size_in_bytes(obj_count, obj_size);
    if (arena_size == 0)
      return; // Pool disabled (obj_count == 0).
    arena = (char *)malloc (arena_size);
    if (!arena)
      {
	// If the allocation failed go without an emergency pool.
	arena_size = 0;
	return;
      }
#endif // STATIC

    // Populate the free-list with a single entry covering the whole arena
    first_free_entry = reinterpret_cast <free_entry *> (arena);
    new (first_free_entry) free_entry;
    first_free_entry->size = arena_size;
    first_free_entry->next = NULL;
  }
52a11cbf 250
  // Allocate at least size bytes from the pool using first-fit search of
  // the free-list. Returns a pointer to maximally-aligned storage, or
  // NULL when no free-list entry is large enough. Thread-safe via
  // emergency_mutex.
  void *pool::allocate (std::size_t size) noexcept
  {
    __scoped_lock sentry(emergency_mutex);
    // We need an additional size_t member plus the padding to
    // ensure proper alignment of data.
    size += offsetof (allocated_entry, data);
    // And we need to at least hand out objects of the size of
    // a freelist entry.
    if (size < sizeof (free_entry))
      size = sizeof (free_entry);
    // And we need to align objects we hand out to the maximum
    // alignment required on the target (this really aligns the
    // tail which will become a new freelist entry).
    size = ((size + __alignof__ (allocated_entry::data) - 1)
	    & ~(__alignof__ (allocated_entry::data) - 1));
    // Search for an entry of proper size on the freelist.
    free_entry **e;
    for (e = &first_free_entry;
	 *e && (*e)->size < size;
	 e = &(*e)->next)
      ;
    if (!*e)
      return NULL; // Pool exhausted (or no block big enough).
    allocated_entry *x;
    if ((*e)->size - size >= sizeof (free_entry))
      {
	// Split block if it is too large: the tail stays on the
	// free-list in the same position, keeping it address-sorted.
	free_entry *f = reinterpret_cast <free_entry *>
	  (reinterpret_cast <char *> (*e) + size);
	std::size_t sz = (*e)->size;
	free_entry *next = (*e)->next;
	new (f) free_entry;
	f->next = next;
	f->size = sz - size;
	x = reinterpret_cast <allocated_entry *> (*e);
	new (x) allocated_entry;
	x->size = size;
	*e = f;
      }
    else
      {
	// Exact size match or too small overhead for a free entry:
	// hand out the whole block and unlink it.
	std::size_t sz = (*e)->size;
	free_entry *next = (*e)->next;
	x = reinterpret_cast <allocated_entry *> (*e);
	new (x) allocated_entry;
	x->size = sz;
	*e = next;
      }
    // Return the payload, just past the allocated_entry header.
    return &x->data;
  }
30a333ce 302
  // Return a block obtained from allocate() to the pool. The block is
  // reinserted into the address-sorted free-list and coalesced with any
  // adjacent free blocks (before and/or after) to limit fragmentation.
  void pool::free (void *data) noexcept
  {
    __scoped_lock sentry(emergency_mutex);
    // Recover the allocated_entry header preceding the payload.
    allocated_entry *e = reinterpret_cast <allocated_entry *>
      (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
    std::size_t sz = e->size;
    if (!first_free_entry
	|| (reinterpret_cast <char *> (e) + sz
	    < reinterpret_cast <char *> (first_free_entry)))
      {
	// If the free list is empty or the entry is before the
	// first element and cannot be merged with it add it as
	// the first free entry.
	free_entry *f = reinterpret_cast <free_entry *> (e);
	new (f) free_entry;
	f->size = sz;
	f->next = first_free_entry;
	first_free_entry = f;
      }
    else if (reinterpret_cast <char *> (e) + sz
	     == reinterpret_cast <char *> (first_free_entry))
      {
	// Check if we can merge with the first free entry being right
	// after us.
	free_entry *f = reinterpret_cast <free_entry *> (e);
	new (f) free_entry;
	f->size = sz + first_free_entry->size;
	f->next = first_free_entry->next;
	first_free_entry = f;
      }
    else
      {
	// Else search for a free item we can merge with at its end.
	// The loop stops at the last free entry whose successor does
	// not start before the end of this block.
	free_entry **fe;
	for (fe = &first_free_entry;
	     (*fe)->next
	       && (reinterpret_cast <char *> (e) + sz
		   > reinterpret_cast <char *> ((*fe)->next));
	     fe = &(*fe)->next)
	  ;
	// If we can merge the next block into us do so and continue
	// with the cases below.
	if (reinterpret_cast <char *> (e) + sz
	    == reinterpret_cast <char *> ((*fe)->next))
	  {
	    sz += (*fe)->next->size;
	    (*fe)->next = (*fe)->next->next;
	  }
	if (reinterpret_cast <char *> (*fe) + (*fe)->size
	    == reinterpret_cast <char *> (e))
	  // Merge with the freelist entry immediately preceding us.
	  (*fe)->size += sz;
	else
	  {
	    // Else put it after it which keeps the freelist sorted.
	    free_entry *f = reinterpret_cast <free_entry *> (e);
	    new (f) free_entry;
	    f->size = sz;
	    f->next = (*fe)->next;
	    (*fe)->next = f;
	  }
      }
  }
366
637e3668 367 inline bool pool::in_pool (void *ptr) const noexcept
cce93c76 368 {
637e3668
JW
369 std::less<const void*> less;
370 return less(ptr, arena + arena_size) && less(arena, ptr);
cce93c76
RB
371 }
372
373 pool emergency_pool;
52a11cbf 374}
52a11cbf 375
00e6c25a
JW
376namespace __gnu_cxx
377{
637e3668 378 __attribute__((cold))
00e6c25a 379 void
637e3668 380 __freeres() noexcept
00e6c25a 381 {
637e3668 382#ifndef _GLIBCXX_EH_POOL_STATIC
00e6c25a
JW
383 if (emergency_pool.arena)
384 {
385 ::free(emergency_pool.arena);
386 emergency_pool.arena = 0;
387 }
637e3668 388#endif
00e6c25a
JW
389 }
390}
0dd9dd1f 391#endif // USE_POOL
00e6c25a 392
52a11cbf 393extern "C" void *
637e3668 394__cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
52a11cbf 395{
c4bca01b 396 thrown_size += sizeof (__cxa_refcounted_exception);
637e3668
JW
397
398 void *ret = malloc (thrown_size);
52a11cbf 399
0dd9dd1f 400#if USE_POOL
cce93c76
RB
401 if (!ret)
402 ret = emergency_pool.allocate (thrown_size);
0dd9dd1f 403#endif
2e362c74 404
cce93c76
RB
405 if (!ret)
406 std::terminate ();
52a11cbf 407
c4bca01b 408 memset (ret, 0, sizeof (__cxa_refcounted_exception));
52a11cbf 409
c4bca01b 410 return (void *)((char *)ret + sizeof (__cxa_refcounted_exception));
52a11cbf
RH
411}
412
413
414extern "C" void
637e3668 415__cxxabiv1::__cxa_free_exception(void *vptr) noexcept
52a11cbf 416{
cce93c76 417 char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
0dd9dd1f 418#if USE_POOL
637e3668 419 if (emergency_pool.in_pool (ptr)) [[__unlikely__]]
cce93c76 420 emergency_pool.free (ptr);
52a11cbf 421 else
0dd9dd1f 422#endif
cce93c76 423 free (ptr);
52a11cbf 424}
30a333ce
PC
425
426
427extern "C" __cxa_dependent_exception*
637e3668 428__cxxabiv1::__cxa_allocate_dependent_exception() noexcept
30a333ce 429{
637e3668 430 void *ret = malloc (sizeof (__cxa_dependent_exception));
30a333ce 431
0dd9dd1f 432#if USE_POOL
30a333ce 433 if (!ret)
637e3668 434 ret = emergency_pool.allocate (sizeof (__cxa_dependent_exception));
0dd9dd1f 435#endif
30a333ce 436
cce93c76
RB
437 if (!ret)
438 std::terminate ();
30a333ce 439
30a333ce
PC
440 memset (ret, 0, sizeof (__cxa_dependent_exception));
441
637e3668 442 return static_cast<__cxa_dependent_exception*>(ret);
30a333ce
PC
443}
444
445
446extern "C" void
447__cxxabiv1::__cxa_free_dependent_exception
637e3668 448 (__cxa_dependent_exception *vptr) noexcept
30a333ce 449{
0dd9dd1f 450#if USE_POOL
637e3668 451 if (emergency_pool.in_pool (vptr)) [[__unlikely__]]
cce93c76 452 emergency_pool.free (vptr);
30a333ce 453 else
0dd9dd1f 454#endif
30a333ce
PC
455 free (vptr);
456}