]> git.ipfire.org Git - thirdparty/gcc.git/blame - libgomp/allocator.c
c-family: regenerate c.opt.urls
[thirdparty/gcc.git] / libgomp / allocator.c
CommitLineData
a945c346 1/* Copyright (C) 2020-2024 Free Software Foundation, Inc.
e1071571
JJ
2 Contributed by Jakub Jelinek <jakub@redhat.com>.
3
4 This file is part of the GNU Offloading and Multi Processing Library
5 (libgomp).
6
7 Libgomp is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 more details.
16
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
20
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
25
26/* This file contains wrappers for the system allocation routines. Most
27 places in the OpenMP API do not make any provision for failure, so in
28 general we cannot allow memory allocation to fail. */
29
30#define _GNU_SOURCE
31#include "libgomp.h"
32#include <stdlib.h>
b38a4bd1 33#include <string.h>
450b05ce 34#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
17f52a1c
JJ
35#include <dlfcn.h>
36#endif
e1071571 37
d4b6d147
TB
/* Keeping track whether a Fortran scalar allocatable/pointer has been
   allocated via 'omp allocators'/'omp allocate'.  */

/* Splay-tree key: the tracked user pointer itself; ordering is plain
   pointer comparison (see fort_alloc_splay_compare).  */
struct fort_alloc_splay_tree_key_s {
  void *ptr;
};

/* Convenience pointer typedefs for the fort_alloc-prefixed splay tree
   instantiated below via splay-tree.h.  */
typedef struct fort_alloc_splay_tree_node_s *fort_alloc_splay_tree_node;
typedef struct fort_alloc_splay_tree_s *fort_alloc_splay_tree;
typedef struct fort_alloc_splay_tree_key_s *fort_alloc_splay_tree_key;
48
49static inline int
50fort_alloc_splay_compare (fort_alloc_splay_tree_key x, fort_alloc_splay_tree_key y)
51{
52 if (x->ptr < y->ptr)
53 return -1;
54 if (x->ptr > y->ptr)
55 return 1;
56 return 0;
57}
/* Instantiate the fort_alloc_* splay tree: the first include emits the
   declarations, the second (with splay_tree_c) the implementation;
   splay_tree_static gives everything internal linkage.  */
#define splay_tree_prefix fort_alloc
#define splay_tree_static
#include "splay-tree.h"

#define splay_tree_prefix fort_alloc
#define splay_tree_static
#define splay_tree_c
#include "splay-tree.h"

/* Set of pointers known to have been allocated via 'omp allocate'.  */
static struct fort_alloc_splay_tree_s fort_alloc_scalars;
68
/* Add pointer as being alloced by GOMP_alloc.  Called from
   Fortran-generated code; a NULL pointer is silently ignored.  */
void
GOMP_add_alloc (void *ptr)
{
  if (ptr == NULL)
    return;
  fort_alloc_splay_tree_node item;
  /* NOTE(review): struct splay_tree_node_s is presumably remapped to the
     fort_alloc variant by splay-tree.h's prefix macros — confirm.  */
  item = gomp_malloc (sizeof (struct splay_tree_node_s));
  item->key.ptr = ptr;
  item->left = NULL;
  item->right = NULL;
  fort_alloc_splay_tree_insert (&fort_alloc_scalars, item);
}
82
/* Remove pointer, either called by FREE or by REALLOC,
   either of them can change the allocation status.
   Returns true iff PTR was tracked; on a hit the entry is removed from
   the tree before returning.  */
bool
GOMP_is_alloc (void *ptr)
{
  struct fort_alloc_splay_tree_key_s needle;
  fort_alloc_splay_tree_node n;
  needle.ptr = ptr;
  n = fort_alloc_splay_tree_lookup_node (&fort_alloc_scalars, &needle);
  if (n)
    {
      fort_alloc_splay_tree_remove (&fort_alloc_scalars, &n->key);
      free (n);
    }
  return n != NULL;
}
99
100
/* Highest-valued standard OpenMP predefined allocator, and the value
   range occupied by the GNU ompx_gnu_* predefined allocators.  */
#define omp_max_predefined_alloc omp_thread_mem_alloc
#define ompx_gnu_min_predefined_alloc ompx_gnu_pinned_mem_alloc
#define ompx_gnu_max_predefined_alloc ompx_gnu_pinned_mem_alloc

/* These macros may be overridden in config/<target>/allocator.c.
   The defaults (no override) are to return NULL for pinned memory requests
   and pass through to the regular OS calls otherwise.
   The following definitions (ab)use comma operators to avoid unused
   variable errors.  */
#ifndef MEMSPACE_ALLOC
#define MEMSPACE_ALLOC(MEMSPACE, SIZE, PIN) \
  (PIN ? NULL : malloc (((void)(MEMSPACE), (SIZE))))
#endif
#ifndef MEMSPACE_CALLOC
#define MEMSPACE_CALLOC(MEMSPACE, SIZE, PIN) \
  (PIN ? NULL : calloc (1, (((void)(MEMSPACE), (SIZE)))))
#endif
#ifndef MEMSPACE_REALLOC
#define MEMSPACE_REALLOC(MEMSPACE, ADDR, OLDSIZE, SIZE, OLDPIN, PIN) \
  ((PIN) || (OLDPIN) ? NULL \
   : realloc (ADDR, (((void)(MEMSPACE), (void)(OLDSIZE), (SIZE)))))
#endif
#ifndef MEMSPACE_FREE
/* The default hooks never hand out pinned memory (the ALLOC macros above
   return NULL when PIN is set), so memory is released only when !PIN.
   The previous form "if (PIN) free (...)" had the condition inverted and
   would have leaked every regular allocation.  */
#define MEMSPACE_FREE(MEMSPACE, ADDR, SIZE, PIN) \
  if (PIN) {} else free (((void)(MEMSPACE), (void)(SIZE), (ADDR)))
#endif
#ifndef MEMSPACE_VALIDATE
#define MEMSPACE_VALIDATE(MEMSPACE, ACCESS, PIN) \
  (PIN ? 0 : ((void)(MEMSPACE), (void)(ACCESS), 1))
#endif
30486fab
AS
131
/* Map the predefined allocators to the correct memory space.
   The index to this table is the omp_allocator_handle_t enum value.
   When the user calls omp_alloc with a predefined allocator this
   table determines what memory they get.  */
static const omp_memspace_handle_t predefined_omp_alloc_mapping[] = {
  omp_default_mem_space,   /* omp_null_allocator doesn't actually use this. */
  omp_default_mem_space,   /* omp_default_mem_alloc. */
  omp_large_cap_mem_space, /* omp_large_cap_mem_alloc. */
  omp_const_mem_space,     /* omp_const_mem_alloc. */
  omp_high_bw_mem_space,   /* omp_high_bw_mem_alloc. */
  omp_low_lat_mem_space,   /* omp_low_lat_mem_alloc. */
  omp_low_lat_mem_space,   /* omp_cgroup_mem_alloc (implementation defined). */
  omp_low_lat_mem_space,   /* omp_pteam_mem_alloc (implementation defined). */
  omp_low_lat_mem_space,   /* omp_thread_mem_alloc (implementation defined). */
};
/* Same, for the GNU-extension predefined allocators; indexed by
   allocator - ompx_gnu_min_predefined_alloc.  */
static const omp_memspace_handle_t predefined_ompx_gnu_alloc_mapping[] = {
  omp_default_mem_space,   /* ompx_gnu_pinned_mem_alloc. */
};

#define ARRAY_SIZE(A) (sizeof (A) / sizeof ((A)[0]))
/* Keep the tables in sync with the enum values they are indexed by.  */
_Static_assert (ARRAY_SIZE (predefined_omp_alloc_mapping)
		== omp_max_predefined_alloc + 1,
		"predefined_omp_alloc_mapping must match omp_memspace_handle_t");
_Static_assert (ARRAY_SIZE (predefined_ompx_gnu_alloc_mapping)
		== (ompx_gnu_max_predefined_alloc
		    - ompx_gnu_min_predefined_alloc) + 1,
		"predefined_ompx_gnu_alloc_mapping must match"
		" omp_memspace_handle_t");
160
161static inline bool
162predefined_allocator_p (omp_allocator_handle_t allocator)
163{
164 return allocator <= ompx_gnu_max_predefined_alloc;
165}
166
167static inline omp_memspace_handle_t
168predefined_alloc_mapping (omp_allocator_handle_t allocator)
169{
170 if (allocator <= omp_max_predefined_alloc)
171 return predefined_omp_alloc_mapping[allocator];
172 else if (allocator >= ompx_gnu_min_predefined_alloc
173 && allocator <= ompx_gnu_max_predefined_alloc)
174 {
175 int index = allocator - ompx_gnu_min_predefined_alloc;
176 return predefined_ompx_gnu_alloc_mapping[index];
177 }
178 else
179 /* This should never happen. */
180 return omp_default_mem_space;
181}
30486fab 182
/* Backend selector for an allocator: GOMP_MEMKIND_NONE (plain MEMSPACE_*
   hooks), one of the memkind kinds, or GOMP_MEMKIND_LIBNUMA.  The kind
   list is generated once here and reused for the MEMKIND_* symbol-name
   strings in gomp_init_memkind.  */
enum gomp_numa_memkind_kind
{
  GOMP_MEMKIND_NONE = 0,
#define GOMP_MEMKIND_KINDS \
  GOMP_MEMKIND_KIND (HBW_INTERLEAVE), \
  GOMP_MEMKIND_KIND (HBW_PREFERRED), \
  GOMP_MEMKIND_KIND (DAX_KMEM_ALL), \
  GOMP_MEMKIND_KIND (DAX_KMEM), \
  GOMP_MEMKIND_KIND (INTERLEAVE), \
  GOMP_MEMKIND_KIND (DEFAULT)
#define GOMP_MEMKIND_KIND(kind) GOMP_MEMKIND_##kind
  GOMP_MEMKIND_KINDS,
#undef GOMP_MEMKIND_KIND
  GOMP_MEMKIND_COUNT,
  /* Libnuma is flagged with a value one past the memkind kinds.  */
  GOMP_MEMKIND_LIBNUMA = GOMP_MEMKIND_COUNT
};
199
e1071571
JJ
/* Internal representation of an allocator created by omp_init_allocator;
   the omp_allocator_handle_t handed to the user is a pointer to one of
   these.  */
struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;	/* Minimum alignment, >= sizeof (void *).  */
  omp_uintptr_t pool_size;	/* ~(uintptr_t) 0 means "unlimited".  */
  omp_uintptr_t used_pool_size;	/* Bytes currently drawn from the pool.  */
  omp_allocator_handle_t fb_data;  /* Fallback allocator (allocator_fb).  */
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  unsigned int memkind : 8;	/* enum gomp_numa_memkind_kind backend.  */
#endif
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;		/* Guards used_pool_size without atomics.  */
#endif
};

/* Bookkeeping header stored immediately before every pointer returned
   by omp_alloc and friends; tells omp_free how to release the block.  */
struct omp_mem_header
{
  void *ptr;			/* Start of the underlying allocation.  */
  size_t size;			/* Total size of that allocation.  */
  omp_allocator_handle_t allocator;  /* Allocator used at alloc time.  */
  void *pad;
};
227
450b05ce
TB
/* Entry points resolved at runtime from libnuma.so.1; any pointer may be
   NULL when the library is absent or unusable.  */
struct gomp_libnuma_data
{
  void *numa_handle;		/* dlopen handle, NULL if unavailable.  */
  void *(*numa_alloc_local) (size_t);
  void *(*numa_realloc) (void *, size_t, size_t);
  void (*numa_free) (void *, size_t);
};

/* Entry points resolved at runtime from libmemkind.so.0.  */
struct gomp_memkind_data
{
  void *memkind_handle;		/* dlopen handle, NULL if unavailable.  */
  void *(*memkind_malloc) (void *, size_t);
  void *(*memkind_calloc) (void *, size_t, size_t);
  void *(*memkind_realloc) (void *, void *, size_t);
  void (*memkind_free) (void *, void *);
  int (*memkind_check_available) (void *);
  /* Resolved MEMKIND_* kind objects, NULL when unusable; index 0 (NONE)
     is never used.  */
  void **kinds[GOMP_MEMKIND_COUNT];
};
246
450b05ce
TB
#ifdef LIBGOMP_USE_LIBNUMA
/* Lazily-initialized libnuma function table; published once with a
   release store, read elsewhere with acquire loads.  */
static struct gomp_libnuma_data *libnuma_data;
static pthread_once_t libnuma_data_once = PTHREAD_ONCE_INIT;

/* pthread_once callback: dlopen libnuma.so.1 and resolve the functions
   we need.  A table is published even when the library is missing or
   unusable (function pointers all NULL in that case); only calloc
   failure leaves libnuma_data unset.  */
static void
gomp_init_libnuma (void)
{
  void *handle = dlopen ("libnuma.so.1", RTLD_LAZY);
  struct gomp_libnuma_data *data;

  data = calloc (1, sizeof (struct gomp_libnuma_data));
  if (data == NULL)
    {
      if (handle)
	dlclose (handle);
      return;
    }
  /* Only trust the library when numa_available () reports success.  */
  if (handle)
    {
      int (*numa_available) (void);
      numa_available
	= (__typeof (numa_available)) dlsym (handle, "numa_available");
      if (!numa_available || numa_available () != 0)
	{
	  dlclose (handle);
	  handle = NULL;
	}
    }
  if (!handle)
    {
      /* Publish an empty table so callers never retry a failed dlopen.  */
      __atomic_store_n (&libnuma_data, data, MEMMODEL_RELEASE);
      return;
    }
  data->numa_handle = handle;
  data->numa_alloc_local
    = (__typeof (data->numa_alloc_local)) dlsym (handle, "numa_alloc_local");
  data->numa_realloc
    = (__typeof (data->numa_realloc)) dlsym (handle, "numa_realloc");
  data->numa_free
    = (__typeof (data->numa_free)) dlsym (handle, "numa_free");
  __atomic_store_n (&libnuma_data, data, MEMMODEL_RELEASE);
}

/* Return the libnuma function table, initializing it on first use.
   Returns NULL only if initialization failed for lack of memory.  */
static struct gomp_libnuma_data *
gomp_get_libnuma (void)
{
  struct gomp_libnuma_data *data
    = __atomic_load_n (&libnuma_data, MEMMODEL_ACQUIRE);
  if (data)
    return data;
  pthread_once (&libnuma_data_once, gomp_init_libnuma);
  return __atomic_load_n (&libnuma_data, MEMMODEL_ACQUIRE);
}
#endif
301
17f52a1c
JJ
#ifdef LIBGOMP_USE_MEMKIND
/* Lazily-initialized memkind function table; published once with a
   release store, read elsewhere with acquire loads.  */
static struct gomp_memkind_data *memkind_data;
static pthread_once_t memkind_data_once = PTHREAD_ONCE_INIT;

/* pthread_once callback: dlopen libmemkind.so.0, resolve the API
   functions and the MEMKIND_* kind objects.  A table is always
   published unless calloc fails; missing/unusable pieces stay NULL.  */
static void
gomp_init_memkind (void)
{
  void *handle = dlopen ("libmemkind.so.0", RTLD_LAZY);
  struct gomp_memkind_data *data;
  int i;
  /* Symbol names for the kind objects, generated from the same list as
     enum gomp_numa_memkind_kind so indices line up.  */
  static const char *kinds[] = {
    NULL,
#define GOMP_MEMKIND_KIND(kind) "MEMKIND_" #kind
    GOMP_MEMKIND_KINDS
#undef GOMP_MEMKIND_KIND
  };

  data = calloc (1, sizeof (struct gomp_memkind_data));
  if (data == NULL)
    {
      if (handle)
	dlclose (handle);
      return;
    }
  if (!handle)
    {
      __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
      return;
    }
  data->memkind_handle = handle;
  data->memkind_malloc
    = (__typeof (data->memkind_malloc)) dlsym (handle, "memkind_malloc");
  data->memkind_calloc
    = (__typeof (data->memkind_calloc)) dlsym (handle, "memkind_calloc");
  data->memkind_realloc
    = (__typeof (data->memkind_realloc)) dlsym (handle, "memkind_realloc");
  data->memkind_free
    = (__typeof (data->memkind_free)) dlsym (handle, "memkind_free");
  data->memkind_check_available
    = (__typeof (data->memkind_check_available))
      dlsym (handle, "memkind_check_available");
  /* Resolve the kind objects only if the full API is present; drop any
     kind that memkind_check_available rejects (nonzero return means the
     kind is unusable — memkind returns 0 on success).  */
  if (data->memkind_malloc
      && data->memkind_calloc
      && data->memkind_realloc
      && data->memkind_free
      && data->memkind_check_available)
    for (i = 1; i < GOMP_MEMKIND_COUNT; ++i)
      {
	data->kinds[i] = (void **) dlsym (handle, kinds[i]);
	if (data->kinds[i] && data->memkind_check_available (*data->kinds[i]))
	  data->kinds[i] = NULL;
      }
  __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
}

/* Return the memkind function table, initializing it on first use.
   Returns NULL only if initialization failed for lack of memory.  */
static struct gomp_memkind_data *
gomp_get_memkind (void)
{
  struct gomp_memkind_data *data
    = __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
  if (data)
    return data;
  pthread_once (&memkind_data_once, gomp_init_memkind);
  return __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
}
#endif
368
e1071571
JJ
/* Create an allocator in MEMSPACE configured by the NTRAITS traits in
   TRAITS.  Returns omp_null_allocator if the memspace is unknown, a
   trait key or value is invalid, or the combination cannot be supported
   on this target.  The returned handle is a pointer to a heap-allocated
   struct omp_allocator_data.  */
omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
		    const omp_alloctrait_t traits[])
{
  /* Spec-default trait values; the memkind/libnuma backend is chosen
     further below.  */
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
	omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment,
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
	GOMP_MEMKIND_NONE
#endif
      };
  struct omp_allocator_data *ret;
  int i;

  /* Reject unknown memory spaces.  */
  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  /* Validate each trait, rejecting unknown keys and values.  */
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.sync_hint = omp_atv_contended;
	    break;
	  case omp_atv_contended:
	  case omp_atv_uncontended:
	  case omp_atv_serialized:
	  case omp_atv_private:
	    data.sync_hint = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_alignment:
	if (traits[i].value == omp_atv_default)
	  {
	    data.alignment = 1;
	    break;
	  }
	/* Alignment must be a non-zero power of two.  */
	if ((traits[i].value & (traits[i].value - 1)) != 0
	    || !traits[i].value)
	  return omp_null_allocator;
	data.alignment = traits[i].value;
	break;
      case omp_atk_access:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.access = omp_atv_all;
	    break;
	  case omp_atv_all:
	  case omp_atv_cgroup:
	  case omp_atv_pteam:
	  case omp_atv_thread:
	    data.access = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_pool_size:
	if (traits[i].value == omp_atv_default)
	  data.pool_size = ~(uintptr_t) 0;
	else
	  data.pool_size = traits[i].value;
	break;
      case omp_atk_fallback:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.fallback = omp_atv_default_mem_fb;
	    break;
	  case omp_atv_default_mem_fb:
	  case omp_atv_null_fb:
	  case omp_atv_abort_fb:
	  case omp_atv_allocator_fb:
	    data.fallback = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_fb_data:
	/* Not validated here: only used when fallback == allocator_fb.  */
	data.fb_data = traits[i].value;
	break;
      case omp_atk_pinned:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	  case omp_atv_false:
	    data.pinned = omp_atv_false;
	    break;
	  case omp_atv_true:
	    data.pinned = omp_atv_true;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      case omp_atk_partition:
	switch (traits[i].value)
	  {
	  case omp_atv_default:
	    data.partition = omp_atv_environment;
	    break;
	  case omp_atv_environment:
	  case omp_atv_nearest:
	  case omp_atv_blocked:
	  case omp_atv_interleaved:
	    data.partition = traits[i].value;
	    break;
	  default:
	    return omp_null_allocator;
	  }
	break;
      default:
	return omp_null_allocator;
      }

  /* The header layout requires at least pointer alignment.  */
  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* Choose a memkind kind matching the memspace/partition, if the
     library provides a usable one.  */
  switch (memspace)
    {
#ifdef LIBGOMP_USE_MEMKIND
    case omp_high_bw_mem_space:
      struct gomp_memkind_data *memkind_data;
      memkind_data = gomp_get_memkind ();
      if (data.partition == omp_atv_interleaved
	  && memkind_data->kinds[GOMP_MEMKIND_HBW_INTERLEAVE])
	{
	  data.memkind = GOMP_MEMKIND_HBW_INTERLEAVE;
	  break;
	}
      else if (memkind_data->kinds[GOMP_MEMKIND_HBW_PREFERRED])
	{
	  data.memkind = GOMP_MEMKIND_HBW_PREFERRED;
	  break;
	}
      break;
    case omp_large_cap_mem_space:
      memkind_data = gomp_get_memkind ();
      if (memkind_data->kinds[GOMP_MEMKIND_DAX_KMEM_ALL])
	data.memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      else if (memkind_data->kinds[GOMP_MEMKIND_DAX_KMEM])
	data.memkind = GOMP_MEMKIND_DAX_KMEM;
      break;
#endif
    default:
#ifdef LIBGOMP_USE_MEMKIND
      if (data.partition == omp_atv_interleaved)
	{
	  memkind_data = gomp_get_memkind ();
	  if (memkind_data->kinds[GOMP_MEMKIND_INTERLEAVE])
	    data.memkind = GOMP_MEMKIND_INTERLEAVE;
	}
#endif
      break;
    }

#ifdef LIBGOMP_USE_LIBNUMA
  /* partition(nearest) is served by libnuma when no memkind kind was
     selected above.  */
  if (data.memkind == GOMP_MEMKIND_NONE && data.partition == omp_atv_nearest)
    {
      libnuma_data = gomp_get_libnuma ();
      if (libnuma_data->numa_alloc_local != NULL)
	data.memkind = GOMP_MEMKIND_LIBNUMA;
    }
#endif

  /* Reject unsupported memory spaces.  */
  if (!MEMSPACE_VALIDATE (data.memspace, data.access, data.pinned))
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}
551
552void
553omp_destroy_allocator (omp_allocator_handle_t allocator)
554{
555 if (allocator != omp_null_allocator)
556 {
557#ifndef HAVE_SYNC_BUILTINS
558 gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
559#endif
560 free ((void *) allocator);
561 }
562}
563
fff15bad
TB
564ialias (omp_init_allocator)
565ialias (omp_destroy_allocator)
566
/* Allocate SIZE bytes aligned to at least ALIGNMENT from ALLOCATOR,
   honouring the allocator's pool limit and fallback traits.  The user
   pointer is preceded by a struct omp_mem_header recording the
   underlying allocation for omp_free/omp_realloc.  Returns NULL when
   SIZE == 0 or on failure with the null_fb fallback.  ALIGNMENT is
   assumed to be a power of two (guaranteed by the OpenMP spec).  */
void *
omp_aligned_alloc (size_t alignment, size_t size,
		   omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, new_alignment;
  void *ptr, *ret;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  enum gomp_numa_memkind_kind memkind;
#endif

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  /* Re-entered with a different ALLOCATOR when a fallback applies.  */
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
	thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (!predefined_allocator_p (allocator))
    {
      /* User-defined allocator: the handle is the data pointer.  */
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
	new_alignment = allocator_data->alignment;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = allocator_data->memkind;
#endif
    }
  else
    {
      /* Predefined allocator: derive backend and alignment here.  */
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
	new_alignment = sizeof (void *);
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator == omp_high_bw_mem_alloc)
	memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (allocator == omp_large_cap_mem_alloc)
	memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  if (!memkind_data->kinds[memkind])
	    memkind = GOMP_MEMKIND_NONE;
	}
#endif
    }

  /* Total size: header, worst-case alignment padding, user data.  */
  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
  if (allocator == omp_low_lat_mem_alloc)
    goto fail;
#endif

  if (__builtin_expect (allocator_data
			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      /* Pool-limited path: reserve new_size in used_pool_size first,
	 releasing the reservation again if the allocation fails.  */
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
	goto fail;
#ifdef HAVE_SYNC_BUILTINS
      /* Lock-free reservation via compare-and-swap.  */
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
					MEMMODEL_RELAXED);
      do
	{
	  uintptr_t new_pool_size;
	  if (__builtin_add_overflow (used_pool_size, new_size,
				      &new_pool_size)
	      || new_pool_size > allocator_data->pool_size)
	    goto fail;
	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
					   &used_pool_size, new_pool_size,
					   true, MEMMODEL_RELAXED,
					   MEMMODEL_RELAXED))
	    break;
	}
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
				  &used_pool_size)
	  || used_pool_size > allocator_data->pool_size)
	{
	  gomp_mutex_unlock (&allocator_data->lock);
	  goto fail;
	}
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
	ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[memkind];
	  ptr = memkind_data->memkind_malloc (kind, new_size);
	}
      else
#endif
	ptr = MEMSPACE_ALLOC (allocator_data->memspace, new_size,
			      allocator_data->pinned);
      if (ptr == NULL)
	{
	  /* Undo the pool reservation made above.  */
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= new_size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	  goto fail;
	}
    }
  else
    {
      /* No pool limit: allocate directly from the chosen backend.  */
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
	ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[memkind];
	  ptr = memkind_data->memkind_malloc (kind, new_size);
	}
      else
#endif
	{
	  omp_memspace_handle_t memspace;
	  memspace = (allocator_data
		      ? allocator_data->memspace
		      : predefined_alloc_mapping (allocator));
	  int pinned = (allocator_data
			? allocator_data->pinned
			: allocator == ompx_gnu_pinned_mem_alloc);
	  ptr = MEMSPACE_ALLOC (memspace, new_size, pinned);
	}
      if (ptr == NULL)
	goto fail;
    }

  /* Place the user pointer at the requested alignment, leaving room for
     the header right before it.  */
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
		     + sizeof (struct omp_mem_header)
		     + new_alignment - sizeof (void *))
		    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

fail:;
  /* Predefined allocators have no fallback trait: default_mem_alloc and
     the pinned allocator behave as null_fb, the rest retry with
     default_mem_alloc.  */
  int fallback = (allocator_data
		  ? allocator_data->fallback
		  : (allocator == omp_default_mem_alloc
		     || allocator == ompx_gnu_pinned_mem_alloc)
		  ? omp_atv_null_fb
		  : omp_atv_default_mem_fb);
  switch (fallback)
    {
    case omp_atv_default_mem_fb:
      allocator = omp_default_mem_alloc;
      goto retry;
    case omp_atv_null_fb:
      break;
    default:
    case omp_atv_abort_fb:
      gomp_fatal ("Out of memory allocating %lu bytes",
		  (unsigned long) size);
    case omp_atv_allocator_fb:
      allocator = allocator_data->fb_data;
      goto retry;
    }
  return NULL;
}
766
b38a4bd1
JJ
767ialias (omp_aligned_alloc)
768
6fcc3cac
JJ
769void *
770omp_alloc (size_t size, omp_allocator_handle_t allocator)
771{
b38a4bd1 772 return ialias_call (omp_aligned_alloc) (1, size, allocator);
6fcc3cac
JJ
773}
774
775/* Like omp_aligned_alloc, but apply on top of that:
776 "For allocations that arise from this ... the null_fb value of the
777 fallback allocator trait behaves as if the abort_fb had been specified." */
778
779void *
780GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
781{
b38a4bd1
JJ
782 void *ret
783 = ialias_call (omp_aligned_alloc) (alignment, size,
784 (omp_allocator_handle_t) allocator);
6fcc3cac
JJ
785 if (__builtin_expect (ret == NULL, 0) && size)
786 gomp_fatal ("Out of memory allocating %lu bytes",
787 (unsigned long) size);
788 return ret;
789}
790
e1071571
JJ
/* Free PTR, previously returned by omp_alloc/omp_aligned_alloc or the
   calloc/realloc variants.  The ALLOCATOR argument is ignored: the
   allocator actually used is recorded in the header preceding PTR.
   Freeing NULL is a no-op.  */
void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;
  omp_memspace_handle_t memspace = omp_default_mem_space;
  int pinned = false;

  if (ptr == NULL)
    return;
  (void) allocator;
  /* The bookkeeping header lives immediately before the user pointer.  */
  data = &((struct omp_mem_header *) ptr)[-1];
  if (!predefined_allocator_p (data->allocator))
    {
      struct omp_allocator_data *allocator_data
	= (struct omp_allocator_data *) (data->allocator);
      /* Return the bytes to the allocator's pool, if it has a limit.  */
      if (allocator_data->pool_size < ~(uintptr_t) 0)
	{
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= data->size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	}
#ifdef LIBGOMP_USE_LIBNUMA
      if (allocator_data->memkind == GOMP_MEMKIND_LIBNUMA)
	{
	  libnuma_data->numa_free (data->ptr, data->size);
	  return;
	}
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator_data->memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[allocator_data->memkind];
	  memkind_data->memkind_free (kind, data->ptr);
	  return;
	}
#endif

      memspace = allocator_data->memspace;
      pinned = allocator_data->pinned;
    }
  else
    {
#ifdef LIBGOMP_USE_MEMKIND
      /* Mirror the predefined-allocator backend choice made at
	 allocation time in omp_aligned_alloc.  */
      enum gomp_numa_memkind_kind memkind = GOMP_MEMKIND_NONE;
      if (data->allocator == omp_high_bw_mem_alloc)
	memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (data->allocator == omp_large_cap_mem_alloc)
	memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  if (memkind_data->kinds[memkind])
	    {
	      void *kind = *memkind_data->kinds[memkind];
	      memkind_data->memkind_free (kind, data->ptr);
	      return;
	    }
	}
#endif

      memspace = predefined_alloc_mapping (data->allocator);
      pinned = (data->allocator == ompx_gnu_pinned_mem_alloc);
    }

  MEMSPACE_FREE (memspace, data->ptr, data->size, pinned);
}
6fcc3cac
JJ
866
867ialias (omp_free)
868
869void
870GOMP_free (void *ptr, uintptr_t allocator)
871{
b38a4bd1
JJ
872 return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
873}
874
875void *
876omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
877 omp_allocator_handle_t allocator)
878{
879 struct omp_allocator_data *allocator_data;
880 size_t new_size, size_temp, new_alignment;
881 void *ptr, *ret;
450b05ce
TB
882#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
883 enum gomp_numa_memkind_kind memkind;
17f52a1c 884#endif
b38a4bd1
JJ
885
886 if (__builtin_expect (size == 0 || nmemb == 0, 0))
887 return NULL;
888
889retry:
890 new_alignment = alignment;
891 if (allocator == omp_null_allocator)
892 {
893 struct gomp_thread *thr = gomp_thread ();
894 if (thr->ts.def_allocator == omp_null_allocator)
895 thr->ts.def_allocator = gomp_def_allocator;
896 allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
897 }
898
64001441 899 if (!predefined_allocator_p (allocator))
b38a4bd1
JJ
900 {
901 allocator_data = (struct omp_allocator_data *) allocator;
902 if (new_alignment < allocator_data->alignment)
903 new_alignment = allocator_data->alignment;
450b05ce 904#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
17f52a1c
JJ
905 memkind = allocator_data->memkind;
906#endif
b38a4bd1
JJ
907 }
908 else
909 {
910 allocator_data = NULL;
911 if (new_alignment < sizeof (void *))
912 new_alignment = sizeof (void *);
450b05ce 913#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
17f52a1c 914 memkind = GOMP_MEMKIND_NONE;
450b05ce
TB
915#endif
916#ifdef LIBGOMP_USE_MEMKIND
17f52a1c
JJ
917 if (allocator == omp_high_bw_mem_alloc)
918 memkind = GOMP_MEMKIND_HBW_PREFERRED;
919 else if (allocator == omp_large_cap_mem_alloc)
920 memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
921 if (memkind)
922 {
923 struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
924 if (!memkind_data->kinds[memkind])
925 memkind = GOMP_MEMKIND_NONE;
926 }
927#endif
b38a4bd1
JJ
928 }
929
930 new_size = sizeof (struct omp_mem_header);
931 if (new_alignment > sizeof (void *))
932 new_size += new_alignment - sizeof (void *);
933 if (__builtin_mul_overflow (size, nmemb, &size_temp))
934 goto fail;
935 if (__builtin_add_overflow (size_temp, new_size, &new_size))
936 goto fail;
e9a19ead
AS
937#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
938 if (allocator == omp_low_lat_mem_alloc)
939 goto fail;
940#endif
b38a4bd1
JJ
941
942 if (__builtin_expect (allocator_data
943 && allocator_data->pool_size < ~(uintptr_t) 0, 0))
944 {
945 uintptr_t used_pool_size;
946 if (new_size > allocator_data->pool_size)
947 goto fail;
948#ifdef HAVE_SYNC_BUILTINS
949 used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
950 MEMMODEL_RELAXED);
951 do
952 {
953 uintptr_t new_pool_size;
954 if (__builtin_add_overflow (used_pool_size, new_size,
955 &new_pool_size)
956 || new_pool_size > allocator_data->pool_size)
957 goto fail;
958 if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
959 &used_pool_size, new_pool_size,
960 true, MEMMODEL_RELAXED,
961 MEMMODEL_RELAXED))
962 break;
963 }
964 while (1);
965#else
966 gomp_mutex_lock (&allocator_data->lock);
967 if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
968 &used_pool_size)
969 || used_pool_size > allocator_data->pool_size)
970 {
971 gomp_mutex_unlock (&allocator_data->lock);
972 goto fail;
973 }
974 allocator_data->used_pool_size = used_pool_size;
975 gomp_mutex_unlock (&allocator_data->lock);
976#endif
450b05ce
TB
977#ifdef LIBGOMP_USE_LIBNUMA
978 if (memkind == GOMP_MEMKIND_LIBNUMA)
979 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
980 memory that is initialized to zero. */
981 ptr = libnuma_data->numa_alloc_local (new_size);
982# ifdef LIBGOMP_USE_MEMKIND
983 else
984# endif
985#endif
17f52a1c
JJ
986#ifdef LIBGOMP_USE_MEMKIND
987 if (memkind)
988 {
989 struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
990 void *kind = *memkind_data->kinds[memkind];
991 ptr = memkind_data->memkind_calloc (kind, 1, new_size);
992 }
993 else
994#endif
348874f0
AS
995 ptr = MEMSPACE_CALLOC (allocator_data->memspace, new_size,
996 allocator_data->pinned);
b38a4bd1
JJ
997 if (ptr == NULL)
998 {
999#ifdef HAVE_SYNC_BUILTINS
1000 __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
1001 MEMMODEL_RELAXED);
1002#else
1003 gomp_mutex_lock (&allocator_data->lock);
1004 allocator_data->used_pool_size -= new_size;
1005 gomp_mutex_unlock (&allocator_data->lock);
1006#endif
1007 goto fail;
1008 }
1009 }
1010 else
1011 {
450b05ce
TB
1012#ifdef LIBGOMP_USE_LIBNUMA
1013 if (memkind == GOMP_MEMKIND_LIBNUMA)
1014 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
1015 memory that is initialized to zero. */
1016 ptr = libnuma_data->numa_alloc_local (new_size);
1017# ifdef LIBGOMP_USE_MEMKIND
1018 else
1019# endif
1020#endif
17f52a1c
JJ
1021#ifdef LIBGOMP_USE_MEMKIND
1022 if (memkind)
1023 {
1024 struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
1025 void *kind = *memkind_data->kinds[memkind];
1026 ptr = memkind_data->memkind_calloc (kind, 1, new_size);
1027 }
1028 else
1029#endif
30486fab
AS
1030 {
1031 omp_memspace_handle_t memspace;
1032 memspace = (allocator_data
1033 ? allocator_data->memspace
64001441
AS
1034 : predefined_alloc_mapping (allocator));
1035 int pinned = (allocator_data
1036 ? allocator_data->pinned
1037 : allocator == ompx_gnu_pinned_mem_alloc);
1038 ptr = MEMSPACE_CALLOC (memspace, new_size, pinned);
30486fab 1039 }
b38a4bd1
JJ
1040 if (ptr == NULL)
1041 goto fail;
1042 }
1043
1044 if (new_alignment > sizeof (void *))
1045 ret = (void *) (((uintptr_t) ptr
1046 + sizeof (struct omp_mem_header)
1047 + new_alignment - sizeof (void *))
1048 & ~(new_alignment - 1));
1049 else
1050 ret = (char *) ptr + sizeof (struct omp_mem_header);
1051 ((struct omp_mem_header *) ret)[-1].ptr = ptr;
1052 ((struct omp_mem_header *) ret)[-1].size = new_size;
1053 ((struct omp_mem_header *) ret)[-1].allocator = allocator;
1054 return ret;
1055
30486fab
AS
1056fail:;
1057 int fallback = (allocator_data
1058 ? allocator_data->fallback
64001441
AS
1059 : (allocator == omp_default_mem_alloc
1060 || allocator == ompx_gnu_pinned_mem_alloc)
30486fab
AS
1061 ? omp_atv_null_fb
1062 : omp_atv_default_mem_fb);
1063 switch (fallback)
b38a4bd1 1064 {
30486fab
AS
1065 case omp_atv_default_mem_fb:
1066 allocator = omp_default_mem_alloc;
1067 goto retry;
1068 case omp_atv_null_fb:
1069 break;
1070 default:
1071 case omp_atv_abort_fb:
1072 gomp_fatal ("Out of memory allocating %lu bytes",
1073 (unsigned long) (size * nmemb));
1074 case omp_atv_allocator_fb:
1075 allocator = allocator_data->fb_data;
1076 goto retry;
b38a4bd1
JJ
1077 }
1078 return NULL;
1079}
1080
1081ialias (omp_aligned_calloc)
1082
1083void *
1084omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
1085{
1086 return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
1087}
1088
/* omp_realloc: OpenMP API routine.  Reallocate PTR (previously returned by
   one of the omp_*alloc routines) to SIZE bytes using ALLOCATOR; the old
   block is released through the allocator recorded in PTR's hidden header
   (the FREE_ALLOCATOR argument is overridden by that recorded value below).
   Returns the new pointer, or NULL / aborts / retries with a fallback
   allocator on failure, depending on the allocator's fallback trait.  */
void *
omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
	     omp_allocator_handle_t free_allocator)
{
  struct omp_allocator_data *allocator_data, *free_allocator_data;
  size_t new_size, old_size, new_alignment, old_alignment;
  void *new_ptr, *ret;
  struct omp_mem_header *data;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  enum gomp_numa_memkind_kind memkind, free_memkind;
#endif

  /* Per OpenMP semantics, realloc of NULL behaves like an allocation...  */
  if (__builtin_expect (ptr == NULL, 0))
    return ialias_call (omp_aligned_alloc) (1, size, allocator);

  /* ...and realloc to size 0 behaves like a free.  */
  if (__builtin_expect (size == 0, 0))
    {
      ialias_call (omp_free) (ptr, free_allocator);
      return NULL;
    }

  /* Every omp_*alloc allocation is preceded by a hidden header recording
     the raw pointer, the allocated size and the owning allocator.  */
  data = &((struct omp_mem_header *) ptr)[-1];
  /* The allocator stored in the header is authoritative for the free side;
     ignore the FREE_ALLOCATOR argument.  */
  free_allocator = data->allocator;

retry:
  new_alignment = sizeof (void *);
  if (allocator == omp_null_allocator)
    allocator = free_allocator;

  /* Resolve the allocation-side allocator: user-defined allocators carry
     their own trait block; predefined ones use defaults (and, where
     enabled, a memkind/libnuma kind).  */
  if (!predefined_allocator_p (allocator))
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
	new_alignment = allocator_data->alignment;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = allocator_data->memkind;
#endif
    }
  else
    {
      allocator_data = NULL;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator == omp_high_bw_mem_alloc)
	memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (allocator == omp_large_cap_mem_alloc)
	memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  /* Fall back to plain allocation if the memkind library did not
	     provide this kind at runtime.  */
	  if (!memkind_data->kinds[memkind])
	    memkind = GOMP_MEMKIND_NONE;
	}
#endif
    }
  /* Resolve the free-side allocator the same way.  */
  if (!predefined_allocator_p (free_allocator))
    {
      free_allocator_data = (struct omp_allocator_data *) free_allocator;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      free_memkind = free_allocator_data->memkind;
#endif
    }
  else
    {
      free_allocator_data = NULL;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      free_memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (free_allocator == omp_high_bw_mem_alloc)
	free_memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (free_allocator == omp_large_cap_mem_alloc)
	free_memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (free_memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  if (!memkind_data->kinds[free_memkind])
	    free_memkind = GOMP_MEMKIND_NONE;
	}
#endif
    }
  /* Distance from the raw allocation to the user pointer; equals
     sizeof (struct omp_mem_header) when no extra alignment was applied.  */
  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);

  /* Compute the raw size needed: user size + header + alignment slack,
     with overflow checked.  */
  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
  old_size = data->size;
#ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
  /* On targets without a low-latency memory space this predefined
     allocator is invalid; trigger the fallback path.  */
  if (allocator == omp_low_lat_mem_alloc)
    goto fail;
#endif

  /* Case 1: the target allocator enforces a pool_size limit.  Reserve the
     size delta in used_pool_size before allocating, roll it back if the
     allocation fails.  */
  if (__builtin_expect (allocator_data
			&& allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      size_t prev_size = 0;
      /* Check if we can use realloc.  Don't use it if extra alignment
	 was used previously or newly, because realloc might return a pointer
	 with different alignment and then we'd need to memmove the data
	 again.  */
      if (free_allocator_data
	  && free_allocator_data == allocator_data
	  && new_alignment == sizeof (void *)
	  && old_alignment == sizeof (struct omp_mem_header))
	prev_size = old_size;
      if (new_size > prev_size
	  && new_size - prev_size > allocator_data->pool_size)
	goto fail;
#ifdef HAVE_SYNC_BUILTINS
      /* Lock-free reservation: CAS-loop used_pool_size by the delta
	 (new_size - prev_size), failing if the pool limit would be
	 exceeded.  */
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
					MEMMODEL_RELAXED);
      do
	{
	  uintptr_t new_pool_size;
	  if (new_size > prev_size)
	    {
	      if (__builtin_add_overflow (used_pool_size, new_size - prev_size,
					  &new_pool_size)
		  || new_pool_size > allocator_data->pool_size)
		goto fail;
	    }
	  else
	    /* Shrinking: the delta is negative, no limit check needed.  */
	    new_pool_size = used_pool_size + new_size - prev_size;
	  if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
					   &used_pool_size, new_pool_size,
					   true, MEMMODEL_RELAXED,
					   MEMMODEL_RELAXED))
	    break;
	}
      while (1);
#else
      /* Mutex-protected reservation for targets without atomics.  */
      gomp_mutex_lock (&allocator_data->lock);
      if (new_size > prev_size)
	{
	  if (__builtin_add_overflow (allocator_data->used_pool_size,
				      new_size - prev_size,
				      &used_pool_size)
	      || used_pool_size > allocator_data->pool_size)
	    {
	      gomp_mutex_unlock (&allocator_data->lock);
	      goto fail;
	    }
	}
      else
	used_pool_size = (allocator_data->used_pool_size
			  + new_size - prev_size);
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
	{
	  /* prev_size != 0 means an in-place realloc is permitted.  */
	  if (prev_size)
	    new_ptr = libnuma_data->numa_realloc (data->ptr, data->size,
						  new_size);
	  else
	    new_ptr = libnuma_data->numa_alloc_local (new_size);
	}
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[memkind];
	  if (prev_size)
	    new_ptr = memkind_data->memkind_realloc (kind, data->ptr,
						     new_size);
	  else
	    new_ptr = memkind_data->memkind_malloc (kind, new_size);
	}
      else
#endif
      if (prev_size)
	{
	  int was_pinned = (free_allocator_data
			    ? free_allocator_data->pinned
			    : free_allocator == ompx_gnu_pinned_mem_alloc);
	  new_ptr = MEMSPACE_REALLOC (allocator_data->memspace, data->ptr,
				      data->size, new_size, was_pinned,
				      allocator_data->pinned);
	}
      else
	new_ptr = MEMSPACE_ALLOC (allocator_data->memspace, new_size,
				  allocator_data->pinned);
      if (new_ptr == NULL)
	{
	  /* Allocation failed: undo the pool reservation made above.  */
#ifdef HAVE_SYNC_BUILTINS
	  __atomic_add_fetch (&allocator_data->used_pool_size,
			      prev_size - new_size,
			      MEMMODEL_RELAXED);
#else
	  gomp_mutex_lock (&allocator_data->lock);
	  allocator_data->used_pool_size -= new_size - prev_size;
	  gomp_mutex_unlock (&allocator_data->lock);
#endif
	  goto fail;
	}
      else if (prev_size)
	{
	  /* In-place realloc succeeded; the old block is already gone, so
	     just refresh the header and return without copying.  */
	  ret = (char *) new_ptr + sizeof (struct omp_mem_header);
	  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
	  ((struct omp_mem_header *) ret)[-1].size = new_size;
	  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
	  return ret;
	}
      /* Fresh allocation: fall through to the copy-and-free path below.  */
    }
  /* Case 2: no extra alignment on either side, same memkind, and the old
     allocator has no pool limit to update -> the underlying realloc can be
     used directly.  */
  else if (new_alignment == sizeof (void *)
	   && old_alignment == sizeof (struct omp_mem_header)
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
	   && memkind == free_memkind
#endif
	   && (free_allocator_data == NULL
	       || free_allocator_data->pool_size == ~(uintptr_t) 0))
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
	new_ptr = libnuma_data->numa_realloc (data->ptr, data->size, new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[memkind];
	  new_ptr = memkind_data->memkind_realloc (kind, data->ptr,
						   new_size);
	}
      else
#endif
	{
	  omp_memspace_handle_t memspace;
	  memspace = (allocator_data
		      ? allocator_data->memspace
		      : predefined_alloc_mapping (allocator));
	  int was_pinned = (free_allocator_data
			    ? free_allocator_data->pinned
			    : free_allocator == ompx_gnu_pinned_mem_alloc);
	  int pinned = (allocator_data
			? allocator_data->pinned
			: allocator == ompx_gnu_pinned_mem_alloc);
	  new_ptr = MEMSPACE_REALLOC (memspace, data->ptr, data->size, new_size,
				      was_pinned, pinned);
	}
      if (new_ptr == NULL)
	goto fail;

      /* Data was moved (or kept) by realloc itself; only the header needs
	 updating.  */
      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
      ((struct omp_mem_header *) ret)[-1].size = new_size;
      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
      return ret;
    }
  /* Case 3: general path — allocate fresh memory, then copy and free the
     old block below.  */
  else
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
	new_ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
	{
	  struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
	  void *kind = *memkind_data->kinds[memkind];
	  new_ptr = memkind_data->memkind_malloc (kind, new_size);
	}
      else
#endif
	{
	  omp_memspace_handle_t memspace;
	  memspace = (allocator_data
		      ? allocator_data->memspace
		      : predefined_alloc_mapping (allocator));
	  int pinned = (allocator_data
			? allocator_data->pinned
			: allocator == ompx_gnu_pinned_mem_alloc);
	  new_ptr = MEMSPACE_ALLOC (memspace, new_size, pinned);
	}
      if (new_ptr == NULL)
	goto fail;
    }

  /* Lay out the user pointer inside the fresh allocation: align past the
     header if an alignment larger than the default was requested.  */
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) new_ptr
		     + sizeof (struct omp_mem_header)
		     + new_alignment - sizeof (void *))
		    & ~(new_alignment - 1));
  else
    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  /* Copy min (old user size, new user size) bytes of payload.  */
  if (old_size - old_alignment < size)
    size = old_size - old_alignment;
  memcpy (ret, ptr, size);
  /* Return the old block's bytes to its allocator's pool accounting.  */
  if (__builtin_expect (free_allocator_data
			&& free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
			  MEMMODEL_RELAXED);
#else
      gomp_mutex_lock (&free_allocator_data->lock);
      free_allocator_data->used_pool_size -= data->size;
      gomp_mutex_unlock (&free_allocator_data->lock);
#endif
    }
  /* Free the old raw allocation through whichever mechanism created it.  */
#ifdef LIBGOMP_USE_LIBNUMA
  if (free_memkind == GOMP_MEMKIND_LIBNUMA)
    {
      libnuma_data->numa_free (data->ptr, data->size);
      return ret;
    }
# ifdef LIBGOMP_USE_MEMKIND
  else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
  if (free_memkind)
    {
      struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
      void *kind = *memkind_data->kinds[free_memkind];
      memkind_data->memkind_free (kind, data->ptr);
      return ret;
    }
#endif
  {
    omp_memspace_handle_t was_memspace;
    was_memspace = (free_allocator_data
		    ? free_allocator_data->memspace
		    : predefined_alloc_mapping (free_allocator));
    int was_pinned = (free_allocator_data
		      ? free_allocator_data->pinned
		      : free_allocator == ompx_gnu_pinned_mem_alloc);
    MEMSPACE_FREE (was_memspace, data->ptr, data->size, was_pinned);
  }
  return ret;

fail:;
  /* Allocation failed: apply the allocator's fallback trait.  Predefined
     allocators (other than those defaulting to null fallback) retry with
     omp_default_mem_alloc.  */
  int fallback = (allocator_data
		  ? allocator_data->fallback
		  : (allocator == omp_default_mem_alloc
		     || allocator == ompx_gnu_pinned_mem_alloc)
		  ? omp_atv_null_fb
		  : omp_atv_default_mem_fb);
  switch (fallback)
    {
    case omp_atv_default_mem_fb:
      allocator = omp_default_mem_alloc;
      goto retry;
    case omp_atv_null_fb:
      break;
    default:
    case omp_atv_abort_fb:
      gomp_fatal ("Out of memory allocating %lu bytes",
		  (unsigned long) size);
    case omp_atv_allocator_fb:
      /* omp_atv_allocator_fb is only settable on user-defined allocators,
	 so allocator_data is non-NULL here.  */
      allocator = allocator_data->fb_data;
      goto retry;
    }
  return NULL;
}