1 /* Copyright (C) 2020-2025 Free Software Foundation, Inc.
2 Contributed by Jakub Jelinek <jakub@redhat.com>.
4 This file is part of the GNU Offloading and Multi Processing Library
7 Libgomp is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 /* This file contains wrappers for the system allocation routines. Most
27 places in the OpenMP API do not make any provision for failure, so in
28 general we cannot allow memory allocation to fail. */
34 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
38 /* Keeping track whether a Fortran scalar allocatable/pointer has been
39 allocated via 'omp allocators'/'omp allocate'. */
/* Key for the fort_alloc splay tree: the user-visible pointer returned
   by GOMP_alloc.  NOTE(review): struct body restored after extraction
   dropped it — the only field used elsewhere in this file is 'ptr'
   (see GOMP_add_alloc / GOMP_is_alloc).  */
struct fort_alloc_splay_tree_key_s {
  void *ptr;
};
/* Opaque handle typedefs for the fort_alloc specialization of the
   generic splay tree instantiated below.  */
typedef struct fort_alloc_splay_tree_node_s *fort_alloc_splay_tree_node;
typedef struct fort_alloc_splay_tree_s *fort_alloc_splay_tree;
typedef struct fort_alloc_splay_tree_key_s *fort_alloc_splay_tree_key;
50 fort_alloc_splay_compare (fort_alloc_splay_tree_key x
, fort_alloc_splay_tree_key y
)
58 #define splay_tree_prefix fort_alloc
59 #define splay_tree_static
60 #include "splay-tree.h"
62 #define splay_tree_prefix fort_alloc
63 #define splay_tree_static
65 #include "splay-tree.h"
67 static struct fort_alloc_splay_tree_s fort_alloc_scalars
;
69 /* Add pointer as being alloced by GOMP_alloc. */
71 GOMP_add_alloc (void *ptr
)
75 fort_alloc_splay_tree_node item
;
76 item
= gomp_malloc (sizeof (struct splay_tree_node_s
));
80 fort_alloc_splay_tree_insert (&fort_alloc_scalars
, item
);
83 /* Remove pointer, either called by FREE or by REALLOC,
84 either of them can change the allocation status. */
86 GOMP_is_alloc (void *ptr
)
88 struct fort_alloc_splay_tree_key_s needle
;
89 fort_alloc_splay_tree_node n
;
91 n
= fort_alloc_splay_tree_lookup_node (&fort_alloc_scalars
, &needle
);
94 fort_alloc_splay_tree_remove (&fort_alloc_scalars
, &n
->key
);
101 #define omp_max_predefined_alloc omp_thread_mem_alloc
102 #define ompx_gnu_min_predefined_alloc ompx_gnu_pinned_mem_alloc
103 #define ompx_gnu_max_predefined_alloc ompx_gnu_pinned_mem_alloc
105 _Static_assert (GOMP_OMP_PREDEF_ALLOC_MAX
== omp_thread_mem_alloc
,
106 "GOMP_OMP_PREDEF_ALLOC_MAX == omp_thread_mem_alloc");
107 _Static_assert (GOMP_OMPX_PREDEF_ALLOC_MIN
== ompx_gnu_min_predefined_alloc
,
108 "GOMP_OMP_PREDEF_ALLOC_MAX == omp_thread_mem_alloc");
109 _Static_assert (GOMP_OMPX_PREDEF_ALLOC_MAX
== ompx_gnu_max_predefined_alloc
,
110 "GOMP_OMP_PREDEF_ALLOC_MAX == omp_thread_mem_alloc");
111 _Static_assert (GOMP_OMP_PREDEF_ALLOC_THREADS
== omp_thread_mem_alloc
,
112 "GOMP_OMP_PREDEF_ALLOC_THREADS == omp_thread_mem_alloc");
/* These macros may be overridden in config/<target>/allocator.c.
   The defaults (no override) are to return NULL for pinned memory requests
   and pass through to the regular OS calls otherwise.
   The following definitions (ab)use comma operators to avoid unused
   variable errors.  */
#ifndef MEMSPACE_ALLOC
#define MEMSPACE_ALLOC(MEMSPACE, SIZE, PIN) \
  ((PIN) ? NULL : malloc (((void)(MEMSPACE), (SIZE))))
#endif
#ifndef MEMSPACE_CALLOC
#define MEMSPACE_CALLOC(MEMSPACE, SIZE, PIN) \
  ((PIN) ? NULL : calloc (1, (((void)(MEMSPACE), (SIZE)))))
#endif
#ifndef MEMSPACE_REALLOC
#define MEMSPACE_REALLOC(MEMSPACE, ADDR, OLDSIZE, SIZE, OLDPIN, PIN) \
  ((PIN) || (OLDPIN) ? NULL \
   : realloc (ADDR, (((void)(MEMSPACE), (void)(OLDSIZE), (SIZE)))))
#endif
#ifndef MEMSPACE_FREE
/* BUGFIX(review): the default must release *non*-pinned memory.  The
   previous "if (PIN) free (...)" freed only pinned allocations — which
   the default MEMSPACE_ALLOC can never produce (it returns NULL for
   PIN) — so every regular omp_free leaked.  Free when !PIN instead.  */
#define MEMSPACE_FREE(MEMSPACE, ADDR, SIZE, PIN) \
  if (!(PIN)) free (((void)(MEMSPACE), (void)(SIZE), (ADDR)))
#endif
#ifndef MEMSPACE_VALIDATE
/* Nonzero iff the (memspace, access, pinned) combination is supported;
   the default host implementation cannot pin.  */
#define MEMSPACE_VALIDATE(MEMSPACE, ACCESS, PIN) \
  ((PIN) ? 0 : ((void)(MEMSPACE), (void)(ACCESS), 1))
#endif
141 /* Map the predefined allocators to the correct memory space.
142 The index to this table is the omp_allocator_handle_t enum value.
143 When the user calls omp_alloc with a predefined allocator this
144 table determines what memory they get. */
145 static const omp_memspace_handle_t predefined_omp_alloc_mapping
[] = {
146 omp_default_mem_space
, /* omp_null_allocator doesn't actually use this. */
147 omp_default_mem_space
, /* omp_default_mem_alloc. */
148 omp_large_cap_mem_space
, /* omp_large_cap_mem_alloc. */
149 omp_const_mem_space
, /* omp_const_mem_alloc. */
150 omp_high_bw_mem_space
, /* omp_high_bw_mem_alloc. */
151 omp_low_lat_mem_space
, /* omp_low_lat_mem_alloc. */
152 omp_low_lat_mem_space
, /* omp_cgroup_mem_alloc (implementation defined). */
153 omp_low_lat_mem_space
, /* omp_pteam_mem_alloc (implementation defined). */
154 omp_low_lat_mem_space
, /* omp_thread_mem_alloc (implementation defined). */
156 static const omp_memspace_handle_t predefined_ompx_gnu_alloc_mapping
[] = {
157 omp_default_mem_space
, /* ompx_gnu_pinned_mem_alloc. */
160 #define ARRAY_SIZE(A) (sizeof (A) / sizeof ((A)[0]))
161 _Static_assert (ARRAY_SIZE (predefined_omp_alloc_mapping
)
162 == omp_max_predefined_alloc
+ 1,
163 "predefined_omp_alloc_mapping must match omp_memspace_handle_t");
164 _Static_assert (ARRAY_SIZE (predefined_ompx_gnu_alloc_mapping
)
165 == (ompx_gnu_max_predefined_alloc
166 - ompx_gnu_min_predefined_alloc
) + 1,
167 "predefined_ompx_gnu_alloc_mapping must match"
168 " omp_memspace_handle_t");
171 predefined_allocator_p (omp_allocator_handle_t allocator
)
173 return allocator
<= ompx_gnu_max_predefined_alloc
;
176 static inline omp_memspace_handle_t
177 predefined_alloc_mapping (omp_allocator_handle_t allocator
)
179 if (allocator
<= omp_max_predefined_alloc
)
180 return predefined_omp_alloc_mapping
[allocator
];
181 else if (allocator
>= ompx_gnu_min_predefined_alloc
182 && allocator
<= ompx_gnu_max_predefined_alloc
)
184 int index
= allocator
- ompx_gnu_min_predefined_alloc
;
185 return predefined_ompx_gnu_alloc_mapping
[index
];
188 /* This should never happen. */
189 return omp_default_mem_space
;
/* Kinds of memkind/libnuma backing this file can route allocations to.
   GOMP_MEMKIND_KINDS is expanded twice: here to build the enum, and in
   gomp_init_memkind to build the matching "MEMKIND_*" symbol table, so
   the two stay in sync.  GOMP_MEMKIND_LIBNUMA aliases COUNT since the
   two libraries are never used for the same allocator.  */
enum gomp_numa_memkind_kind
{
  GOMP_MEMKIND_NONE = 0,
#define GOMP_MEMKIND_KINDS \
  GOMP_MEMKIND_KIND (HBW_INTERLEAVE),		\
  GOMP_MEMKIND_KIND (HBW_PREFERRED),		\
  GOMP_MEMKIND_KIND (DAX_KMEM_ALL),		\
  GOMP_MEMKIND_KIND (DAX_KMEM),			\
  GOMP_MEMKIND_KIND (INTERLEAVE),		\
  GOMP_MEMKIND_KIND (DEFAULT)
#define GOMP_MEMKIND_KIND(kind) GOMP_MEMKIND_##kind
  GOMP_MEMKIND_KINDS,
#undef GOMP_MEMKIND_KIND
  GOMP_MEMKIND_COUNT,
  GOMP_MEMKIND_LIBNUMA = GOMP_MEMKIND_COUNT
};
209 struct omp_allocator_data
211 omp_memspace_handle_t memspace
;
212 omp_uintptr_t alignment
;
213 omp_uintptr_t pool_size
;
214 omp_uintptr_t used_pool_size
;
215 omp_allocator_handle_t fb_data
;
216 unsigned int sync_hint
: 8;
217 unsigned int access
: 8;
218 unsigned int fallback
: 8;
219 unsigned int pinned
: 1;
220 unsigned int partition
: 7;
221 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
222 unsigned int memkind
: 8;
224 #ifndef HAVE_SYNC_BUILTINS
229 struct omp_mem_header
233 omp_allocator_handle_t allocator
;
/* Entry points resolved from libnuma.so.1 at runtime; all NULL when the
   library is unavailable.  */
struct gomp_libnuma_data
{
  void *numa_handle;		/* dlopen handle, kept for the process.  */
  void *(*numa_alloc_local) (size_t);
  void *(*numa_realloc) (void *, size_t, size_t);
  void (*numa_free) (void *, size_t);
};
245 struct gomp_memkind_data
247 void *memkind_handle
;
248 void *(*memkind_malloc
) (void *, size_t);
249 void *(*memkind_calloc
) (void *, size_t, size_t);
250 void *(*memkind_realloc
) (void *, void *, size_t);
251 void (*memkind_free
) (void *, void *);
252 int (*memkind_check_available
) (void *);
253 void **kinds
[GOMP_MEMKIND_COUNT
];
256 #ifdef LIBGOMP_USE_LIBNUMA
257 static struct gomp_libnuma_data
*libnuma_data
;
258 static pthread_once_t libnuma_data_once
= PTHREAD_ONCE_INIT
;
261 gomp_init_libnuma (void)
263 void *handle
= dlopen ("libnuma.so.1", RTLD_LAZY
);
264 struct gomp_libnuma_data
*data
;
266 data
= calloc (1, sizeof (struct gomp_libnuma_data
));
275 int (*numa_available
) (void);
277 = (__typeof (numa_available
)) dlsym (handle
, "numa_available");
278 if (!numa_available
|| numa_available () != 0)
286 __atomic_store_n (&libnuma_data
, data
, MEMMODEL_RELEASE
);
289 data
->numa_handle
= handle
;
290 data
->numa_alloc_local
291 = (__typeof (data
->numa_alloc_local
)) dlsym (handle
, "numa_alloc_local");
293 = (__typeof (data
->numa_realloc
)) dlsym (handle
, "numa_realloc");
295 = (__typeof (data
->numa_free
)) dlsym (handle
, "numa_free");
296 __atomic_store_n (&libnuma_data
, data
, MEMMODEL_RELEASE
);
299 static struct gomp_libnuma_data
*
300 gomp_get_libnuma (void)
302 struct gomp_libnuma_data
*data
303 = __atomic_load_n (&libnuma_data
, MEMMODEL_ACQUIRE
);
306 pthread_once (&libnuma_data_once
, gomp_init_libnuma
);
307 return __atomic_load_n (&libnuma_data
, MEMMODEL_ACQUIRE
);
311 #ifdef LIBGOMP_USE_MEMKIND
312 static struct gomp_memkind_data
*memkind_data
;
313 static pthread_once_t memkind_data_once
= PTHREAD_ONCE_INIT
;
316 gomp_init_memkind (void)
318 void *handle
= dlopen ("libmemkind.so.0", RTLD_LAZY
);
319 struct gomp_memkind_data
*data
;
321 static const char *kinds
[] = {
323 #define GOMP_MEMKIND_KIND(kind) "MEMKIND_" #kind
325 #undef GOMP_MEMKIND_KIND
328 data
= calloc (1, sizeof (struct gomp_memkind_data
));
337 __atomic_store_n (&memkind_data
, data
, MEMMODEL_RELEASE
);
340 data
->memkind_handle
= handle
;
342 = (__typeof (data
->memkind_malloc
)) dlsym (handle
, "memkind_malloc");
344 = (__typeof (data
->memkind_calloc
)) dlsym (handle
, "memkind_calloc");
345 data
->memkind_realloc
346 = (__typeof (data
->memkind_realloc
)) dlsym (handle
, "memkind_realloc");
348 = (__typeof (data
->memkind_free
)) dlsym (handle
, "memkind_free");
349 data
->memkind_check_available
350 = (__typeof (data
->memkind_check_available
))
351 dlsym (handle
, "memkind_check_available");
352 if (data
->memkind_malloc
353 && data
->memkind_calloc
354 && data
->memkind_realloc
355 && data
->memkind_free
356 && data
->memkind_check_available
)
357 for (i
= 1; i
< GOMP_MEMKIND_COUNT
; ++i
)
359 data
->kinds
[i
] = (void **) dlsym (handle
, kinds
[i
]);
360 if (data
->kinds
[i
] && data
->memkind_check_available (*data
->kinds
[i
]))
361 data
->kinds
[i
] = NULL
;
363 __atomic_store_n (&memkind_data
, data
, MEMMODEL_RELEASE
);
366 static struct gomp_memkind_data
*
367 gomp_get_memkind (void)
369 struct gomp_memkind_data
*data
370 = __atomic_load_n (&memkind_data
, MEMMODEL_ACQUIRE
);
373 pthread_once (&memkind_data_once
, gomp_init_memkind
);
374 return __atomic_load_n (&memkind_data
, MEMMODEL_ACQUIRE
);
378 omp_allocator_handle_t
379 omp_init_allocator (omp_memspace_handle_t memspace
, int ntraits
,
380 const omp_alloctrait_t traits
[])
382 struct omp_allocator_data data
383 = { memspace
, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended
, omp_atv_all
,
384 omp_atv_default_mem_fb
, omp_atv_false
, omp_atv_environment
,
385 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
389 struct omp_allocator_data
*ret
;
392 if (memspace
> omp_low_lat_mem_space
)
393 return omp_null_allocator
;
394 for (i
= 0; i
< ntraits
; i
++)
395 switch (traits
[i
].key
)
397 case omp_atk_sync_hint
:
398 switch (traits
[i
].value
)
400 case omp_atv_default
:
401 data
.sync_hint
= omp_atv_contended
;
403 case omp_atv_contended
:
404 case omp_atv_uncontended
:
405 case omp_atv_serialized
:
406 case omp_atv_private
:
407 data
.sync_hint
= traits
[i
].value
;
410 return omp_null_allocator
;
413 case omp_atk_alignment
:
414 if (traits
[i
].value
== omp_atv_default
)
419 if ((traits
[i
].value
& (traits
[i
].value
- 1)) != 0
421 return omp_null_allocator
;
422 data
.alignment
= traits
[i
].value
;
425 switch (traits
[i
].value
)
427 case omp_atv_default
:
428 data
.access
= omp_atv_all
;
434 data
.access
= traits
[i
].value
;
437 return omp_null_allocator
;
440 case omp_atk_pool_size
:
441 if (traits
[i
].value
== omp_atv_default
)
442 data
.pool_size
= ~(uintptr_t) 0;
444 data
.pool_size
= traits
[i
].value
;
446 case omp_atk_fallback
:
447 switch (traits
[i
].value
)
449 case omp_atv_default
:
450 data
.fallback
= omp_atv_default_mem_fb
;
452 case omp_atv_default_mem_fb
:
453 case omp_atv_null_fb
:
454 case omp_atv_abort_fb
:
455 case omp_atv_allocator_fb
:
456 data
.fallback
= traits
[i
].value
;
459 return omp_null_allocator
;
462 case omp_atk_fb_data
:
463 data
.fb_data
= traits
[i
].value
;
466 switch (traits
[i
].value
)
468 case omp_atv_default
:
470 data
.pinned
= omp_atv_false
;
473 data
.pinned
= omp_atv_true
;
476 return omp_null_allocator
;
479 case omp_atk_partition
:
480 switch (traits
[i
].value
)
482 case omp_atv_default
:
483 data
.partition
= omp_atv_environment
;
485 case omp_atv_environment
:
486 case omp_atv_nearest
:
487 case omp_atv_blocked
:
488 case omp_atv_interleaved
:
489 data
.partition
= traits
[i
].value
;
492 return omp_null_allocator
;
496 return omp_null_allocator
;
499 if (data
.alignment
< sizeof (void *))
500 data
.alignment
= sizeof (void *);
504 #ifdef LIBGOMP_USE_MEMKIND
505 case omp_high_bw_mem_space
:
506 struct gomp_memkind_data
*memkind_data
;
507 memkind_data
= gomp_get_memkind ();
508 if (data
.partition
== omp_atv_interleaved
509 && memkind_data
->kinds
[GOMP_MEMKIND_HBW_INTERLEAVE
])
511 data
.memkind
= GOMP_MEMKIND_HBW_INTERLEAVE
;
514 else if (memkind_data
->kinds
[GOMP_MEMKIND_HBW_PREFERRED
])
516 data
.memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
520 case omp_large_cap_mem_space
:
521 memkind_data
= gomp_get_memkind ();
522 if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM_ALL
])
523 data
.memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
524 else if (memkind_data
->kinds
[GOMP_MEMKIND_DAX_KMEM
])
525 data
.memkind
= GOMP_MEMKIND_DAX_KMEM
;
529 #ifdef LIBGOMP_USE_MEMKIND
530 if (data
.partition
== omp_atv_interleaved
)
532 memkind_data
= gomp_get_memkind ();
533 if (memkind_data
->kinds
[GOMP_MEMKIND_INTERLEAVE
])
534 data
.memkind
= GOMP_MEMKIND_INTERLEAVE
;
540 #ifdef LIBGOMP_USE_LIBNUMA
541 if (data
.memkind
== GOMP_MEMKIND_NONE
&& data
.partition
== omp_atv_nearest
)
543 libnuma_data
= gomp_get_libnuma ();
544 if (libnuma_data
->numa_alloc_local
!= NULL
)
545 data
.memkind
= GOMP_MEMKIND_LIBNUMA
;
549 /* Reject unsupported memory spaces. */
550 if (!MEMSPACE_VALIDATE (data
.memspace
, data
.access
, data
.pinned
))
551 return omp_null_allocator
;
553 ret
= gomp_malloc (sizeof (struct omp_allocator_data
));
555 #ifndef HAVE_SYNC_BUILTINS
556 gomp_mutex_init (&ret
->lock
);
558 return (omp_allocator_handle_t
) ret
;
562 omp_destroy_allocator (omp_allocator_handle_t allocator
)
564 if (allocator
!= omp_null_allocator
)
566 #ifndef HAVE_SYNC_BUILTINS
567 gomp_mutex_destroy (&((struct omp_allocator_data
*) allocator
)->lock
);
569 free ((void *) allocator
);
573 ialias (omp_init_allocator
)
574 ialias (omp_destroy_allocator
)
577 omp_aligned_alloc (size_t alignment
, size_t size
,
578 omp_allocator_handle_t allocator
)
580 struct omp_allocator_data
*allocator_data
;
581 size_t new_size
, new_alignment
;
583 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
584 enum gomp_numa_memkind_kind memkind
;
587 if (__builtin_expect (size
== 0, 0))
591 new_alignment
= alignment
;
592 if (allocator
== omp_null_allocator
)
594 struct gomp_thread
*thr
= gomp_thread ();
595 if (thr
->ts
.def_allocator
== omp_null_allocator
)
596 thr
->ts
.def_allocator
= gomp_def_allocator
;
597 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
600 if (!predefined_allocator_p (allocator
))
602 allocator_data
= (struct omp_allocator_data
*) allocator
;
603 if (new_alignment
< allocator_data
->alignment
)
604 new_alignment
= allocator_data
->alignment
;
605 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
606 memkind
= allocator_data
->memkind
;
611 allocator_data
= NULL
;
612 if (new_alignment
< sizeof (void *))
613 new_alignment
= sizeof (void *);
614 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
615 memkind
= GOMP_MEMKIND_NONE
;
617 #ifdef LIBGOMP_USE_MEMKIND
618 if (allocator
== omp_high_bw_mem_alloc
)
619 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
620 else if (allocator
== omp_large_cap_mem_alloc
)
621 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
624 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
625 if (!memkind_data
->kinds
[memkind
])
626 memkind
= GOMP_MEMKIND_NONE
;
631 new_size
= sizeof (struct omp_mem_header
);
632 if (new_alignment
> sizeof (void *))
633 new_size
+= new_alignment
- sizeof (void *);
634 if (__builtin_add_overflow (size
, new_size
, &new_size
))
636 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
637 if (allocator
== omp_low_lat_mem_alloc
)
641 if (__builtin_expect (allocator_data
642 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
644 uintptr_t used_pool_size
;
645 if (new_size
> allocator_data
->pool_size
)
647 #ifdef HAVE_SYNC_BUILTINS
648 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
652 uintptr_t new_pool_size
;
653 if (__builtin_add_overflow (used_pool_size
, new_size
,
655 || new_pool_size
> allocator_data
->pool_size
)
657 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
658 &used_pool_size
, new_pool_size
,
659 true, MEMMODEL_RELAXED
,
665 gomp_mutex_lock (&allocator_data
->lock
);
666 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
668 || used_pool_size
> allocator_data
->pool_size
)
670 gomp_mutex_unlock (&allocator_data
->lock
);
673 allocator_data
->used_pool_size
= used_pool_size
;
674 gomp_mutex_unlock (&allocator_data
->lock
);
676 #ifdef LIBGOMP_USE_LIBNUMA
677 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
678 ptr
= libnuma_data
->numa_alloc_local (new_size
);
679 # ifdef LIBGOMP_USE_MEMKIND
683 #ifdef LIBGOMP_USE_MEMKIND
686 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
687 void *kind
= *memkind_data
->kinds
[memkind
];
688 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
692 ptr
= MEMSPACE_ALLOC (allocator_data
->memspace
, new_size
,
693 allocator_data
->pinned
);
696 #ifdef HAVE_SYNC_BUILTINS
697 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
700 gomp_mutex_lock (&allocator_data
->lock
);
701 allocator_data
->used_pool_size
-= new_size
;
702 gomp_mutex_unlock (&allocator_data
->lock
);
709 #ifdef LIBGOMP_USE_LIBNUMA
710 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
711 ptr
= libnuma_data
->numa_alloc_local (new_size
);
712 # ifdef LIBGOMP_USE_MEMKIND
716 #ifdef LIBGOMP_USE_MEMKIND
719 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
720 void *kind
= *memkind_data
->kinds
[memkind
];
721 ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
726 omp_memspace_handle_t memspace
;
727 memspace
= (allocator_data
728 ? allocator_data
->memspace
729 : predefined_alloc_mapping (allocator
));
730 int pinned
= (allocator_data
731 ? allocator_data
->pinned
732 : allocator
== ompx_gnu_pinned_mem_alloc
);
733 ptr
= MEMSPACE_ALLOC (memspace
, new_size
, pinned
);
739 if (new_alignment
> sizeof (void *))
740 ret
= (void *) (((uintptr_t) ptr
741 + sizeof (struct omp_mem_header
)
742 + new_alignment
- sizeof (void *))
743 & ~(new_alignment
- 1));
745 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
746 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
747 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
748 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
752 int fallback
= (allocator_data
753 ? allocator_data
->fallback
754 : (allocator
== omp_default_mem_alloc
755 || allocator
== ompx_gnu_pinned_mem_alloc
)
757 : omp_atv_default_mem_fb
);
760 case omp_atv_default_mem_fb
:
761 allocator
= omp_default_mem_alloc
;
763 case omp_atv_null_fb
:
766 case omp_atv_abort_fb
:
767 gomp_fatal ("Out of memory allocating %lu bytes",
768 (unsigned long) size
);
769 case omp_atv_allocator_fb
:
770 allocator
= allocator_data
->fb_data
;
776 ialias (omp_aligned_alloc
)
779 omp_alloc (size_t size
, omp_allocator_handle_t allocator
)
781 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
784 /* Like omp_aligned_alloc, but apply on top of that:
785 "For allocations that arise from this ... the null_fb value of the
786 fallback allocator trait behaves as if the abort_fb had been specified." */
789 GOMP_alloc (size_t alignment
, size_t size
, uintptr_t allocator
)
792 = ialias_call (omp_aligned_alloc
) (alignment
, size
,
793 (omp_allocator_handle_t
) allocator
);
794 if (__builtin_expect (ret
== NULL
, 0) && size
)
795 gomp_fatal ("Out of memory allocating %lu bytes",
796 (unsigned long) size
);
801 omp_free (void *ptr
, omp_allocator_handle_t allocator
)
803 struct omp_mem_header
*data
;
804 omp_memspace_handle_t memspace
= omp_default_mem_space
;
810 data
= &((struct omp_mem_header
*) ptr
)[-1];
811 if (!predefined_allocator_p (data
->allocator
))
813 struct omp_allocator_data
*allocator_data
814 = (struct omp_allocator_data
*) (data
->allocator
);
815 if (allocator_data
->pool_size
< ~(uintptr_t) 0)
817 #ifdef HAVE_SYNC_BUILTINS
818 __atomic_add_fetch (&allocator_data
->used_pool_size
, -data
->size
,
821 gomp_mutex_lock (&allocator_data
->lock
);
822 allocator_data
->used_pool_size
-= data
->size
;
823 gomp_mutex_unlock (&allocator_data
->lock
);
826 #ifdef LIBGOMP_USE_LIBNUMA
827 if (allocator_data
->memkind
== GOMP_MEMKIND_LIBNUMA
)
829 libnuma_data
->numa_free (data
->ptr
, data
->size
);
832 # ifdef LIBGOMP_USE_MEMKIND
836 #ifdef LIBGOMP_USE_MEMKIND
837 if (allocator_data
->memkind
)
839 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
840 void *kind
= *memkind_data
->kinds
[allocator_data
->memkind
];
841 memkind_data
->memkind_free (kind
, data
->ptr
);
846 memspace
= allocator_data
->memspace
;
847 pinned
= allocator_data
->pinned
;
851 #ifdef LIBGOMP_USE_MEMKIND
852 enum gomp_numa_memkind_kind memkind
= GOMP_MEMKIND_NONE
;
853 if (data
->allocator
== omp_high_bw_mem_alloc
)
854 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
855 else if (data
->allocator
== omp_large_cap_mem_alloc
)
856 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
859 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
860 if (memkind_data
->kinds
[memkind
])
862 void *kind
= *memkind_data
->kinds
[memkind
];
863 memkind_data
->memkind_free (kind
, data
->ptr
);
869 memspace
= predefined_alloc_mapping (data
->allocator
);
870 pinned
= (data
->allocator
== ompx_gnu_pinned_mem_alloc
);
873 MEMSPACE_FREE (memspace
, data
->ptr
, data
->size
, pinned
);
879 GOMP_free (void *ptr
, uintptr_t allocator
)
881 return ialias_call (omp_free
) (ptr
, (omp_allocator_handle_t
) allocator
);
885 omp_aligned_calloc (size_t alignment
, size_t nmemb
, size_t size
,
886 omp_allocator_handle_t allocator
)
888 struct omp_allocator_data
*allocator_data
;
889 size_t new_size
, size_temp
, new_alignment
;
891 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
892 enum gomp_numa_memkind_kind memkind
;
895 if (__builtin_expect (size
== 0 || nmemb
== 0, 0))
899 new_alignment
= alignment
;
900 if (allocator
== omp_null_allocator
)
902 struct gomp_thread
*thr
= gomp_thread ();
903 if (thr
->ts
.def_allocator
== omp_null_allocator
)
904 thr
->ts
.def_allocator
= gomp_def_allocator
;
905 allocator
= (omp_allocator_handle_t
) thr
->ts
.def_allocator
;
908 if (!predefined_allocator_p (allocator
))
910 allocator_data
= (struct omp_allocator_data
*) allocator
;
911 if (new_alignment
< allocator_data
->alignment
)
912 new_alignment
= allocator_data
->alignment
;
913 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
914 memkind
= allocator_data
->memkind
;
919 allocator_data
= NULL
;
920 if (new_alignment
< sizeof (void *))
921 new_alignment
= sizeof (void *);
922 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
923 memkind
= GOMP_MEMKIND_NONE
;
925 #ifdef LIBGOMP_USE_MEMKIND
926 if (allocator
== omp_high_bw_mem_alloc
)
927 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
928 else if (allocator
== omp_large_cap_mem_alloc
)
929 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
932 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
933 if (!memkind_data
->kinds
[memkind
])
934 memkind
= GOMP_MEMKIND_NONE
;
939 new_size
= sizeof (struct omp_mem_header
);
940 if (new_alignment
> sizeof (void *))
941 new_size
+= new_alignment
- sizeof (void *);
942 if (__builtin_mul_overflow (size
, nmemb
, &size_temp
))
944 if (__builtin_add_overflow (size_temp
, new_size
, &new_size
))
946 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
947 if (allocator
== omp_low_lat_mem_alloc
)
951 if (__builtin_expect (allocator_data
952 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
954 uintptr_t used_pool_size
;
955 if (new_size
> allocator_data
->pool_size
)
957 #ifdef HAVE_SYNC_BUILTINS
958 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
962 uintptr_t new_pool_size
;
963 if (__builtin_add_overflow (used_pool_size
, new_size
,
965 || new_pool_size
> allocator_data
->pool_size
)
967 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
968 &used_pool_size
, new_pool_size
,
969 true, MEMMODEL_RELAXED
,
975 gomp_mutex_lock (&allocator_data
->lock
);
976 if (__builtin_add_overflow (allocator_data
->used_pool_size
, new_size
,
978 || used_pool_size
> allocator_data
->pool_size
)
980 gomp_mutex_unlock (&allocator_data
->lock
);
983 allocator_data
->used_pool_size
= used_pool_size
;
984 gomp_mutex_unlock (&allocator_data
->lock
);
986 #ifdef LIBGOMP_USE_LIBNUMA
987 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
988 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
989 memory that is initialized to zero. */
990 ptr
= libnuma_data
->numa_alloc_local (new_size
);
991 # ifdef LIBGOMP_USE_MEMKIND
995 #ifdef LIBGOMP_USE_MEMKIND
998 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
999 void *kind
= *memkind_data
->kinds
[memkind
];
1000 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
1004 ptr
= MEMSPACE_CALLOC (allocator_data
->memspace
, new_size
,
1005 allocator_data
->pinned
);
1008 #ifdef HAVE_SYNC_BUILTINS
1009 __atomic_add_fetch (&allocator_data
->used_pool_size
, -new_size
,
1012 gomp_mutex_lock (&allocator_data
->lock
);
1013 allocator_data
->used_pool_size
-= new_size
;
1014 gomp_mutex_unlock (&allocator_data
->lock
);
1021 #ifdef LIBGOMP_USE_LIBNUMA
1022 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1023 /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
1024 memory that is initialized to zero. */
1025 ptr
= libnuma_data
->numa_alloc_local (new_size
);
1026 # ifdef LIBGOMP_USE_MEMKIND
1030 #ifdef LIBGOMP_USE_MEMKIND
1033 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1034 void *kind
= *memkind_data
->kinds
[memkind
];
1035 ptr
= memkind_data
->memkind_calloc (kind
, 1, new_size
);
1040 omp_memspace_handle_t memspace
;
1041 memspace
= (allocator_data
1042 ? allocator_data
->memspace
1043 : predefined_alloc_mapping (allocator
));
1044 int pinned
= (allocator_data
1045 ? allocator_data
->pinned
1046 : allocator
== ompx_gnu_pinned_mem_alloc
);
1047 ptr
= MEMSPACE_CALLOC (memspace
, new_size
, pinned
);
1053 if (new_alignment
> sizeof (void *))
1054 ret
= (void *) (((uintptr_t) ptr
1055 + sizeof (struct omp_mem_header
)
1056 + new_alignment
- sizeof (void *))
1057 & ~(new_alignment
- 1));
1059 ret
= (char *) ptr
+ sizeof (struct omp_mem_header
);
1060 ((struct omp_mem_header
*) ret
)[-1].ptr
= ptr
;
1061 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1062 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1066 int fallback
= (allocator_data
1067 ? allocator_data
->fallback
1068 : (allocator
== omp_default_mem_alloc
1069 || allocator
== ompx_gnu_pinned_mem_alloc
)
1071 : omp_atv_default_mem_fb
);
1074 case omp_atv_default_mem_fb
:
1075 allocator
= omp_default_mem_alloc
;
1077 case omp_atv_null_fb
:
1080 case omp_atv_abort_fb
:
1081 gomp_fatal ("Out of memory allocating %lu bytes",
1082 (unsigned long) (size
* nmemb
));
1083 case omp_atv_allocator_fb
:
1084 allocator
= allocator_data
->fb_data
;
1090 ialias (omp_aligned_calloc
)
1093 omp_calloc (size_t nmemb
, size_t size
, omp_allocator_handle_t allocator
)
1095 return ialias_call (omp_aligned_calloc
) (1, nmemb
, size
, allocator
);
1099 omp_realloc (void *ptr
, size_t size
, omp_allocator_handle_t allocator
,
1100 omp_allocator_handle_t free_allocator
)
1102 struct omp_allocator_data
*allocator_data
, *free_allocator_data
;
1103 size_t new_size
, old_size
, new_alignment
, old_alignment
;
1104 void *new_ptr
, *ret
;
1105 struct omp_mem_header
*data
;
1106 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1107 enum gomp_numa_memkind_kind memkind
, free_memkind
;
1110 if (__builtin_expect (ptr
== NULL
, 0))
1111 return ialias_call (omp_aligned_alloc
) (1, size
, allocator
);
1113 if (__builtin_expect (size
== 0, 0))
1115 ialias_call (omp_free
) (ptr
, free_allocator
);
1119 data
= &((struct omp_mem_header
*) ptr
)[-1];
1120 free_allocator
= data
->allocator
;
1123 new_alignment
= sizeof (void *);
1124 if (allocator
== omp_null_allocator
)
1125 allocator
= free_allocator
;
1127 if (!predefined_allocator_p (allocator
))
1129 allocator_data
= (struct omp_allocator_data
*) allocator
;
1130 if (new_alignment
< allocator_data
->alignment
)
1131 new_alignment
= allocator_data
->alignment
;
1132 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1133 memkind
= allocator_data
->memkind
;
1138 allocator_data
= NULL
;
1139 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1140 memkind
= GOMP_MEMKIND_NONE
;
1142 #ifdef LIBGOMP_USE_MEMKIND
1143 if (allocator
== omp_high_bw_mem_alloc
)
1144 memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
1145 else if (allocator
== omp_large_cap_mem_alloc
)
1146 memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
1149 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1150 if (!memkind_data
->kinds
[memkind
])
1151 memkind
= GOMP_MEMKIND_NONE
;
1155 if (!predefined_allocator_p (free_allocator
))
1157 free_allocator_data
= (struct omp_allocator_data
*) free_allocator
;
1158 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1159 free_memkind
= free_allocator_data
->memkind
;
1164 free_allocator_data
= NULL
;
1165 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1166 free_memkind
= GOMP_MEMKIND_NONE
;
1168 #ifdef LIBGOMP_USE_MEMKIND
1169 if (free_allocator
== omp_high_bw_mem_alloc
)
1170 free_memkind
= GOMP_MEMKIND_HBW_PREFERRED
;
1171 else if (free_allocator
== omp_large_cap_mem_alloc
)
1172 free_memkind
= GOMP_MEMKIND_DAX_KMEM_ALL
;
1175 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1176 if (!memkind_data
->kinds
[free_memkind
])
1177 free_memkind
= GOMP_MEMKIND_NONE
;
1181 old_alignment
= (uintptr_t) ptr
- (uintptr_t) (data
->ptr
);
1183 new_size
= sizeof (struct omp_mem_header
);
1184 if (new_alignment
> sizeof (void *))
1185 new_size
+= new_alignment
- sizeof (void *);
1186 if (__builtin_add_overflow (size
, new_size
, &new_size
))
1188 old_size
= data
->size
;
1189 #ifdef OMP_LOW_LAT_MEM_ALLOC_INVALID
1190 if (allocator
== omp_low_lat_mem_alloc
)
1194 if (__builtin_expect (allocator_data
1195 && allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
1197 uintptr_t used_pool_size
;
1198 size_t prev_size
= 0;
1199 /* Check if we can use realloc. Don't use it if extra alignment
1200 was used previously or newly, because realloc might return a pointer
1201 with different alignment and then we'd need to memmove the data
1203 if (free_allocator_data
1204 && free_allocator_data
== allocator_data
1205 && new_alignment
== sizeof (void *)
1206 && old_alignment
== sizeof (struct omp_mem_header
))
1207 prev_size
= old_size
;
1208 if (new_size
> prev_size
1209 && new_size
- prev_size
> allocator_data
->pool_size
)
1211 #ifdef HAVE_SYNC_BUILTINS
1212 used_pool_size
= __atomic_load_n (&allocator_data
->used_pool_size
,
1216 uintptr_t new_pool_size
;
1217 if (new_size
> prev_size
)
1219 if (__builtin_add_overflow (used_pool_size
, new_size
- prev_size
,
1221 || new_pool_size
> allocator_data
->pool_size
)
1225 new_pool_size
= used_pool_size
+ new_size
- prev_size
;
1226 if (__atomic_compare_exchange_n (&allocator_data
->used_pool_size
,
1227 &used_pool_size
, new_pool_size
,
1228 true, MEMMODEL_RELAXED
,
1234 gomp_mutex_lock (&allocator_data
->lock
);
1235 if (new_size
> prev_size
)
1237 if (__builtin_add_overflow (allocator_data
->used_pool_size
,
1238 new_size
- prev_size
,
1240 || used_pool_size
> allocator_data
->pool_size
)
1242 gomp_mutex_unlock (&allocator_data
->lock
);
1247 used_pool_size
= (allocator_data
->used_pool_size
1248 + new_size
- prev_size
);
1249 allocator_data
->used_pool_size
= used_pool_size
;
1250 gomp_mutex_unlock (&allocator_data
->lock
);
1252 #ifdef LIBGOMP_USE_LIBNUMA
1253 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1256 new_ptr
= libnuma_data
->numa_realloc (data
->ptr
, data
->size
,
1259 new_ptr
= libnuma_data
->numa_alloc_local (new_size
);
1261 # ifdef LIBGOMP_USE_MEMKIND
1265 #ifdef LIBGOMP_USE_MEMKIND
1268 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1269 void *kind
= *memkind_data
->kinds
[memkind
];
1271 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
1274 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
1280 int was_pinned
= (free_allocator_data
1281 ? free_allocator_data
->pinned
1282 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1283 new_ptr
= MEMSPACE_REALLOC (allocator_data
->memspace
, data
->ptr
,
1284 data
->size
, new_size
, was_pinned
,
1285 allocator_data
->pinned
);
1288 new_ptr
= MEMSPACE_ALLOC (allocator_data
->memspace
, new_size
,
1289 allocator_data
->pinned
);
1290 if (new_ptr
== NULL
)
1292 #ifdef HAVE_SYNC_BUILTINS
1293 __atomic_add_fetch (&allocator_data
->used_pool_size
,
1294 prev_size
- new_size
,
1297 gomp_mutex_lock (&allocator_data
->lock
);
1298 allocator_data
->used_pool_size
-= new_size
- prev_size
;
1299 gomp_mutex_unlock (&allocator_data
->lock
);
1305 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1306 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1307 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1308 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1312 else if (new_alignment
== sizeof (void *)
1313 && old_alignment
== sizeof (struct omp_mem_header
)
1314 #if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
1315 && memkind
== free_memkind
1317 && (free_allocator_data
== NULL
1318 || free_allocator_data
->pool_size
== ~(uintptr_t) 0))
1320 #ifdef LIBGOMP_USE_LIBNUMA
1321 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1322 new_ptr
= libnuma_data
->numa_realloc (data
->ptr
, data
->size
, new_size
);
1323 # ifdef LIBGOMP_USE_MEMKIND
1327 #ifdef LIBGOMP_USE_MEMKIND
1330 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1331 void *kind
= *memkind_data
->kinds
[memkind
];
1332 new_ptr
= memkind_data
->memkind_realloc (kind
, data
->ptr
,
1338 omp_memspace_handle_t memspace
;
1339 memspace
= (allocator_data
1340 ? allocator_data
->memspace
1341 : predefined_alloc_mapping (allocator
));
1342 int was_pinned
= (free_allocator_data
1343 ? free_allocator_data
->pinned
1344 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1345 int pinned
= (allocator_data
1346 ? allocator_data
->pinned
1347 : allocator
== ompx_gnu_pinned_mem_alloc
);
1348 new_ptr
= MEMSPACE_REALLOC (memspace
, data
->ptr
, data
->size
, new_size
,
1349 was_pinned
, pinned
);
1351 if (new_ptr
== NULL
)
1354 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1355 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1356 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1357 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1362 #ifdef LIBGOMP_USE_LIBNUMA
1363 if (memkind
== GOMP_MEMKIND_LIBNUMA
)
1364 new_ptr
= libnuma_data
->numa_alloc_local (new_size
);
1365 # ifdef LIBGOMP_USE_MEMKIND
1369 #ifdef LIBGOMP_USE_MEMKIND
1372 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1373 void *kind
= *memkind_data
->kinds
[memkind
];
1374 new_ptr
= memkind_data
->memkind_malloc (kind
, new_size
);
1379 omp_memspace_handle_t memspace
;
1380 memspace
= (allocator_data
1381 ? allocator_data
->memspace
1382 : predefined_alloc_mapping (allocator
));
1383 int pinned
= (allocator_data
1384 ? allocator_data
->pinned
1385 : allocator
== ompx_gnu_pinned_mem_alloc
);
1386 new_ptr
= MEMSPACE_ALLOC (memspace
, new_size
, pinned
);
1388 if (new_ptr
== NULL
)
1392 if (new_alignment
> sizeof (void *))
1393 ret
= (void *) (((uintptr_t) new_ptr
1394 + sizeof (struct omp_mem_header
)
1395 + new_alignment
- sizeof (void *))
1396 & ~(new_alignment
- 1));
1398 ret
= (char *) new_ptr
+ sizeof (struct omp_mem_header
);
1399 ((struct omp_mem_header
*) ret
)[-1].ptr
= new_ptr
;
1400 ((struct omp_mem_header
*) ret
)[-1].size
= new_size
;
1401 ((struct omp_mem_header
*) ret
)[-1].allocator
= allocator
;
1402 if (old_size
- old_alignment
< size
)
1403 size
= old_size
- old_alignment
;
1404 memcpy (ret
, ptr
, size
);
1405 if (__builtin_expect (free_allocator_data
1406 && free_allocator_data
->pool_size
< ~(uintptr_t) 0, 0))
1408 #ifdef HAVE_SYNC_BUILTINS
1409 __atomic_add_fetch (&free_allocator_data
->used_pool_size
, -data
->size
,
1412 gomp_mutex_lock (&free_allocator_data
->lock
);
1413 free_allocator_data
->used_pool_size
-= data
->size
;
1414 gomp_mutex_unlock (&free_allocator_data
->lock
);
1417 #ifdef LIBGOMP_USE_LIBNUMA
1418 if (free_memkind
== GOMP_MEMKIND_LIBNUMA
)
1420 libnuma_data
->numa_free (data
->ptr
, data
->size
);
1423 # ifdef LIBGOMP_USE_MEMKIND
1427 #ifdef LIBGOMP_USE_MEMKIND
1430 struct gomp_memkind_data
*memkind_data
= gomp_get_memkind ();
1431 void *kind
= *memkind_data
->kinds
[free_memkind
];
1432 memkind_data
->memkind_free (kind
, data
->ptr
);
1437 omp_memspace_handle_t was_memspace
;
1438 was_memspace
= (free_allocator_data
1439 ? free_allocator_data
->memspace
1440 : predefined_alloc_mapping (free_allocator
));
1441 int was_pinned
= (free_allocator_data
1442 ? free_allocator_data
->pinned
1443 : free_allocator
== ompx_gnu_pinned_mem_alloc
);
1444 MEMSPACE_FREE (was_memspace
, data
->ptr
, data
->size
, was_pinned
);
1449 int fallback
= (allocator_data
1450 ? allocator_data
->fallback
1451 : (allocator
== omp_default_mem_alloc
1452 || allocator
== ompx_gnu_pinned_mem_alloc
)
1454 : omp_atv_default_mem_fb
);
1457 case omp_atv_default_mem_fb
:
1458 allocator
= omp_default_mem_alloc
;
1460 case omp_atv_null_fb
:
1463 case omp_atv_abort_fb
:
1464 gomp_fatal ("Out of memory allocating %lu bytes",
1465 (unsigned long) size
);
1466 case omp_atv_allocator_fb
:
1467 allocator
= allocator_data
->fb_data
;