/* Copyright (C) 2020-2023 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
#include <dlfcn.h>
#endif

#define omp_max_predefined_alloc omp_thread_mem_alloc

enum gomp_numa_memkind_kind
{
  GOMP_MEMKIND_NONE = 0,
#define GOMP_MEMKIND_KINDS \
  GOMP_MEMKIND_KIND (HBW_INTERLEAVE), \
  GOMP_MEMKIND_KIND (HBW_PREFERRED), \
  GOMP_MEMKIND_KIND (DAX_KMEM_ALL), \
  GOMP_MEMKIND_KIND (DAX_KMEM), \
  GOMP_MEMKIND_KIND (INTERLEAVE), \
  GOMP_MEMKIND_KIND (DEFAULT)
#define GOMP_MEMKIND_KIND(kind) GOMP_MEMKIND_##kind
  GOMP_MEMKIND_KINDS,
#undef GOMP_MEMKIND_KIND
  GOMP_MEMKIND_COUNT,
  GOMP_MEMKIND_LIBNUMA = GOMP_MEMKIND_COUNT
};

struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  unsigned int memkind : 8;
#endif
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};

struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};

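/* Layout note (illustrative, not part of the upstream sources): every block
   handed out below hides a struct omp_mem_header immediately before the
   pointer returned to the user, so omp_free/omp_realloc can recover the
   underlying allocation and its owning allocator.  A minimal sketch of that
   recovery, mirroring what the functions below do:  */
#if 0
static struct omp_mem_header *
example_header_of (void *user_ptr)
{
  /* The bookkeeping header sits directly in front of the user pointer.  */
  return &((struct omp_mem_header *) user_ptr)[-1];
}
#endif
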
struct gomp_libnuma_data
{
  void *numa_handle;
  void *(*numa_alloc_local) (size_t);
  void *(*numa_realloc) (void *, size_t, size_t);
  void (*numa_free) (void *, size_t);
};

struct gomp_memkind_data
{
  void *memkind_handle;
  void *(*memkind_malloc) (void *, size_t);
  void *(*memkind_calloc) (void *, size_t, size_t);
  void *(*memkind_realloc) (void *, void *, size_t);
  void (*memkind_free) (void *, void *);
  int (*memkind_check_available) (void *);
  void **kinds[GOMP_MEMKIND_COUNT];
};

#ifdef LIBGOMP_USE_LIBNUMA
static struct gomp_libnuma_data *libnuma_data;
static pthread_once_t libnuma_data_once = PTHREAD_ONCE_INIT;

static void
gomp_init_libnuma (void)
{
  void *handle = dlopen ("libnuma.so.1", RTLD_LAZY);
  struct gomp_libnuma_data *data;

  data = calloc (1, sizeof (struct gomp_libnuma_data));
  if (data == NULL)
    {
      if (handle)
        dlclose (handle);
      return;
    }
  if (handle)
    {
      int (*numa_available) (void);
      numa_available
        = (__typeof (numa_available)) dlsym (handle, "numa_available");
      if (!numa_available || numa_available () != 0)
        {
          dlclose (handle);
          handle = NULL;
        }
    }
  if (!handle)
    {
      __atomic_store_n (&libnuma_data, data, MEMMODEL_RELEASE);
      return;
    }
  data->numa_handle = handle;
  data->numa_alloc_local
    = (__typeof (data->numa_alloc_local)) dlsym (handle, "numa_alloc_local");
  data->numa_realloc
    = (__typeof (data->numa_realloc)) dlsym (handle, "numa_realloc");
  data->numa_free
    = (__typeof (data->numa_free)) dlsym (handle, "numa_free");
  __atomic_store_n (&libnuma_data, data, MEMMODEL_RELEASE);
}

static struct gomp_libnuma_data *
gomp_get_libnuma (void)
{
  struct gomp_libnuma_data *data
    = __atomic_load_n (&libnuma_data, MEMMODEL_ACQUIRE);
  if (data)
    return data;
  pthread_once (&libnuma_data_once, gomp_init_libnuma);
  return __atomic_load_n (&libnuma_data, MEMMODEL_ACQUIRE);
}
#endif

#ifdef LIBGOMP_USE_MEMKIND
static struct gomp_memkind_data *memkind_data;
static pthread_once_t memkind_data_once = PTHREAD_ONCE_INIT;

static void
gomp_init_memkind (void)
{
  void *handle = dlopen ("libmemkind.so.0", RTLD_LAZY);
  struct gomp_memkind_data *data;
  int i;
  static const char *kinds[] = {
    NULL,
#define GOMP_MEMKIND_KIND(kind) "MEMKIND_" #kind
    GOMP_MEMKIND_KINDS
#undef GOMP_MEMKIND_KIND
  };

  data = calloc (1, sizeof (struct gomp_memkind_data));
  if (data == NULL)
    {
      if (handle)
        dlclose (handle);
      return;
    }
  if (!handle)
    {
      __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
      return;
    }
  data->memkind_handle = handle;
  data->memkind_malloc
    = (__typeof (data->memkind_malloc)) dlsym (handle, "memkind_malloc");
  data->memkind_calloc
    = (__typeof (data->memkind_calloc)) dlsym (handle, "memkind_calloc");
  data->memkind_realloc
    = (__typeof (data->memkind_realloc)) dlsym (handle, "memkind_realloc");
  data->memkind_free
    = (__typeof (data->memkind_free)) dlsym (handle, "memkind_free");
  data->memkind_check_available
    = (__typeof (data->memkind_check_available))
      dlsym (handle, "memkind_check_available");
  if (data->memkind_malloc
      && data->memkind_calloc
      && data->memkind_realloc
      && data->memkind_free
      && data->memkind_check_available)
    for (i = 1; i < GOMP_MEMKIND_COUNT; ++i)
      {
        data->kinds[i] = (void **) dlsym (handle, kinds[i]);
        if (data->kinds[i] && data->memkind_check_available (*data->kinds[i]))
          data->kinds[i] = NULL;
      }
  __atomic_store_n (&memkind_data, data, MEMMODEL_RELEASE);
}

static struct gomp_memkind_data *
gomp_get_memkind (void)
{
  struct gomp_memkind_data *data
    = __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
  if (data)
    return data;
  pthread_once (&memkind_data_once, gomp_init_memkind);
  return __atomic_load_n (&memkind_data, MEMMODEL_ACQUIRE);
}
#endif

omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
                    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
        omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment,
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
        GOMP_MEMKIND_NONE
#endif
      };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.sync_hint = omp_atv_contended;
            break;
          case omp_atv_contended:
          case omp_atv_uncontended:
          case omp_atv_serialized:
          case omp_atv_private:
            data.sync_hint = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_alignment:
        if (traits[i].value == omp_atv_default)
          {
            data.alignment = 1;
            break;
          }
        if ((traits[i].value & (traits[i].value - 1)) != 0
            || !traits[i].value)
          return omp_null_allocator;
        data.alignment = traits[i].value;
        break;
      case omp_atk_access:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.access = omp_atv_all;
            break;
          case omp_atv_all:
          case omp_atv_cgroup:
          case omp_atv_pteam:
          case omp_atv_thread:
            data.access = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_pool_size:
        if (traits[i].value == omp_atv_default)
          data.pool_size = ~(uintptr_t) 0;
        else
          data.pool_size = traits[i].value;
        break;
      case omp_atk_fallback:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.fallback = omp_atv_default_mem_fb;
            break;
          case omp_atv_default_mem_fb:
          case omp_atv_null_fb:
          case omp_atv_abort_fb:
          case omp_atv_allocator_fb:
            data.fallback = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_fb_data:
        data.fb_data = traits[i].value;
        break;
      case omp_atk_pinned:
        switch (traits[i].value)
          {
          case omp_atv_default:
          case omp_atv_false:
            data.pinned = omp_atv_false;
            break;
          case omp_atv_true:
            data.pinned = omp_atv_true;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_partition:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.partition = omp_atv_environment;
            break;
          case omp_atv_environment:
          case omp_atv_nearest:
          case omp_atv_blocked:
          case omp_atv_interleaved:
            data.partition = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      default:
        return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  switch (memspace)
    {
#ifdef LIBGOMP_USE_MEMKIND
    case omp_high_bw_mem_space:
      struct gomp_memkind_data *memkind_data;
      memkind_data = gomp_get_memkind ();
      if (data.partition == omp_atv_interleaved
          && memkind_data->kinds[GOMP_MEMKIND_HBW_INTERLEAVE])
        {
          data.memkind = GOMP_MEMKIND_HBW_INTERLEAVE;
          break;
        }
      else if (memkind_data->kinds[GOMP_MEMKIND_HBW_PREFERRED])
        {
          data.memkind = GOMP_MEMKIND_HBW_PREFERRED;
          break;
        }
      break;
    case omp_large_cap_mem_space:
      memkind_data = gomp_get_memkind ();
      if (memkind_data->kinds[GOMP_MEMKIND_DAX_KMEM_ALL])
        data.memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      else if (memkind_data->kinds[GOMP_MEMKIND_DAX_KMEM])
        data.memkind = GOMP_MEMKIND_DAX_KMEM;
      break;
#endif
    default:
#ifdef LIBGOMP_USE_MEMKIND
      if (data.partition == omp_atv_interleaved)
        {
          memkind_data = gomp_get_memkind ();
          if (memkind_data->kinds[GOMP_MEMKIND_INTERLEAVE])
            data.memkind = GOMP_MEMKIND_INTERLEAVE;
        }
#endif
      break;
    }

#ifdef LIBGOMP_USE_LIBNUMA
  if (data.memkind == GOMP_MEMKIND_NONE && data.partition == omp_atv_nearest)
    {
      libnuma_data = gomp_get_libnuma ();
      if (libnuma_data->numa_alloc_local != NULL)
        data.memkind = GOMP_MEMKIND_LIBNUMA;
    }
#endif

  /* No support for this so far.  */
  if (data.pinned)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}

void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}

ialias (omp_init_allocator)
ialias (omp_destroy_allocator)

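/* Usage sketch (illustrative only, not part of libgomp): how an application
   might build an allocator through the public omp.h API wrapped above.  The
   trait values are arbitrary example choices.  */
#if 0
#include <omp.h>

static omp_allocator_handle_t
example_make_pool_allocator (void)
{
  omp_alloctrait_t traits[] = {
    { omp_atk_alignment, 64 },            /* 64-byte aligned blocks.  */
    { omp_atk_pool_size, 1 << 20 },       /* 1 MiB pool limit.  */
    { omp_atk_fallback, omp_atv_null_fb } /* Return NULL when exhausted.  */
  };
  /* Returns omp_null_allocator if any trait value is invalid.  */
  return omp_init_allocator (omp_default_mem_space, 3, traits);
}
#endif
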
void *
omp_aligned_alloc (size_t alignment, size_t size,
                   omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, new_alignment;
  void *ptr, *ret;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  enum gomp_numa_memkind_kind memkind;
#endif

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = allocator_data->memkind;
#endif
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator == omp_high_bw_mem_alloc)
        memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (allocator == omp_large_cap_mem_alloc)
        memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          if (!memkind_data->kinds[memkind])
            memkind = GOMP_MEMKIND_NONE;
        }
#endif
    }

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          ptr = memkind_data->memkind_malloc (kind, new_size);
        }
      else
#endif
        ptr = malloc (new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          ptr = memkind_data->memkind_malloc (kind, new_size);
        }
      else
#endif
        ptr = malloc (new_size);
      if (ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
              || memkind
#endif
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_alloc)

void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_alloc) (1, size, allocator);
}

/* Like omp_aligned_alloc, but apply on top of that:
   "For allocations that arise from this ... the null_fb value of the
   fallback allocator trait behaves as if the abort_fb had been specified."  */

void *
GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
{
  void *ret
    = ialias_call (omp_aligned_alloc) (alignment, size,
                                       (omp_allocator_handle_t) allocator);
  if (__builtin_expect (ret == NULL, 0) && size)
    gomp_fatal ("Out of memory allocating %lu bytes",
                (unsigned long) size);
  return ret;
}

void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
        = (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= data->size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
        }
#ifdef LIBGOMP_USE_LIBNUMA
      if (allocator_data->memkind == GOMP_MEMKIND_LIBNUMA)
        {
          libnuma_data->numa_free (data->ptr, data->size);
          return;
        }
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator_data->memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[allocator_data->memkind];
          memkind_data->memkind_free (kind, data->ptr);
          return;
        }
#endif
    }
#ifdef LIBGOMP_USE_MEMKIND
  else
    {
      enum gomp_numa_memkind_kind memkind = GOMP_MEMKIND_NONE;
      if (data->allocator == omp_high_bw_mem_alloc)
        memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (data->allocator == omp_large_cap_mem_alloc)
        memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          if (memkind_data->kinds[memkind])
            {
              void *kind = *memkind_data->kinds[memkind];
              memkind_data->memkind_free (kind, data->ptr);
              return;
            }
        }
    }
#endif
  free (data->ptr);
}

ialias (omp_free)

void
GOMP_free (void *ptr, uintptr_t allocator)
{
  return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
}

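/* Usage sketch (illustrative only, not part of libgomp): a round trip through
   the allocation entry points above, as application code would call them via
   omp.h.  */
#if 0
#include <omp.h>
#include <stdio.h>

static void
example_alloc_free (omp_allocator_handle_t a)
{
  /* 1 KiB, 64-byte aligned; NULL is possible when the allocator was created
     with omp_atv_null_fb and its pool is exhausted.  */
  double *buf = omp_aligned_alloc (64, 1024, a);
  if (buf == NULL)
    return;
  buf[0] = 42.0;
  printf ("%f\n", buf[0]);
  /* The owning allocator is recorded in the hidden header, so omp_free
     does not actually rely on the handle passed here.  */
  omp_free (buf, a);
}
#endif
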
void *
omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
                    omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, size_temp, new_alignment;
  void *ptr, *ret;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  enum gomp_numa_memkind_kind memkind;
#endif

  if (__builtin_expect (size == 0 || nmemb == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = allocator_data->memkind;
#endif
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator == omp_high_bw_mem_alloc)
        memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (allocator == omp_large_cap_mem_alloc)
        memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          if (!memkind_data->kinds[memkind])
            memkind = GOMP_MEMKIND_NONE;
        }
#endif
    }

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_mul_overflow (size, nmemb, &size_temp))
    goto fail;
  if (__builtin_add_overflow (size_temp, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
           memory that is initialized to zero.  */
        ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          ptr = memkind_data->memkind_calloc (kind, 1, new_size);
        }
      else
#endif
        ptr = calloc (1, new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        /* numa_alloc_local uses mmap with MAP_ANONYMOUS, returning
           memory that is initialized to zero.  */
        ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          ptr = memkind_data->memkind_calloc (kind, 1, new_size);
        }
      else
#endif
        ptr = calloc (1, new_size);
      if (ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
              || memkind
#endif
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) (size * nmemb));
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_calloc)

void *
omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
}

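/* Usage sketch (illustrative only, not part of libgomp): omp_calloc with one
   of the predefined allocators; the nmemb * size product is overflow-checked
   above before any memory is touched.  The allocator choice here is just an
   arbitrary example.  */
#if 0
#include <omp.h>

static int *
example_zeroed_array (size_t n)
{
  return omp_calloc (n, sizeof (int), omp_cgroup_mem_alloc);
}
#endif
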
void *
omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
             omp_allocator_handle_t free_allocator)
{
  struct omp_allocator_data *allocator_data, *free_allocator_data;
  size_t new_size, old_size, new_alignment, old_alignment;
  void *new_ptr, *ret;
  struct omp_mem_header *data;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
  enum gomp_numa_memkind_kind memkind, free_memkind;
#endif

  if (__builtin_expect (ptr == NULL, 0))
    return ialias_call (omp_aligned_alloc) (1, size, allocator);

  if (__builtin_expect (size == 0, 0))
    {
      ialias_call (omp_free) (ptr, free_allocator);
      return NULL;
    }

  data = &((struct omp_mem_header *) ptr)[-1];
  free_allocator = data->allocator;

retry:
  new_alignment = sizeof (void *);
  if (allocator == omp_null_allocator)
    allocator = free_allocator;

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = allocator_data->memkind;
#endif
    }
  else
    {
      allocator_data = NULL;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (allocator == omp_high_bw_mem_alloc)
        memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (allocator == omp_large_cap_mem_alloc)
        memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          if (!memkind_data->kinds[memkind])
            memkind = GOMP_MEMKIND_NONE;
        }
#endif
    }
  if (free_allocator > omp_max_predefined_alloc)
    {
      free_allocator_data = (struct omp_allocator_data *) free_allocator;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      free_memkind = free_allocator_data->memkind;
#endif
    }
  else
    {
      free_allocator_data = NULL;
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
      free_memkind = GOMP_MEMKIND_NONE;
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (free_allocator == omp_high_bw_mem_alloc)
        free_memkind = GOMP_MEMKIND_HBW_PREFERRED;
      else if (free_allocator == omp_large_cap_mem_alloc)
        free_memkind = GOMP_MEMKIND_DAX_KMEM_ALL;
      if (free_memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          if (!memkind_data->kinds[free_memkind])
            free_memkind = GOMP_MEMKIND_NONE;
        }
#endif
    }
  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
  old_size = data->size;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      size_t prev_size = 0;
      /* Check if we can use realloc.  Don't use it if extra alignment
         was used previously or newly, because realloc might return a pointer
         with different alignment and then we'd need to memmove the data
         again.  */
      if (free_allocator_data
          && free_allocator_data == allocator_data
          && new_alignment == sizeof (void *)
          && old_alignment == sizeof (struct omp_mem_header))
        prev_size = old_size;
      if (new_size > prev_size
          && new_size - prev_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (new_size > prev_size)
            {
              if (__builtin_add_overflow (used_pool_size, new_size - prev_size,
                                          &new_pool_size)
                  || new_pool_size > allocator_data->pool_size)
                goto fail;
            }
          else
            new_pool_size = used_pool_size + new_size - prev_size;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (new_size > prev_size)
        {
          if (__builtin_add_overflow (allocator_data->used_pool_size,
                                      new_size - prev_size,
                                      &used_pool_size)
              || used_pool_size > allocator_data->pool_size)
            {
              gomp_mutex_unlock (&allocator_data->lock);
              goto fail;
            }
        }
      else
        used_pool_size = (allocator_data->used_pool_size
                          + new_size - prev_size);
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        {
          if (prev_size)
            new_ptr = libnuma_data->numa_realloc (data->ptr, data->size,
                                                  new_size);
          else
            new_ptr = libnuma_data->numa_alloc_local (new_size);
        }
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          if (prev_size)
            new_ptr = memkind_data->memkind_realloc (kind, data->ptr,
                                                     new_size);
          else
            new_ptr = memkind_data->memkind_malloc (kind, new_size);
        }
      else
#endif
      if (prev_size)
        new_ptr = realloc (data->ptr, new_size);
      else
        new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size,
                              prev_size - new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size - prev_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
      else if (prev_size)
        {
          ret = (char *) new_ptr + sizeof (struct omp_mem_header);
          ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
          ((struct omp_mem_header *) ret)[-1].size = new_size;
          ((struct omp_mem_header *) ret)[-1].allocator = allocator;
          return ret;
        }
    }
  else if (new_alignment == sizeof (void *)
           && old_alignment == sizeof (struct omp_mem_header)
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
           && memkind == free_memkind
#endif
           && (free_allocator_data == NULL
               || free_allocator_data->pool_size == ~(uintptr_t) 0))
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        new_ptr = libnuma_data->numa_realloc (data->ptr, data->size, new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          new_ptr = memkind_data->memkind_realloc (kind, data->ptr,
                                                   new_size);
        }
      else
#endif
        new_ptr = realloc (data->ptr, new_size);
      if (new_ptr == NULL)
        goto fail;
      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
      ((struct omp_mem_header *) ret)[-1].size = new_size;
      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
      return ret;
    }
  else
    {
#ifdef LIBGOMP_USE_LIBNUMA
      if (memkind == GOMP_MEMKIND_LIBNUMA)
        new_ptr = libnuma_data->numa_alloc_local (new_size);
# ifdef LIBGOMP_USE_MEMKIND
      else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
      if (memkind)
        {
          struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
          void *kind = *memkind_data->kinds[memkind];
          new_ptr = memkind_data->memkind_malloc (kind, new_size);
        }
      else
#endif
        new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        goto fail;
    }

  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) new_ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  if (old_size - old_alignment < size)
    size = old_size - old_alignment;
  memcpy (ret, ptr, size);
  if (__builtin_expect (free_allocator_data
                        && free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
                          MEMMODEL_RELAXED);
#else
      gomp_mutex_lock (&free_allocator_data->lock);
      free_allocator_data->used_pool_size -= data->size;
      gomp_mutex_unlock (&free_allocator_data->lock);
#endif
    }
#ifdef LIBGOMP_USE_LIBNUMA
  if (free_memkind == GOMP_MEMKIND_LIBNUMA)
    {
      libnuma_data->numa_free (data->ptr, data->size);
      return ret;
    }
# ifdef LIBGOMP_USE_MEMKIND
  else
# endif
#endif
#ifdef LIBGOMP_USE_MEMKIND
  if (free_memkind)
    {
      struct gomp_memkind_data *memkind_data = gomp_get_memkind ();
      void *kind = *memkind_data->kinds[free_memkind];
      memkind_data->memkind_free (kind, data->ptr);
      return ret;
    }
#endif
  free (data->ptr);
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if (new_alignment > sizeof (void *)
#if defined(LIBGOMP_USE_MEMKIND) || defined(LIBGOMP_USE_LIBNUMA)
              || memkind
#endif
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}
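
/* Usage sketch (illustrative only, not part of libgomp): growing a buffer
   with omp_realloc.  Passing omp_null_allocator for both handles reuses the
   allocator recorded in the block's hidden header when it was first
   allocated.  */
#if 0
#include <omp.h>

static double *
example_grow (double *buf, size_t new_count)
{
  return omp_realloc (buf, new_count * sizeof (double),
                      omp_null_allocator, omp_null_allocator);
}
#endif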