/* Copyright (C) 2020 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */
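
/* A minimal caller-side sketch (not part of this file) of how these entry
   points are meant to be used together; the trait values below are only an
   illustration:

     omp_alloctrait_t traits[2]
       = { { omp_atk_alignment, 64 }, { omp_atk_fallback, omp_atv_null_fb } };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 2, traits);
     void *p = omp_alloc (1024, a);
     ...
     omp_free (p, a);
     omp_destroy_allocator (a);  */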

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>

#define omp_max_predefined_alloc omp_thread_mem_alloc

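/* Internal representation of a user-defined allocator: the memory space and
   the resolved values of all traits accepted by omp_init_allocator.  A
   pool_size of ~(uintptr_t) 0 means the pool is unbounded and
   used_pool_size is not tracked.  */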
struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};

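/* Bookkeeping header placed immediately before every pointer returned by
   omp_alloc: the raw pointer obtained from malloc, the total number of
   bytes reserved, and the allocator that has to account for the free.  */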
struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};

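/* Create an allocator for MEMSPACE with the NTRAITS traits in TRAITS.
   Returns omp_null_allocator if any trait is invalid or requests a feature
   that is not supported yet (pinned memory, high bandwidth memory).  */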
omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
                    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
        omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.sync_hint = omp_atv_contended;
            break;
          case omp_atv_contended:
          case omp_atv_uncontended:
          case omp_atv_sequential:
          case omp_atv_private:
            data.sync_hint = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_alignment:
        if ((traits[i].value & (traits[i].value - 1)) != 0
            || !traits[i].value)
          return omp_null_allocator;
        data.alignment = traits[i].value;
        break;
      case omp_atk_access:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.access = omp_atv_all;
            break;
          case omp_atv_all:
          case omp_atv_cgroup:
          case omp_atv_pteam:
          case omp_atv_thread:
            data.access = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_pool_size:
        data.pool_size = traits[i].value;
        break;
      case omp_atk_fallback:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.fallback = omp_atv_default_mem_fb;
            break;
          case omp_atv_default_mem_fb:
          case omp_atv_null_fb:
          case omp_atv_abort_fb:
          case omp_atv_allocator_fb:
            data.fallback = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_fb_data:
        data.fb_data = traits[i].value;
        break;
      case omp_atk_pinned:
        switch (traits[i].value)
          {
          case omp_atv_default:
          case omp_atv_false:
            data.pinned = omp_atv_false;
            break;
          case omp_atv_true:
            data.pinned = omp_atv_true;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_partition:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.partition = omp_atv_environment;
            break;
          case omp_atv_environment:
          case omp_atv_nearest:
          case omp_atv_blocked:
          case omp_atv_interleaved:
            data.partition = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      default:
        return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* No support for these so far (high bandwidth memory will use memkind).  */
  if (data.pinned || data.memspace == omp_high_bw_mem_space)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}

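/* Destroy an allocator created by omp_init_allocator.  Passing
   omp_null_allocator is a no-op.  */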
void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}

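/* Allocate SIZE bytes from ALLOCATOR (or from the thread's default
   allocator when ALLOCATOR is omp_null_allocator).  Room for a
   struct omp_mem_header plus any extra alignment padding is reserved in
   front of the returned pointer; on failure the allocator's fallback
   trait decides whether to retry with another allocator, return NULL or
   abort.  */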
void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t alignment, new_size;
  void *ptr, *ret;

retry:
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

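  /* Handles above omp_max_predefined_alloc point to a user-created
     struct omp_allocator_data; predefined allocators are small integer
     values and use the defaults below.  */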
  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      alignment = sizeof (void *);
    }

  new_size = sizeof (struct omp_mem_header);
  if (alignment > sizeof (void *))
    new_size += alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

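  /* For an allocator with a bounded pool_size, reserve the bytes in
     used_pool_size up front (atomically when sync builtins are available,
     otherwise under the allocator's lock) and give them back if the
     underlying malloc fails.  */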
  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = malloc (new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = malloc (new_size);
      if (ptr == NULL)
        goto fail;
    }

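  /* Compute the aligned user pointer just past the header and record the
     raw pointer, the total reserved size and the allocator in the header
     so that omp_free can undo all of the above.  */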
  if (alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + alignment - sizeof (void *)) & ~(alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

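  /* The allocation failed (or would overflow the pool); apply the
     fallback trait of the allocator, if any.  */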
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if (alignment > sizeof (void *)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

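/* Free PTR previously returned by omp_alloc.  The ALLOCATOR argument is
   ignored; the allocator recorded in the header in front of PTR is used
   instead, and its pool accounting is adjusted when the pool is bounded.  */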
void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
        = (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= data->size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
        }
    }
  free (data->ptr);
}