]>
Commit | Line | Data |
---|---|---|
83ffe9cd | 1 | /* Copyright (C) 2005-2023 Free Software Foundation, Inc. |
953ff289 DN |
2 | Contributed by Richard Henderson <rth@redhat.com>. |
3 | ||
f1f3453e TS |
4 | This file is part of the GNU Offloading and Multi Processing Library |
5 | (libgomp). | |
953ff289 DN |
6 | |
7 | Libgomp is free software; you can redistribute it and/or modify it | |
748086b7 JJ |
8 | under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) | |
10 | any later version. | |
953ff289 DN |
11 | |
12 | Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
748086b7 | 14 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
953ff289 DN |
15 | more details. |
16 | ||
748086b7 JJ |
17 | Under Section 7 of GPL version 3, you are granted additional |
18 | permissions described in the GCC Runtime Library Exception, version | |
19 | 3.1, as published by the Free Software Foundation. | |
953ff289 | 20 | |
748086b7 JJ |
21 | You should have received a copy of the GNU General Public License and |
22 | a copy of the GCC Runtime Library Exception along with this program; | |
23 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 | <http://www.gnu.org/licenses/>. */ | |
953ff289 DN |
25 | |
26 | /* This file contains data types and function declarations that are not | |
41dbbb37 TS |
27 | part of the official OpenACC or OpenMP user interfaces. There are |
28 | declarations in here that are part of the GNU Offloading and Multi | |
29 | Processing ABI, in that the compiler is required to know about them | |
30 | and use them. | |
953ff289 DN |
31 | |
   The convention is that the all caps prefix "GOMP" is used to group
   items that are part of the external ABI, and the lower case prefix
   "gomp" is used to group items that are completely private to the
   library.  */
35 | ||
36 | #ifndef LIBGOMP_H | |
37 | #define LIBGOMP_H 1 | |
38 | ||
d9a6bd32 JJ |
39 | #ifndef _LIBGOMP_CHECKING_ |
40 | /* Define to 1 to perform internal sanity checks. */ | |
41 | #define _LIBGOMP_CHECKING_ 0 | |
42 | #endif | |
43 | ||
953ff289 | 44 | #include "config.h" |
810f316d | 45 | #include <stdint.h> |
41dbbb37 | 46 | #include "libgomp-plugin.h" |
ec00d3fa | 47 | #include "gomp-constants.h" |
953ff289 | 48 | |
6103184e | 49 | #ifdef HAVE_PTHREAD_H |
953ff289 | 50 | #include <pthread.h> |
6103184e | 51 | #endif |
953ff289 | 52 | #include <stdbool.h> |
acf0174b | 53 | #include <stdlib.h> |
41dbbb37 | 54 | #include <stdarg.h> |
953ff289 | 55 | |
e4606348 JJ |
56 | /* Needed for memset in priority_queue.c. */ |
57 | #if _LIBGOMP_CHECKING_ | |
58 | # ifdef STRING_WITH_STRINGS | |
59 | # include <string.h> | |
60 | # include <strings.h> | |
61 | # else | |
62 | # ifdef HAVE_STRING_H | |
63 | # include <string.h> | |
64 | # else | |
65 | # ifdef HAVE_STRINGS_H | |
66 | # include <strings.h> | |
67 | # endif | |
68 | # endif | |
69 | # endif | |
70 | #endif | |
71 | ||
953ff289 DN |
72 | #ifdef HAVE_ATTRIBUTE_VISIBILITY |
73 | # pragma GCC visibility push(hidden) | |
74 | #endif | |
75 | ||
cef86eb2 RH |
/* If we were a C++ library, we'd get this from <std/atomic>.
   The numbering matches GCC's __ATOMIC_* built-in memory order
   constants, so values can be passed straight to the built-ins.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5
};
86 | ||
e4606348 JJ |
87 | /* alloc.c */ |
88 | ||
28567c40 | 89 | #if defined(HAVE_ALIGNED_ALLOC) \ |
28567c40 JJ |
90 | || defined(HAVE_POSIX_MEMALIGN) \ |
91 | || defined(HAVE_MEMALIGN) | |
92 | /* Defined if gomp_aligned_alloc doesn't use fallback version | |
93 | and free can be used instead of gomp_aligned_free. */ | |
94 | #define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1 | |
95 | #endif | |
96 | ||
17da2c74 JJ |
97 | #if defined(GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC) && !defined(__AMDGCN__) |
98 | #define GOMP_USE_ALIGNED_WORK_SHARES 1 | |
99 | #endif | |
100 | ||
e4606348 JJ |
101 | extern void *gomp_malloc (size_t) __attribute__((malloc)); |
102 | extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); | |
103 | extern void *gomp_realloc (void *, size_t); | |
28567c40 JJ |
104 | extern void *gomp_aligned_alloc (size_t, size_t) |
105 | __attribute__((malloc, alloc_size (2))); | |
106 | extern void gomp_aligned_free (void *); | |
e4606348 JJ |
107 | |
108 | /* Avoid conflicting prototypes of alloca() in system headers by using | |
109 | GCC's builtin alloca(). */ | |
110 | #define gomp_alloca(x) __builtin_alloca(x) | |
111 | ||
cee16451 AS |
112 | /* Optimized allocators for team-specific data that will die with the team. */ |
113 | ||
114 | #ifdef __AMDGCN__ | |
f6fff8a6 | 115 | #include "libgomp-gcn.h" |
/* The arena is initialized in config/gcn/team.c.
   Each constant is a byte offset into LDS where a pointer is stored.  */
#define TEAM_ARENA_START 16 /* LDS offset of the arena start pointer.  */
#define TEAM_ARENA_FREE 24 /* LDS offset of free pointer.  */
#define TEAM_ARENA_END 32 /* LDS offset of end pointer.  */
120 | ||
/* Allocate SIZE bytes from the per-team LDS arena by atomically
   advancing the free pointer stored at TEAM_ARENA_FREE (via the
   ds_add_rtn_u64 DS atomic).  If the arena is exhausted, warn on
   stderr and fall back to the regular heap via gomp_malloc; such
   fallback pointers are later released by team_free.  */
static inline void * __attribute__((malloc))
team_malloc (size_t size)
{
  /* 4-byte align the size.  */
  size = (size + 3) & ~3;

  /* Allocate directly from the arena.
     The compiler does not support DS atomics, yet.  */
  void *result;
  asm ("ds_add_rtn_u64 %0, %1, %2\n\ts_waitcnt 0"
       : "=v"(result) : "v"(TEAM_ARENA_FREE), "v"(size), "e"(1L) : "memory");

  /* Handle OOM.  */
  if (result + size > *(void * __lds *)TEAM_ARENA_END)
    {
      /* While this is experimental, let's make sure we know when OOM
	 happens.  */
      const char msg[] = "GCN team arena exhausted;"
			 " configure with GCN_TEAM_ARENA_SIZE=bytes\n";
      write (2, msg, sizeof(msg)-1);

      /* Fall back to using the heap (slowly).  */
      result = gomp_malloc (size);
    }
  return result;
}
147 | ||
/* Allocate SIZE bytes from the team arena and zero-fill them before
   returning — the arena counterpart of gomp_malloc_cleared.  */
static inline void * __attribute__((malloc))
team_malloc_cleared (size_t size)
{
  void *ptr = team_malloc (size);

  /* Zero the fresh allocation.  */
  __builtin_memset (ptr, 0, size);
  return ptr;
}
158 | ||
/* Release PTR if (and only if) it lies outside the LDS arena bounds,
   i.e. it came from the heap-fallback path in team_malloc.  Pointers
   inside the arena are ignored; the arena is reclaimed wholesale.  */
static inline void
team_free (void *ptr)
{
  /* The whole arena is freed when the kernel exits.
     However, if we fell back to using heap then we should free it.
     It would be better if this function could be a no-op, but at least
     LDS loads are cheap.  */
  if (ptr < *(void * __lds *)TEAM_ARENA_START
      || ptr >= *(void * __lds *)TEAM_ARENA_END)
    free (ptr);
}
#else
/* Non-GCN targets have no team arena: map the team allocators straight
   onto the regular heap allocators.  */
#define team_malloc(...) gomp_malloc (__VA_ARGS__)
#define team_malloc_cleared(...) gomp_malloc_cleared (__VA_ARGS__)
#define team_free(...) free (__VA_ARGS__)
#endif
175 | ||
e4606348 JJ |
176 | /* error.c */ |
177 | ||
178 | extern void gomp_vdebug (int, const char *, va_list); | |
179 | extern void gomp_debug (int, const char *, ...) | |
180 | __attribute__ ((format (printf, 2, 3))); | |
181 | #define gomp_vdebug(KIND, FMT, VALIST) \ | |
182 | do { \ | |
183 | if (__builtin_expect (gomp_debug_var, 0)) \ | |
184 | (gomp_vdebug) ((KIND), (FMT), (VALIST)); \ | |
185 | } while (0) | |
186 | #define gomp_debug(KIND, ...) \ | |
187 | do { \ | |
188 | if (__builtin_expect (gomp_debug_var, 0)) \ | |
189 | (gomp_debug) ((KIND), __VA_ARGS__); \ | |
190 | } while (0) | |
191 | extern void gomp_verror (const char *, va_list); | |
192 | extern void gomp_error (const char *, ...) | |
193 | __attribute__ ((format (printf, 1, 2))); | |
194 | extern void gomp_vfatal (const char *, va_list) | |
195 | __attribute__ ((noreturn)); | |
196 | extern void gomp_fatal (const char *, ...) | |
197 | __attribute__ ((noreturn, format (printf, 1, 2))); | |
198 | ||
199 | struct gomp_task; | |
200 | struct gomp_taskgroup; | |
201 | struct htab; | |
202 | ||
203 | #include "priority_queue.h" | |
953ff289 DN |
204 | #include "sem.h" |
205 | #include "mutex.h" | |
206 | #include "bar.h" | |
6103184e | 207 | #include "simple-bar.h" |
a68ab351 | 208 | #include "ptrlock.h" |
953ff289 DN |
209 | |
210 | ||
211 | /* This structure contains the data to control one work-sharing construct, | |
212 | either a LOOP (FOR/DO) or a SECTIONS. */ | |
213 | ||
/* The possible loop schedules, mirroring the OpenMP SCHEDULE clause
   kinds (schedule(runtime) is resolved to one of the others at run
   time; see the static_trip comment in struct gomp_team_state).  */
enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO,
  /* Single high bit — presumably ORed into one of the kinds above as
     the monotonic schedule modifier rather than a kind of its own;
     confirm against loop.c.  */
  GFS_MONOTONIC = 0x80000000U
};
223 | ||
d9a6bd32 JJ |
/* Bookkeeping for the DOACROSS dependences (ordered(n) with
   depend(sink:)/depend(source)) of one work-sharing construct;
   referenced from the doacross pointer in struct gomp_work_share.  */
struct gomp_doacross_work_share
{
  union {
    /* chunk_size copy, as ws->chunk_size is multiplied by incr for
       GFS_DYNAMIC.  */
    long chunk_size;
    /* Likewise, but for ull implementation.  */
    unsigned long long chunk_size_ull;
    /* For schedule(static,0) this is the number
       of iterations assigned to the last thread, i.e. number of
       iterations / number of threads.  */
    long q;
    /* Likewise, but for ull implementation.  */
    unsigned long long q_ull;
  };
  /* Size of each array entry (padded to cache line size).  */
  unsigned long elt_sz;
  /* Number of dimensions in sink vectors.  */
  unsigned int ncounts;
  /* True if the iterations can be flattened.  */
  bool flattened;
  /* Actual array (of elt_sz sized units), aligned to cache line size.
     This is indexed by team_id for GFS_STATIC and outermost iteration
     / chunk_size for other schedules.  */
  unsigned char *array;
  /* These two are only used for schedule(static,0).  */
  /* This one is number of iterations % number of threads.  */
  long t;
  union {
    /* And this one is cached t * (q + 1).  */
    long boundary;
    /* Likewise, but for the ull implementation.  */
    unsigned long long boundary_ull;
  };
  /* Pointer to extra memory if needed for lastprivate(conditional).  */
  void *extra;
  /* Array of shift counts for each dimension if they can be flattened.  */
  unsigned int shift_counts[];
};
263 | ||
c7abdf46 JJ |
/* Like struct gomp_work_share, but only the 1st cacheline of it plus
   flexible array at the end.
   Keep in sync with struct gomp_work_share.
   Used only via offsetof: to size the pad before the lock in struct
   gomp_work_share, and in the gomp_workshare_struct_check* layout
   assertions below.  */
struct gomp_work_share_1st_cacheline
{
  enum gomp_schedule_type sched;
  int mode;
  union {
    struct {
      long chunk_size, end, incr;
    };
    struct {
      unsigned long long chunk_size_ull, end_ull, incr_ull;
    };
  };
  union {
    unsigned *ordered_team_ids;
    struct gomp_doacross_work_share *doacross;
  };
  unsigned ordered_num_used, ordered_owner, ordered_cur;
  struct gomp_work_share *next_alloc;
  char pad[];
};
287 | ||
953ff289 DN |
/* The data controlling one work-sharing construct (a LOOP or a
   SECTIONS); shared by all the threads of a team executing it.  */
struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  union {
    /* This is a circular queue that details which threads will be allowed
       into the ordered region and in which order.  When a thread allocates
       iterations on which it is going to work, it also registers itself at
       the end of the array.  When a thread reaches the ordered region, it
       checks to see if it is the one at the head of the queue.  If not, it
       blocks on its RELEASE semaphore.  */
    unsigned *ordered_team_ids;

    /* This is a pointer to DOACROSS work share data.  */
    struct gomp_doacross_work_share *doacross;
  };

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
#ifdef GOMP_USE_ALIGNED_WORK_SHARES
  gomp_mutex_t lock __attribute__((aligned (64)));
#else
  /* Pad out to the 64-byte boundary by hand, using the layout mirror
     struct above, when aligned allocation is not available.  */
  char pad[64 - offsetof (struct gomp_work_share_1st_cacheline, pad)];
  gomp_mutex_t lock;
#endif

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never
       changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* Task reductions for this work-sharing construct.  */
  uintptr_t *task_reductions;

  /* If only few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};
397 | ||
c7abdf46 JJ |
/* Compile-time layout assertions: if either condition is false the
   array size becomes -1 and compilation fails.  Check 1 keeps the
   struct gomp_work_share_1st_cacheline mirror in sync with the real
   struct; check 2 ensures the lock starts exactly at the second
   64-byte cache line.  */
extern char gomp_workshare_struct_check1
  [offsetof (struct gomp_work_share_1st_cacheline, next_alloc)
   == offsetof (struct gomp_work_share, next_alloc) ? 1 : -1];
extern char gomp_workshare_struct_check2
  [offsetof (struct gomp_work_share, lock) == 64 ? 1 : -1];
403 | ||
953ff289 DN |
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

  /* Def-allocator-var ICV.  */
  uintptr_t def_allocator;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};
454 | ||
acf0174b JJ |
455 | struct target_mem_desc; |
456 | ||
9f2fca56 MV |
/* Identifiers for the Internal Control Variables handled by libgomp's
   environment variable machinery (cf. struct gomp_initial_icvs and
   struct gomp_icv_list below).  NOTE(review): presumably also used as
   bit positions in gomp_icv_list.flags — confirm in env.c.  */
enum gomp_icvs
{
  GOMP_ICV_NTEAMS = 1,
  GOMP_ICV_SCHEDULE = 2,
  GOMP_ICV_SCHEDULE_CHUNK_SIZE = 3,
  GOMP_ICV_DYNAMIC = 4,
  GOMP_ICV_TEAMS_THREAD_LIMIT = 5,
  GOMP_ICV_THREAD_LIMIT = 6,
  GOMP_ICV_NTHREADS = 7,
  GOMP_ICV_NTHREADS_LIST = 8,
  GOMP_ICV_NTHREADS_LIST_LEN = 9,
  GOMP_ICV_BIND = 10,
  GOMP_ICV_BIND_LIST = 11,
  GOMP_ICV_BIND_LIST_LEN = 12,
  GOMP_ICV_MAX_ACTIVE_LEVELS = 13,
  GOMP_ICV_WAIT_POLICY = 14,
  GOMP_ICV_STACKSIZE = 15,
  GOMP_ICV_DEFAULT_DEVICE = 16,
  GOMP_ICV_CANCELLATION = 17,
  GOMP_ICV_DISPLAY_AFFINITY = 18,
  GOMP_ICV_TARGET_OFFLOAD = 19,
  GOMP_ICV_MAX_TASK_PRIORITY = 20,
  GOMP_ICV_ALLOCATOR = 21
};
481 | ||
/* Sentinel device numbers for ICV environment variable variants:
   _DEV (-1), _ALL (-2) and no suffix (-3); non-negative numbers denote
   a specific device's _DEV_X variant (see struct gomp_icv_list).  */
enum gomp_device_num
{
  GOMP_DEVICE_NUM_FOR_DEV = -1,
  GOMP_DEVICE_NUM_FOR_ALL = -2,
  GOMP_DEVICE_NUM_FOR_NO_SUFFIX = -3
};
488 | ||
/* These are the OpenMP 4.0 Internal Control Variables described in
   section 2.3.1.  Those described as having one copy per task are
   stored within the structure; those described as having one copy
   for the whole program are (naturally) global variables.  */

struct gomp_task_icv
{
  /* nthreads-var: default number of threads for a team.  */
  unsigned long nthreads_var;
  /* run-sched-var: schedule kind and chunk size for schedule(runtime).  */
  enum gomp_schedule_type run_sched_var;
  int run_sched_chunk_size;
  /* default-device-var.  */
  int default_device_var;
  /* thread-limit-var.  */
  unsigned int thread_limit_var;
  /* dyn-var: dynamic adjustment of thread counts enabled.  */
  bool dyn_var;
  /* max-active-levels-var.  */
  unsigned char max_active_levels_var;
  /* bind-var (proc_bind policy).  */
  char bind_var;
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};
953ff289 | 507 | |
9f2fca56 MV |
/* Bitmask (note the power-of-two values) recording which suffix
   variant(s) of an ICV environment variable were seen: none, _DEV,
   _ALL or a device-specific _DEV_<d>.  */
enum gomp_env_suffix
{
  GOMP_ENV_SUFFIX_UNKNOWN = 0,
  GOMP_ENV_SUFFIX_NONE = 1,
  GOMP_ENV_SUFFIX_DEV = 2,
  GOMP_ENV_SUFFIX_ALL = 4,
  GOMP_ENV_SUFFIX_DEV_X = 8
};
516 | ||
/* Struct that contains all ICVs for which we need to store initial values.
   Keeping the initial values is needed for omp_display_env.  Moreover initial
   _DEV and _ALL variants of environment variables are also used to determine
   actually used values for devices and for the host.  */
struct gomp_initial_icvs
{
  unsigned long *nthreads_var_list;
  char *bind_var_list;
  unsigned long nthreads_var;
  unsigned long nthreads_var_list_len;
  unsigned long bind_var_list_len;
  unsigned long stacksize;
  int run_sched_chunk_size;
  int default_device_var;
  int nteams_var;
  int teams_thread_limit_var;
  int wait_policy;
  unsigned int thread_limit_var;
  enum gomp_schedule_type run_sched_var;
  bool dyn_var;
  unsigned char max_active_levels_var;
  char bind_var;
};
540 | ||
/* Built-in default values of the ICVs, used when neither environment
   variables nor API calls override them (cf. the
   gomp_default_icv_values global declared below).  */
struct gomp_default_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_chunk_size;
  int default_device_var;
  unsigned int thread_limit_var;
  int nteams_var;
  int teams_thread_limit_var;
  bool dyn_var;
  unsigned char max_active_levels_var;
  char bind_var;
};
554 | ||
/* Singly-linked list of per-device initial ICV settings.
   DEVICE_NUM "-1" is reserved for "_DEV" icvs.
   DEVICE_NUM "-2" is reserved for "_ALL" icvs.
   DEVICE_NUM "-3" is reserved for ICVs without suffix.
   Non-negative DEVICE_NUM is for "_DEV_X" icvs.  */
struct gomp_icv_list
{
  int device_num;
  /* Which ICVs were explicitly set for this device entry;
     NOTE(review): presumably bits indexed by enum gomp_icvs — confirm
     in env.c.  */
  uint32_t flags;
  struct gomp_initial_icvs icvs;
  struct gomp_icv_list *next;
};
566 | ||
/* The subset of ICV values tracked per offload device.  */
struct gomp_offload_icvs
{
  int device_num;
  int default_device;
  int nteams;
  int teams_thread_limit;
};
574 | ||
/* Singly-linked list mapping a device number to its offload ICV set
   (cf. the gomp_offload_icv_list global declared below).  */
struct gomp_offload_icv_list
{
  int device_num;
  struct gomp_offload_icvs icvs;
  struct gomp_offload_icv_list *next;
};
581 | ||
1bfc07d1 KCY |
/* target-offload-var ICV values, i.e. the recognized settings of the
   OMP_TARGET_OFFLOAD environment variable (cf. the
   gomp_target_offload_var global declared below).  */
enum gomp_target_offload_t
{
  GOMP_TARGET_OFFLOAD_DEFAULT,
  GOMP_TARGET_OFFLOAD_MANDATORY,
  GOMP_TARGET_OFFLOAD_DISABLED
};
588 | ||
6fae7eda | 589 | #define gomp_supported_active_levels UCHAR_MAX |
8949b985 | 590 | |
a68ab351 | 591 | extern struct gomp_task_icv gomp_global_icv; |
a68ab351 | 592 | #ifndef HAVE_SYNC_BUILTINS |
acf0174b | 593 | extern gomp_mutex_t gomp_managed_threads_lock; |
a68ab351 | 594 | #endif |
acf0174b | 595 | extern bool gomp_cancel_var; |
1bfc07d1 | 596 | extern enum gomp_target_offload_t gomp_target_offload_var; |
e4606348 | 597 | extern int gomp_max_task_priority_var; |
a68ab351 JJ |
598 | extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; |
599 | extern unsigned long gomp_available_cpus, gomp_managed_threads; | |
20906c66 | 600 | extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; |
acf0174b JJ |
601 | extern char *gomp_bind_var_list; |
602 | extern unsigned long gomp_bind_var_list_len; | |
603 | extern void **gomp_places_list; | |
604 | extern unsigned long gomp_places_list_len; | |
6103184e | 605 | extern unsigned int gomp_num_teams_var; |
07dd3bcd JJ |
606 | extern int gomp_nteams_var; |
607 | extern int gomp_teams_thread_limit_var; | |
41dbbb37 | 608 | extern int gomp_debug_var; |
28567c40 JJ |
609 | extern bool gomp_display_affinity_var; |
610 | extern char *gomp_affinity_format_var; | |
611 | extern size_t gomp_affinity_format_len; | |
800bcc8c | 612 | extern uintptr_t gomp_def_allocator; |
9f2fca56 MV |
613 | extern const struct gomp_default_icv gomp_default_icv_values; |
614 | extern struct gomp_icv_list *gomp_initial_icv_list; | |
615 | extern struct gomp_offload_icv_list *gomp_offload_icv_list; | |
41dbbb37 TS |
616 | extern int goacc_device_num; |
617 | extern char *goacc_device_type; | |
ec00d3fa | 618 | extern int goacc_default_dims[GOMP_DIM_MAX]; |
953ff289 | 619 | |
a68ab351 JJ |
/* The lifecycle states/kinds of an OpenMP task.  */
enum gomp_task_kind
{
  /* Implicit task.  */
  GOMP_TASK_IMPLICIT,
  /* Undeferred task.  */
  GOMP_TASK_UNDEFERRED,
  /* Task created by GOMP_task and waiting to be run.  */
  GOMP_TASK_WAITING,
  /* Task currently executing or scheduled and about to execute.  */
  GOMP_TASK_TIED,
  /* Used for target tasks that have vars mapped and async run started,
     but not yet completed.  Once that completes, they will be readded
     into the queues as GOMP_TASK_WAITING in order to perform the var
     unmapping.  */
  GOMP_TASK_ASYNC_RUNNING,
  /* Task that has finished executing but is waiting for its
     completion event to be fulfilled.  */
  GOMP_TASK_DETACHED
};
953ff289 | 639 | |
acf0174b JJ |
/* One entry of a task's depend clause bookkeeping; entries live in the
   flexible depend[] array of struct gomp_task and are chained through
   next/prev (NOTE(review): the exact chaining discipline is
   implemented in task.c — confirm there).  */
struct gomp_task_depend_entry
{
  /* Address of dependency.  */
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  /* Task that provides the dependency in ADDR.  */
  struct gomp_task *task;
  /* Depend entry is of type "IN" (1) or "INOUTSET" (2).  */
  unsigned char is_in;
  bool redundant;
  bool redundant_out;
};
653 | ||
/* Growable vector of the tasks that depend on a given task.  */
struct gomp_dependers_vec
{
  /* Number of elements currently in use in ELEM.  */
  size_t n_elem;
  /* Allocated capacity of ELEM.  */
  size_t allocated;
  struct gomp_task *elem[];
};
660 | ||
0494285a JJ |
/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies.  */

struct gomp_taskwait
{
  bool in_taskwait;
  bool in_depend_wait;
  /* Number of tasks we are waiting for.  */
  size_t n_depend;
  /* Semaphore the waiting thread blocks on until the awaited tasks
     complete.  */
  gomp_sem_t taskwait_sem;
};
671 | ||
/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  /* Parent of this task.  */
  struct gomp_task *parent;
  /* Children of this task.  */
  struct priority_queue children_queue;
  /* Taskgroup this task belongs in.  */
  struct gomp_taskgroup *taskgroup;
  /* Tasks that depend on this task.  */
  struct gomp_dependers_vec *dependers;
  /* Hash table over this task's dependency addresses;
     NOTE(review): keying/lifetime handled in task.c — confirm there.  */
  struct htab *depend_hash;
  struct gomp_taskwait *taskwait;
  /* Last depend({,in}out:omp_all_memory) child if any.  */
  struct gomp_task *depend_all_memory;
  /* Number of items in DEPEND.  */
  size_t depend_count;
  /* Number of tasks this task depends on.  Once this counter reaches
     0, we have no unsatisfied dependencies, and this task can be put
     into the various queues to be scheduled.  */
  size_t num_dependees;

  union {
    /* Valid only if deferred_p is false.  */
    gomp_sem_t *completion_sem;
    /* Valid only if deferred_p is true.  Set to the team that executes the
       task if the task is detached and the completion event has yet to be
       fulfilled.  */
    struct gomp_team *detach_team;
  };
  /* Discriminator for the union above.  */
  bool deferred_p;

  /* Priority of this task.  */
  int priority;
  /* The priority node for this task in each of the different queues.
     We put this here to avoid allocating space for each priority
     node.  Then we play offsetof() games to convert between pnode[]
     entries and the gomp_task in which they reside.  */
  struct priority_node pnode[3];

  /* This task's copy of the per-task ICVs (see struct gomp_task_icv).  */
  struct gomp_task_icv icv;
  /* Function to run for this task and its argument.  */
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_tied_task;
  bool final_task;
  bool copy_ctors_done;
  /* Set for undeferred tasks with unsatisfied dependencies which
     block further execution of their parent until the dependencies
     are satisfied.  */
  bool parent_depends_on;
  /* Dependencies provided and/or needed for this task.  DEPEND_COUNT
     is the number of items available.  */
  struct gomp_task_depend_entry depend[];
};
728 | ||
e4606348 JJ |
/* This structure describes a single #pragma omp taskgroup.  */

struct gomp_taskgroup
{
  /* Enclosing taskgroup, if any — taskgroups nest and presumably form
     a stack through this field; confirm in task.c.  */
  struct gomp_taskgroup *prev;
  /* Queue of tasks that belong in this taskgroup.  */
  struct priority_queue taskgroup_queue;
  /* Task reductions registered for this taskgroup.  */
  uintptr_t *reductions;
  bool in_taskgroup_wait;
  bool cancelled;
  bool workshare;
  gomp_sem_t taskgroup_sem;
  size_t num_children;
};
743 | ||
e4606348 JJ |
/* The various states of OpenMP async offloading tasks.  */
enum gomp_target_task_state
{
  GOMP_TARGET_TASK_DATA,
  GOMP_TARGET_TASK_BEFORE_MAP,
  GOMP_TARGET_TASK_FALLBACK,
  GOMP_TARGET_TASK_READY_TO_RUN,
  GOMP_TARGET_TASK_RUNNING,
  GOMP_TARGET_TASK_FINISHED
};
754 | ||
/* This structure describes a target task.  */

struct gomp_target_task
{
  /* Device the target region is offloaded to.  */
  struct gomp_device_descr *devicep;
  /* Outlined target region function.  */
  void (*fn) (void *);
  /* Number of entries in the map arrays (hostaddrs/sizes/kinds);
     NOTE(review): presumably the arrays as passed to GOMP_target_ext —
     confirm against target.c.  */
  size_t mapnum;
  size_t *sizes;
  unsigned short *kinds;
  unsigned int flags;
  /* Current state in the async-offload lifecycle.  */
  enum gomp_target_task_state state;
  struct target_mem_desc *tgt;
  /* The OpenMP task this target task is attached to.  */
  struct gomp_task *task;
  struct gomp_team *team;
  /* Device-specific target arguments.  */
  void **args;
  void *hostaddrs[];
};
773 | ||
a68ab351 JJ |
774 | /* This structure describes a "team" of threads. These are the threads |
775 | that are spawned by a PARALLEL constructs, as well as the work sharing | |
776 | constructs that the team encounters. */ | |
777 | ||
778 | struct gomp_team | |
779 | { | |
953ff289 DN |
780 | /* This is the number of threads in the current team. */ |
781 | unsigned nthreads; | |
782 | ||
a68ab351 JJ |
783 | /* This is number of gomp_work_share structs that have been allocated |
784 | as a block last time. */ | |
785 | unsigned work_share_chunk; | |
786 | ||
953ff289 DN |
787 | /* This is the saved team state that applied to a master thread before |
788 | the current thread was created. */ | |
789 | struct gomp_team_state prev_ts; | |
790 | ||
953ff289 DN |
791 | /* This semaphore should be used by the master thread instead of its |
792 | "native" semaphore in the thread structure. Required for nested | |
793 | parallels, as the master is a member of two teams. */ | |
794 | gomp_sem_t master_release; | |
795 | ||
a68ab351 JJ |
796 | /* This points to an array with pointers to the release semaphore |
797 | of the threads in the team. */ | |
798 | gomp_sem_t **ordered_release; | |
799 | ||
acf0174b JJ |
800 | /* List of work shares on which gomp_fini_work_share hasn't been |
801 | called yet. If the team hasn't been cancelled, this should be | |
802 | equal to each thr->ts.work_share, but otherwise it can be a possibly | |
803 | long list of workshares. */ | |
804 | struct gomp_work_share *work_shares_to_free; | |
805 | ||
a68ab351 JJ |
806 | /* List of gomp_work_share structs chained through next_free fields. |
807 | This is populated and taken off only by the first thread in the | |
808 | team encountering a new work sharing construct, in a critical | |
809 | section. */ | |
810 | struct gomp_work_share *work_share_list_alloc; | |
811 | ||
812 | /* List of gomp_work_share structs freed by free_work_share. New | |
813 | entries are atomically added to the start of the list, and | |
814 | alloc_work_share can safely only move all but the first entry | |
815 | to work_share_list alloc, as free_work_share can happen concurrently | |
816 | with alloc_work_share. */ | |
817 | struct gomp_work_share *work_share_list_free; | |
818 | ||
819 | #ifdef HAVE_SYNC_BUILTINS | |
820 | /* Number of simple single regions encountered by threads in this | |
821 | team. */ | |
822 | unsigned long single_count; | |
823 | #else | |
824 | /* Mutex protecting addition of workshares to work_share_list_free. */ | |
825 | gomp_mutex_t work_share_list_free_lock; | |
826 | #endif | |
827 | ||
828 | /* This barrier is used for most synchronization of the team. */ | |
829 | gomp_barrier_t barrier; | |
830 | ||
831 | /* Initial work shares, to avoid allocating any gomp_work_share | |
832 | structs in the common case. */ | |
833 | struct gomp_work_share work_shares[8]; | |
834 | ||
835 | gomp_mutex_t task_lock; | |
e4606348 JJ |
836 | /* Scheduled tasks. */ |
837 | struct priority_queue task_queue; | |
acf0174b JJ |
838 | /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */ |
839 | unsigned int task_count; | |
840 | /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */ | |
841 | unsigned int task_queued_count; | |
842 | /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running | |
843 | directly in gomp_barrier_handle_tasks; tasks spawned | |
844 | from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when | |
845 | that is called from a task run from gomp_barrier_handle_tasks. | |
846 | task_running_count should be always <= team->nthreads, | |
847 | and if current task isn't in_tied_task, then it will be | |
848 | even < team->nthreads. */ | |
849 | unsigned int task_running_count; | |
850 | int work_share_cancelled; | |
851 | int team_cancelled; | |
a68ab351 | 852 | |
d656bfda | 853 | /* Number of tasks waiting for their completion event to be fulfilled. */ |
a6d22fb2 KCY |
854 | unsigned int task_detach_count; |
855 | ||
a68ab351 JJ |
856 | /* This array contains structures for implicit tasks. */ |
857 | struct gomp_task implicit_task[]; | |
953ff289 DN |
858 | }; |
859 | ||
860 | /* This structure contains all data that is private to libgomp and is | |
861 | allocated per thread. */ | |
862 | ||
863 | struct gomp_thread | |
864 | { | |
865 | /* This is the function that the thread should run upon launch. */ | |
866 | void (*fn) (void *data); | |
867 | void *data; | |
868 | ||
869 | /* This is the current team state for this thread. The ts.team member | |
870 | is NULL only if the thread is idle. */ | |
871 | struct gomp_team_state ts; | |
872 | ||
a68ab351 JJ |
873 | /* This is the task that the thread is currently executing. */ |
874 | struct gomp_task *task; | |
875 | ||
953ff289 DN |
876 | /* This semaphore is used for ordered loops. */ |
877 | gomp_sem_t release; | |
a68ab351 | 878 | |
acf0174b JJ |
879 | /* Place this thread is bound to plus one, or zero if not bound |
880 | to any place. */ | |
881 | unsigned int place; | |
882 | ||
883 | /* User pthread thread pool */ | |
a68ab351 | 884 | struct gomp_thread_pool *thread_pool; |
28567c40 | 885 | |
fa4fcb11 JJ |
886 | #ifdef LIBGOMP_USE_PTHREADS |
887 | /* omp_get_num_teams () - 1. */ | |
888 | unsigned int num_teams; | |
889 | ||
890 | /* omp_get_team_num (). */ | |
891 | unsigned int team_num; | |
892 | #endif | |
893 | ||
28567c40 JJ |
894 | #if defined(LIBGOMP_USE_PTHREADS) \ |
895 | && (!defined(HAVE_TLS) \ | |
896 | || !defined(__GLIBC__) \ | |
897 | || !defined(USING_INITIAL_EXEC_TLS)) | |
898 | /* pthread_t of the thread containing this gomp_thread. | |
899 | On Linux when using initial-exec TLS, | |
900 | (typeof (pthread_t)) gomp_thread () - pthread_self () | |
901 | is constant in all threads, so we can optimize and not | |
902 | store it. */ | |
903 | #define GOMP_NEEDS_THREAD_HANDLE 1 | |
904 | pthread_t handle; | |
905 | #endif | |
a68ab351 JJ |
906 | }; |
907 | ||
908 | ||
909 | struct gomp_thread_pool | |
910 | { | |
911 | /* This array manages threads spawned from the top level, which will | |
912 | return to the idle loop once the current PARALLEL construct ends. */ | |
913 | struct gomp_thread **threads; | |
914 | unsigned threads_size; | |
915 | unsigned threads_used; | |
e5210c77 SH |
916 | /* The last team is used for non-nested teams to delay their destruction to |
917 | make sure all the threads in the team move on to the pool's barrier before | |
918 | the team's barrier is destroyed. */ | |
a68ab351 | 919 | struct gomp_team *last_team; |
acf0174b JJ |
920 | /* Number of threads running in this contention group. */ |
921 | unsigned long threads_busy; | |
a68ab351 | 922 | |
6103184e AM |
923 | /* This barrier holds and releases threads waiting in thread pools. */ |
924 | gomp_simple_barrier_t threads_dock; | |
953ff289 DN |
925 | }; |
926 | ||
acf0174b JJ |
927 | enum gomp_cancel_kind |
928 | { | |
929 | GOMP_CANCEL_PARALLEL = 1, | |
930 | GOMP_CANCEL_LOOP = 2, | |
931 | GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP, | |
932 | GOMP_CANCEL_DO = GOMP_CANCEL_LOOP, | |
933 | GOMP_CANCEL_SECTIONS = 4, | |
934 | GOMP_CANCEL_TASKGROUP = 8 | |
935 | }; | |
936 | ||
953ff289 DN |
937 | /* ... and here is that TLS data. */ |
938 | ||
6103184e AM |
939 | #if defined __nvptx__ |
940 | extern struct gomp_thread *nvptx_thrs __attribute__((shared)); | |
941 | static inline struct gomp_thread *gomp_thread (void) | |
942 | { | |
943 | int tid; | |
944 | asm ("mov.u32 %0, %%tid.y;" : "=r" (tid)); | |
945 | return nvptx_thrs + tid; | |
946 | } | |
fa499995 AS |
947 | #elif defined __AMDGCN__ |
948 | static inline struct gomp_thread *gcn_thrs (void) | |
949 | { | |
950 | /* The value is at the bottom of LDS. */ | |
951 | struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4; | |
952 | return *thrs; | |
953 | } | |
954 | static inline void set_gcn_thrs (struct gomp_thread *val) | |
955 | { | |
956 | /* The value is at the bottom of LDS. */ | |
957 | struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4; | |
958 | *thrs = val; | |
959 | } | |
960 | static inline struct gomp_thread *gomp_thread (void) | |
961 | { | |
962 | int tid = __builtin_gcn_dim_pos(1); | |
963 | return gcn_thrs () + tid; | |
964 | } | |
6103184e | 965 | #elif defined HAVE_TLS || defined USE_EMUTLS |
953ff289 DN |
966 | extern __thread struct gomp_thread gomp_tls_data; |
967 | static inline struct gomp_thread *gomp_thread (void) | |
968 | { | |
969 | return &gomp_tls_data; | |
970 | } | |
971 | #else | |
972 | extern pthread_key_t gomp_tls_key; | |
973 | static inline struct gomp_thread *gomp_thread (void) | |
974 | { | |
975 | return pthread_getspecific (gomp_tls_key); | |
976 | } | |
977 | #endif | |
978 | ||
a68ab351 JJ |
979 | extern struct gomp_task_icv *gomp_new_icv (void); |
980 | ||
981 | /* Here's how to access the current copy of the ICVs. */ | |
953ff289 | 982 | |
a68ab351 JJ |
983 | static inline struct gomp_task_icv *gomp_icv (bool write) |
984 | { | |
985 | struct gomp_task *task = gomp_thread ()->task; | |
986 | if (task) | |
987 | return &task->icv; | |
988 | else if (write) | |
989 | return gomp_new_icv (); | |
990 | else | |
991 | return &gomp_global_icv; | |
992 | } | |
d0d1b24d | 993 | |
6103184e | 994 | #ifdef LIBGOMP_USE_PTHREADS |
d0d1b24d RH |
995 | /* The attributes to be used during thread creation. */ |
996 | extern pthread_attr_t gomp_thread_attr; | |
953ff289 | 997 | |
66c59f92 | 998 | extern pthread_key_t gomp_thread_destructor; |
6103184e | 999 | #endif |
66c59f92 | 1000 | |
953ff289 DN |
1001 | /* Function prototypes. */ |
1002 | ||
a0884cf0 JJ |
1003 | /* affinity.c */ |
1004 | ||
1005 | extern void gomp_init_affinity (void); | |
6103184e | 1006 | #ifdef LIBGOMP_USE_PTHREADS |
acf0174b | 1007 | extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int); |
6103184e | 1008 | #endif |
acf0174b JJ |
1009 | extern void **gomp_affinity_alloc (unsigned long, bool); |
1010 | extern void gomp_affinity_init_place (void *); | |
1011 | extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long, | |
1012 | long, bool); | |
1013 | extern bool gomp_affinity_remove_cpu (void *, unsigned long); | |
1014 | extern bool gomp_affinity_copy_place (void *, void *, long); | |
1015 | extern bool gomp_affinity_same_place (void *, void *); | |
1016 | extern bool gomp_affinity_finalize_place_list (bool); | |
1017 | extern bool gomp_affinity_init_level (int, unsigned long, bool); | |
1018 | extern void gomp_affinity_print_place (void *); | |
d9a6bd32 | 1019 | extern void gomp_get_place_proc_ids_8 (int, int64_t *); |
28567c40 JJ |
1020 | extern void gomp_display_affinity_place (char *, size_t, size_t *, int); |
1021 | ||
1022 | /* affinity-fmt.c */ | |
1023 | ||
91df4397 | 1024 | extern bool gomp_print_string (const char *str, size_t len); |
28567c40 JJ |
1025 | extern void gomp_set_affinity_format (const char *, size_t); |
1026 | extern void gomp_display_string (char *, size_t, size_t *, const char *, | |
1027 | size_t); | |
1028 | #ifdef LIBGOMP_USE_PTHREADS | |
1029 | typedef pthread_t gomp_thread_handle; | |
1030 | #else | |
1031 | typedef struct {} gomp_thread_handle; | |
1032 | #endif | |
1033 | extern size_t gomp_display_affinity (char *, size_t, const char *, | |
1034 | gomp_thread_handle, | |
1035 | struct gomp_team_state *, unsigned int); | |
1036 | extern void gomp_display_affinity_thread (gomp_thread_handle, | |
1037 | struct gomp_team_state *, | |
1038 | unsigned int) __attribute__((cold)); | |
a0884cf0 | 1039 | |
9f2fca56 MV |
1040 | /* env.c */ |
1041 | ||
1042 | extern struct gomp_icv_list *gomp_get_initial_icv_item (int dev_num); | |
1043 | extern bool gomp_get_icv_flag (uint32_t value, enum gomp_icvs icv); | |
1044 | ||
953ff289 DN |
1045 | /* iter.c */ |
1046 | ||
1047 | extern int gomp_iter_static_next (long *, long *); | |
1048 | extern bool gomp_iter_dynamic_next_locked (long *, long *); | |
1049 | extern bool gomp_iter_guided_next_locked (long *, long *); | |
1050 | ||
1051 | #ifdef HAVE_SYNC_BUILTINS | |
1052 | extern bool gomp_iter_dynamic_next (long *, long *); | |
1053 | extern bool gomp_iter_guided_next (long *, long *); | |
1054 | #endif | |
1055 | ||
a68ab351 JJ |
1056 | /* iter_ull.c */ |
1057 | ||
1058 | extern int gomp_iter_ull_static_next (unsigned long long *, | |
1059 | unsigned long long *); | |
1060 | extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *, | |
1061 | unsigned long long *); | |
1062 | extern bool gomp_iter_ull_guided_next_locked (unsigned long long *, | |
1063 | unsigned long long *); | |
1064 | ||
1065 | #if defined HAVE_SYNC_BUILTINS && defined __LP64__ | |
1066 | extern bool gomp_iter_ull_dynamic_next (unsigned long long *, | |
1067 | unsigned long long *); | |
1068 | extern bool gomp_iter_ull_guided_next (unsigned long long *, | |
1069 | unsigned long long *); | |
1070 | #endif | |
1071 | ||
953ff289 DN |
1072 | /* ordered.c */ |
1073 | ||
1074 | extern void gomp_ordered_first (void); | |
1075 | extern void gomp_ordered_last (void); | |
1076 | extern void gomp_ordered_next (void); | |
1077 | extern void gomp_ordered_static_init (void); | |
1078 | extern void gomp_ordered_static_next (void); | |
1079 | extern void gomp_ordered_sync (void); | |
28567c40 | 1080 | extern void gomp_doacross_init (unsigned, long *, long, size_t); |
d9a6bd32 | 1081 | extern void gomp_doacross_ull_init (unsigned, unsigned long long *, |
28567c40 | 1082 | unsigned long long, size_t); |
953ff289 DN |
1083 | |
1084 | /* parallel.c */ | |
1085 | ||
a68ab351 | 1086 | extern unsigned gomp_resolve_num_threads (unsigned, unsigned); |
953ff289 DN |
1087 | |
1088 | /* proc.c (in config/) */ | |
1089 | ||
1090 | extern void gomp_init_num_threads (void); | |
1091 | extern unsigned gomp_dynamic_max_threads (void); | |
1092 | ||
a68ab351 JJ |
1093 | /* task.c */ |
1094 | ||
1095 | extern void gomp_init_task (struct gomp_task *, struct gomp_task *, | |
1096 | struct gomp_task_icv *); | |
1097 | extern void gomp_end_task (void); | |
1098 | extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); | |
d9a6bd32 | 1099 | extern void gomp_task_maybe_wait_for_dependencies (void **); |
e4606348 | 1100 | extern bool gomp_create_target_task (struct gomp_device_descr *, |
d9a6bd32 JJ |
1101 | void (*) (void *), size_t, void **, |
1102 | size_t *, unsigned short *, unsigned int, | |
b2b40051 MJ |
1103 | void **, void **, |
1104 | enum gomp_target_task_state); | |
28567c40 JJ |
1105 | extern struct gomp_taskgroup *gomp_parallel_reduction_register (uintptr_t *, |
1106 | unsigned); | |
1107 | extern void gomp_workshare_taskgroup_start (void); | |
1108 | extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *); | |
a68ab351 JJ |
1109 | |
1110 | static void inline | |
1111 | gomp_finish_task (struct gomp_task *task) | |
1112 | { | |
acf0174b JJ |
1113 | if (__builtin_expect (task->depend_hash != NULL, 0)) |
1114 | free (task->depend_hash); | |
a68ab351 JJ |
1115 | } |
1116 | ||
953ff289 DN |
1117 | /* team.c */ |
1118 | ||
a68ab351 | 1119 | extern struct gomp_team *gomp_new_team (unsigned); |
953ff289 | 1120 | extern void gomp_team_start (void (*) (void *), void *, unsigned, |
28567c40 JJ |
1121 | unsigned, struct gomp_team *, |
1122 | struct gomp_taskgroup *); | |
953ff289 | 1123 | extern void gomp_team_end (void); |
acf0174b | 1124 | extern void gomp_free_thread (void *); |
28567c40 | 1125 | extern int gomp_pause_host (void); |
acf0174b JJ |
1126 | |
1127 | /* target.c */ | |
1128 | ||
41dbbb37 | 1129 | extern void gomp_init_targets_once (void); |
acf0174b | 1130 | extern int gomp_get_num_devices (void); |
e4606348 | 1131 | extern bool gomp_target_task_fn (void *); |
131d18e9 TB |
1132 | extern void gomp_target_rev (uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, |
1133 | int, | |
1134 | void (*) (void *, const void *, size_t, void *), | |
1135 | void (*) (void *, const void *, size_t, void *), | |
1136 | void *); | |
953ff289 | 1137 | |
e4606348 | 1138 | /* Splay tree definitions. */ |
41dbbb37 TS |
1139 | typedef struct splay_tree_node_s *splay_tree_node; |
1140 | typedef struct splay_tree_s *splay_tree; | |
1141 | typedef struct splay_tree_key_s *splay_tree_key; | |
1142 | ||
d9a6bd32 JJ |
1143 | struct target_var_desc { |
1144 | /* Splay key. */ | |
1145 | splay_tree_key key; | |
1146 | /* True if data should be copied from device to host at the end. */ | |
1147 | bool copy_from; | |
1148 | /* True if data always should be copied from device to host at the end. */ | |
1149 | bool always_copy_from; | |
bc4ed079 JB |
1150 | /* True if this is for OpenACC 'attach'. */ |
1151 | bool is_attach; | |
972da557 TB |
1152 | /* If GOMP_MAP_TO_PSET had a NULL pointer; used for Fortran descriptors, |
1153 | which were initially unallocated. */ | |
1154 | bool has_null_ptr_assoc; | |
d9a6bd32 JJ |
1155 | /* Relative offset against key host_start. */ |
1156 | uintptr_t offset; | |
1157 | /* Actual length. */ | |
1158 | uintptr_t length; | |
1159 | }; | |
1160 | ||
ea4b23d9 | 1161 | struct target_mem_desc; |
41dbbb37 | 1162 | |
275c736e CLT |
1163 | /* Special value for refcount - mask to indicate existence of special |
1164 | values. Right now we allocate 3 bits. */ | |
1165 | #define REFCOUNT_SPECIAL (~(uintptr_t) 0x7) | |
1166 | ||
d9a6bd32 | 1167 | /* Special value for refcount - infinity. */ |
275c736e | 1168 | #define REFCOUNT_INFINITY (REFCOUNT_SPECIAL | 0) |
4a38b02b IV |
1169 | /* Special value for refcount - tgt_offset contains target address of the |
1170 | artificial pointer to "omp declare target link" object. */ | |
275c736e CLT |
1171 | #define REFCOUNT_LINK (REFCOUNT_SPECIAL | 1) |
1172 | ||
1173 | /* Special value for refcount - structure element sibling list items. | |
1174 | All such key refounts have REFCOUNT_STRUCTELEM bits set, with _FLAG_FIRST | |
1175 | and _FLAG_LAST indicating first and last in the created sibling sequence. */ | |
1176 | #define REFCOUNT_STRUCTELEM (REFCOUNT_SPECIAL | 4) | |
1177 | #define REFCOUNT_STRUCTELEM_P(V) \ | |
1178 | (((V) & REFCOUNT_STRUCTELEM) == REFCOUNT_STRUCTELEM) | |
1179 | /* The first leading key with _FLAG_FIRST set houses the actual reference count | |
1180 | in the structelem_refcount field. Other siblings point to this counter value | |
1181 | through its structelem_refcount_ptr field. */ | |
1182 | #define REFCOUNT_STRUCTELEM_FLAG_FIRST (1) | |
1183 | /* The last key in the sibling sequence has this set. This is required to | |
1184 | indicate the sequence boundary, when we remove the structure sibling list | |
1185 | from the map. */ | |
1186 | #define REFCOUNT_STRUCTELEM_FLAG_LAST (2) | |
1187 | ||
1188 | #define REFCOUNT_STRUCTELEM_FIRST_P(V) \ | |
1189 | (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_FIRST)) | |
1190 | #define REFCOUNT_STRUCTELEM_LAST_P(V) \ | |
1191 | (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_LAST)) | |
d9a6bd32 | 1192 | |
6c7e076b JB |
1193 | /* Special offset values. */ |
1194 | #define OFFSET_INLINED (~(uintptr_t) 0) | |
1195 | #define OFFSET_POINTER (~(uintptr_t) 1) | |
1196 | #define OFFSET_STRUCT (~(uintptr_t) 2) | |
1197 | ||
2a656a93 JB |
1198 | /* Auxiliary structure for infrequently-used or API-specific data. */ |
1199 | ||
1200 | struct splay_tree_aux { | |
1201 | /* Pointer to the original mapping of "omp declare target link" object. */ | |
1202 | splay_tree_key link_key; | |
5d5be7bf JB |
1203 | /* For a block with attached pointers, the attachment counters for each. |
1204 | Only used for OpenACC. */ | |
1205 | uintptr_t *attach_count; | |
2a656a93 JB |
1206 | }; |
1207 | ||
41dbbb37 TS |
1208 | struct splay_tree_key_s { |
1209 | /* Address of the host object. */ | |
1210 | uintptr_t host_start; | |
1211 | /* Address immediately after the host object. */ | |
1212 | uintptr_t host_end; | |
1213 | /* Descriptor of the target memory. */ | |
1214 | struct target_mem_desc *tgt; | |
1215 | /* Offset from tgt->tgt_start to the start of the target object. */ | |
1216 | uintptr_t tgt_offset; | |
1217 | /* Reference count. */ | |
1218 | uintptr_t refcount; | |
275c736e CLT |
1219 | union { |
1220 | /* Dynamic reference count. */ | |
1221 | uintptr_t dynamic_refcount; | |
1222 | ||
1223 | /* Unified reference count for structure element siblings, this is used | |
1224 | when REFCOUNT_STRUCTELEM_FIRST_P(k->refcount) == true, the first sibling | |
1225 | in a structure element sibling list item sequence. */ | |
1226 | uintptr_t structelem_refcount; | |
1227 | ||
1228 | /* When REFCOUNT_STRUCTELEM_P (k->refcount) == true, this field points | |
1229 | into the (above) structelem_refcount field of the _FIRST splay_tree_key, | |
1230 | the first key in the created sequence. All structure element siblings | |
1231 | share a single refcount in this manner. Since these two fields won't be | |
1232 | used at the same time, they are stashed in a union. */ | |
1233 | uintptr_t *structelem_refcount_ptr; | |
1234 | }; | |
2a656a93 | 1235 | struct splay_tree_aux *aux; |
41dbbb37 TS |
1236 | }; |
1237 | ||
e4606348 JJ |
1238 | /* The comparison function. */ |
1239 | ||
1240 | static inline int | |
1241 | splay_compare (splay_tree_key x, splay_tree_key y) | |
1242 | { | |
1243 | if (x->host_start == x->host_end | |
1244 | && y->host_start == y->host_end) | |
1245 | return 0; | |
1246 | if (x->host_end <= y->host_start) | |
1247 | return -1; | |
1248 | if (x->host_start >= y->host_end) | |
1249 | return 1; | |
1250 | return 0; | |
1251 | } | |
1252 | ||
41dbbb37 TS |
1253 | #include "splay-tree.h" |
1254 | ||
ea4b23d9 TB |
1255 | /* Reverse offload splay-tree handling (functions only). */ |
1256 | ||
1257 | struct reverse_splay_tree_key_s { | |
1258 | /* Address of the device object. */ | |
1259 | uint64_t dev; | |
1260 | splay_tree_key k; | |
1261 | }; | |
1262 | ||
1263 | typedef struct reverse_splay_tree_node_s *reverse_splay_tree_node; | |
1264 | typedef struct reverse_splay_tree_s *reverse_splay_tree; | |
1265 | typedef struct reverse_splay_tree_key_s *reverse_splay_tree_key; | |
1266 | ||
1267 | static inline int | |
1268 | reverse_splay_compare (reverse_splay_tree_key x, reverse_splay_tree_key y) | |
1269 | { | |
1270 | if (x->dev < y->dev) | |
1271 | return -1; | |
1272 | if (x->dev > y->dev) | |
1273 | return 1; | |
1274 | return 0; | |
1275 | } | |
1276 | ||
1277 | #define splay_tree_prefix reverse | |
1278 | #include "splay-tree.h" | |
1279 | ||
1280 | struct target_mem_desc { | |
1281 | /* Reference count. */ | |
1282 | uintptr_t refcount; | |
1283 | /* All the splay nodes allocated together. */ | |
1284 | splay_tree_node array; | |
1285 | /* Likewise for the reverse lookup device->host for reverse offload. */ | |
1286 | reverse_splay_tree_node rev_array; | |
1287 | /* Start of the target region. */ | |
1288 | uintptr_t tgt_start; | |
1289 | /* End of the targer region. */ | |
1290 | uintptr_t tgt_end; | |
1291 | /* Handle to free. */ | |
1292 | void *to_free; | |
1293 | /* Previous target_mem_desc. */ | |
1294 | struct target_mem_desc *prev; | |
1295 | /* Number of items in following list. */ | |
1296 | size_t list_count; | |
1297 | ||
1298 | /* Corresponding target device descriptor. */ | |
1299 | struct gomp_device_descr *device_descr; | |
1300 | ||
1301 | /* List of target items to remove (or decrease refcount) | |
1302 | at the end of region. */ | |
1303 | struct target_var_desc list[]; | |
1304 | }; | |
1305 | ||
1306 | ||
41dbbb37 TS |
1307 | typedef struct acc_dispatch_t |
1308 | { | |
41dbbb37 | 1309 | /* Execute. */ |
345a8c17 | 1310 | __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func; |
41dbbb37 | 1311 | |
41dbbb37 | 1312 | /* Create/destroy TLS data. */ |
dced339c TS |
1313 | __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func; |
1314 | __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data) | |
1315 | *destroy_thread_data_func; | |
1f4c5b9b CLT |
1316 | |
1317 | struct { | |
1318 | /* Once created and put into the "active" list, asyncqueues are then never | |
1319 | destructed and removed from the "active" list, other than if the TODO | |
1320 | device is shut down. */ | |
1321 | gomp_mutex_t lock; | |
1322 | int nasyncqueue; | |
1323 | struct goacc_asyncqueue **asyncqueue; | |
1324 | struct goacc_asyncqueue_list *active; | |
1325 | ||
1326 | __typeof (GOMP_OFFLOAD_openacc_async_construct) *construct_func; | |
1327 | __typeof (GOMP_OFFLOAD_openacc_async_destruct) *destruct_func; | |
1328 | __typeof (GOMP_OFFLOAD_openacc_async_test) *test_func; | |
1329 | __typeof (GOMP_OFFLOAD_openacc_async_synchronize) *synchronize_func; | |
1330 | __typeof (GOMP_OFFLOAD_openacc_async_serialize) *serialize_func; | |
1331 | __typeof (GOMP_OFFLOAD_openacc_async_queue_callback) *queue_callback_func; | |
1332 | ||
1333 | __typeof (GOMP_OFFLOAD_openacc_async_exec) *exec_func; | |
1334 | __typeof (GOMP_OFFLOAD_openacc_async_dev2host) *dev2host_func; | |
1335 | __typeof (GOMP_OFFLOAD_openacc_async_host2dev) *host2dev_func; | |
1336 | } async; | |
41dbbb37 | 1337 | |
6fc0385c TS |
1338 | __typeof (GOMP_OFFLOAD_openacc_get_property) *get_property_func; |
1339 | ||
41dbbb37 TS |
1340 | /* NVIDIA target specific routines. */ |
1341 | struct { | |
345a8c17 | 1342 | __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device) |
dced339c | 1343 | *get_current_device_func; |
345a8c17 | 1344 | __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context) |
dced339c | 1345 | *get_current_context_func; |
345a8c17 TS |
1346 | __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func; |
1347 | __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func; | |
41dbbb37 TS |
1348 | } cuda; |
1349 | } acc_dispatch_t; | |
1350 | ||
d84ffc0a IV |
1351 | /* Various state of the accelerator device. */ |
1352 | enum gomp_device_state | |
1353 | { | |
1354 | GOMP_DEVICE_UNINITIALIZED, | |
1355 | GOMP_DEVICE_INITIALIZED, | |
1356 | GOMP_DEVICE_FINALIZED | |
1357 | }; | |
1358 | ||
41dbbb37 TS |
1359 | /* This structure describes accelerator device. |
1360 | It contains name of the corresponding libgomp plugin, function handlers for | |
1361 | interaction with the device, ID-number of the device, and information about | |
1362 | mapped memory. */ | |
1363 | struct gomp_device_descr | |
1364 | { | |
1365 | /* Immutable data, which is only set during initialization, and which is not | |
1366 | guarded by the lock. */ | |
1367 | ||
1368 | /* The name of the device. */ | |
1369 | const char *name; | |
1370 | ||
1371 | /* Capabilities of device (supports OpenACC, OpenMP). */ | |
1372 | unsigned int capabilities; | |
1373 | ||
1374 | /* This is the ID number of device among devices of the same type. */ | |
1375 | int target_id; | |
1376 | ||
1377 | /* This is the TYPE of device. */ | |
1378 | enum offload_target_type type; | |
1379 | ||
1380 | /* Function handlers. */ | |
dced339c TS |
1381 | __typeof (GOMP_OFFLOAD_get_name) *get_name_func; |
1382 | __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func; | |
1383 | __typeof (GOMP_OFFLOAD_get_type) *get_type_func; | |
1384 | __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func; | |
1385 | __typeof (GOMP_OFFLOAD_init_device) *init_device_func; | |
1386 | __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func; | |
1387 | __typeof (GOMP_OFFLOAD_version) *version_func; | |
1388 | __typeof (GOMP_OFFLOAD_load_image) *load_image_func; | |
1389 | __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func; | |
1390 | __typeof (GOMP_OFFLOAD_alloc) *alloc_func; | |
1391 | __typeof (GOMP_OFFLOAD_free) *free_func; | |
1392 | __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func; | |
1393 | __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func; | |
1394 | __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func; | |
1395 | __typeof (GOMP_OFFLOAD_can_run) *can_run_func; | |
1396 | __typeof (GOMP_OFFLOAD_run) *run_func; | |
1397 | __typeof (GOMP_OFFLOAD_async_run) *async_run_func; | |
41dbbb37 | 1398 | |
a51df54e IV |
1399 | /* Splay tree containing information about mapped memory regions. */ |
1400 | struct splay_tree_s mem_map; | |
ea4b23d9 | 1401 | struct reverse_splay_tree_s mem_map_rev; |
41dbbb37 TS |
1402 | |
1403 | /* Mutex for the mutable data. */ | |
1404 | gomp_mutex_t lock; | |
1405 | ||
d84ffc0a IV |
1406 | /* Current state of the device. OpenACC allows to move from INITIALIZED state |
1407 | back to UNINITIALIZED state. OpenMP allows only to move from INITIALIZED | |
1408 | to FINALIZED state (at program shutdown). */ | |
1409 | enum gomp_device_state state; | |
41dbbb37 | 1410 | |
41dbbb37 | 1411 | /* OpenACC-specific data and functions. */ |
47afc7b4 | 1412 | /* This is mutable because of its mutable target_data member. */ |
41dbbb37 TS |
1413 | acc_dispatch_t openacc; |
1414 | }; | |
1415 | ||
d9a6bd32 JJ |
1416 | /* Kind of the pragma, for which gomp_map_vars () is called. */ |
1417 | enum gomp_map_vars_kind | |
1418 | { | |
9e628024 CLT |
1419 | GOMP_MAP_VARS_OPENACC = 1, |
1420 | GOMP_MAP_VARS_TARGET = 2, | |
1421 | GOMP_MAP_VARS_DATA = 4, | |
1422 | GOMP_MAP_VARS_ENTER_DATA = 8 | |
d9a6bd32 JJ |
1423 | }; |
1424 | ||
829c6349 CLT |
1425 | extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *, |
1426 | unsigned short *); | |
1f4c5b9b CLT |
1427 | struct gomp_coalesce_buf; |
1428 | extern void gomp_copy_host2dev (struct gomp_device_descr *, | |
1429 | struct goacc_asyncqueue *, void *, const void *, | |
9c41f5b9 | 1430 | size_t, bool, struct gomp_coalesce_buf *); |
1f4c5b9b CLT |
1431 | extern void gomp_copy_dev2host (struct gomp_device_descr *, |
1432 | struct goacc_asyncqueue *, void *, const void *, | |
1433 | size_t); | |
5bcd470b | 1434 | extern uintptr_t gomp_map_val (struct target_mem_desc *, void **, size_t); |
5d5be7bf JB |
1435 | extern void gomp_attach_pointer (struct gomp_device_descr *, |
1436 | struct goacc_asyncqueue *, splay_tree, | |
1437 | splay_tree_key, uintptr_t, size_t, | |
0ab29cf0 | 1438 | struct gomp_coalesce_buf *, bool); |
5d5be7bf JB |
1439 | extern void gomp_detach_pointer (struct gomp_device_descr *, |
1440 | struct goacc_asyncqueue *, splay_tree_key, | |
1441 | uintptr_t, bool, struct gomp_coalesce_buf *); | |
275c736e CLT |
1442 | extern struct target_mem_desc *goacc_map_vars (struct gomp_device_descr *, |
1443 | struct goacc_asyncqueue *, | |
1444 | size_t, void **, void **, | |
1445 | size_t *, void *, bool, | |
1446 | enum gomp_map_vars_kind); | |
1447 | extern void goacc_unmap_vars (struct target_mem_desc *, bool, | |
1448 | struct goacc_asyncqueue *); | |
41dbbb37 | 1449 | extern void gomp_init_device (struct gomp_device_descr *); |
1f4c5b9b | 1450 | extern bool gomp_fini_device (struct gomp_device_descr *); |
22be2349 | 1451 | extern void gomp_unload_device (struct gomp_device_descr *); |
829c6349 | 1452 | extern bool gomp_remove_var (struct gomp_device_descr *, splay_tree_key); |
1cbd94e8 JB |
1453 | extern void gomp_remove_var_async (struct gomp_device_descr *, splay_tree_key, |
1454 | struct goacc_asyncqueue *); | |
41dbbb37 | 1455 | |
953ff289 DN |
1456 | /* work.c */ |
1457 | ||
28567c40 | 1458 | extern void gomp_init_work_share (struct gomp_work_share *, size_t, unsigned); |
a68ab351 | 1459 | extern void gomp_fini_work_share (struct gomp_work_share *); |
28567c40 | 1460 | extern bool gomp_work_share_start (size_t); |
953ff289 | 1461 | extern void gomp_work_share_end (void); |
acf0174b | 1462 | extern bool gomp_work_share_end_cancel (void); |
953ff289 DN |
1463 | extern void gomp_work_share_end_nowait (void); |
1464 | ||
a68ab351 JJ |
1465 | static inline void |
1466 | gomp_work_share_init_done (void) | |
1467 | { | |
1468 | struct gomp_thread *thr = gomp_thread (); | |
1469 | if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) | |
1470 | gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); | |
1471 | } | |
1472 | ||
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

/* Symbol versioning requires the whole toolchain to cooperate: visibility
   and alias attributes, an assembler that understands .symver, PIC, and
   runtime support for symbol renaming.  If any piece is missing, fall
   back to unversioned symbols.  */
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif


#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
/* OpenMP 3.0 lock implementations; exported as the default (OMP_3.0)
   versions of the omp_*_lock entry points via omp_lock_symver below.  */
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

/* Legacy OpenMP 2.5 lock ABI, kept for binaries linked against the old
   OMP_1.0 symbol version.  */
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

/* Bind g<fn>_30 as the default version and g<fn>_25 as the OMP_1.0
   compatibility version of the public symbol <fn>.  */
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
/* No symbol versioning: the _30 implementations simply ARE the public
   entry points.  */
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

/* attribute copy (propagate attributes from the alias target) was added
   in GCC 9.  */
#if __GNUC__ >= 9
# define HAVE_ATTRIBUTE_COPY
#endif

#ifdef HAVE_ATTRIBUTE_COPY
# define attribute_copy(arg) __attribute__ ((copy (arg)))
#else
# define attribute_copy(arg)
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
/* Define AL as a public alias of FN.  */
# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn))) attribute_copy (fn);

/* Stringified __USER_LABEL_PREFIX__ (e.g. "_" on targets that prefix C
   symbols), needed to spell asm-level names below.  */
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
/* Create a hidden internal alias gomp_ialias_<fn> so intra-library calls
   bypass the PLT; ialias_redirect makes references to <fn> resolve to
   that hidden alias, and ialias_call names it directly.  */
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden attribute_copy (fn);
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
/* No alias support: calls just go to the public symbol.  */
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif

e4606348 JJ |
1567 | /* Helper function for priority_node_to_task() and |
1568 | task_to_priority_node(). | |
1569 | ||
1570 | Return the offset from a task to its priority_node entry. The | |
1571 | priority_node entry is has a type of TYPE. */ | |
1572 | ||
1573 | static inline size_t | |
1574 | priority_queue_offset (enum priority_queue_type type) | |
1575 | { | |
1576 | return offsetof (struct gomp_task, pnode[(int) type]); | |
1577 | } | |
1578 | ||
1579 | /* Return the task associated with a priority NODE of type TYPE. */ | |
1580 | ||
1581 | static inline struct gomp_task * | |
1582 | priority_node_to_task (enum priority_queue_type type, | |
1583 | struct priority_node *node) | |
1584 | { | |
1585 | return (struct gomp_task *) ((char *) node - priority_queue_offset (type)); | |
1586 | } | |
1587 | ||
1588 | /* Return the priority node of type TYPE for a given TASK. */ | |
1589 | ||
1590 | static inline struct priority_node * | |
1591 | task_to_priority_node (enum priority_queue_type type, | |
1592 | struct gomp_task *task) | |
1593 | { | |
1594 | return (struct priority_node *) ((char *) task | |
1595 | + priority_queue_offset (type)); | |
1596 | } | |

#ifdef LIBGOMP_USE_PTHREADS
/* Return a handle identifying the calling thread; with pthreads this is
   just pthread_self.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return pthread_self ();
}

/* Return the thread handle for the thread whose libgomp state is THR,
   without requiring that thread to be the caller.  */
static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  struct gomp_thread *this_thr = gomp_thread ();
  if (thr == this_thr)
    return pthread_self ();
#ifdef GOMP_NEEDS_THREAD_HANDLE
  /* The handle was stashed in the descriptor at thread creation.  */
  return thr->handle;
#else
  /* On Linux with initial-exec TLS, the pthread_t of the thread containing
     thr can be computed from thr, this_thr and pthread_self (),
     as the distance between this_thr and pthread_self () is constant.
     (This treats pthread_t as an integer; only valid on targets that
     define it so.)  */
  return pthread_self () + ((uintptr_t) thr - (uintptr_t) this_thr);
#endif
}
#else
/* Single-threaded build: there is no real thread identity, so hand back
   a zero-initialized dummy handle.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return (gomp_thread_handle) {};
}

static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  (void) thr;
  return gomp_thread_self ();
}
#endif

953ff289 | 1635 | #endif /* LIBGOMP_H */ |