]>
Commit | Line | Data |
---|---|---|
8e8f6434 | 1 | /* Copyright (C) 2005-2018 Free Software Foundation, Inc. |
1e8e9920 | 2 | Contributed by Richard Henderson <rth@redhat.com>. |
3 | ||
c35c9a62 | 4 | This file is part of the GNU Offloading and Multi Processing Library |
5 | (libgomp). | |
1e8e9920 | 6 | |
7 | Libgomp is free software; you can redistribute it and/or modify it | |
6bc9506f | 8 | under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) | |
10 | any later version. | |
1e8e9920 | 11 | |
12 | Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
6bc9506f | 14 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
1e8e9920 | 15 | more details. |
16 | ||
6bc9506f | 17 | Under Section 7 of GPL version 3, you are granted additional |
18 | permissions described in the GCC Runtime Library Exception, version | |
19 | 3.1, as published by the Free Software Foundation. | |
1e8e9920 | 20 | |
6bc9506f | 21 | You should have received a copy of the GNU General Public License and |
22 | a copy of the GCC Runtime Library Exception along with this program; | |
23 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 | <http://www.gnu.org/licenses/>. */ | |
1e8e9920 | 25 | |
26 | /* This file contains data types and function declarations that are not | |
ca4c3545 | 27 | part of the official OpenACC or OpenMP user interfaces. There are |
28 | declarations in here that are part of the GNU Offloading and Multi | |
29 | Processing ABI, in that the compiler is required to know about them | |
30 | and use them. | |
1e8e9920 | 31 | |
   The convention is that the all caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower case prefix "gomp"
   is used to group items that are completely private to the library.  */
35 | ||
36 | #ifndef LIBGOMP_H | |
37 | #define LIBGOMP_H 1 | |
38 | ||
43895be5 | 39 | #ifndef _LIBGOMP_CHECKING_ |
40 | /* Define to 1 to perform internal sanity checks. */ | |
41 | #define _LIBGOMP_CHECKING_ 0 | |
42 | #endif | |
43 | ||
1e8e9920 | 44 | #include "config.h" |
01473a49 | 45 | #include "gstdint.h" |
ca4c3545 | 46 | #include "libgomp-plugin.h" |
8655b2ce | 47 | #include "gomp-constants.h" |
1e8e9920 | 48 | |
44a69dfd | 49 | #ifdef HAVE_PTHREAD_H |
1e8e9920 | 50 | #include <pthread.h> |
44a69dfd | 51 | #endif |
1e8e9920 | 52 | #include <stdbool.h> |
bc7bff74 | 53 | #include <stdlib.h> |
ca4c3545 | 54 | #include <stdarg.h> |
1e8e9920 | 55 | |
a9833286 | 56 | /* Needed for memset in priority_queue.c. */ |
57 | #if _LIBGOMP_CHECKING_ | |
58 | # ifdef STRING_WITH_STRINGS | |
59 | # include <string.h> | |
60 | # include <strings.h> | |
61 | # else | |
62 | # ifdef HAVE_STRING_H | |
63 | # include <string.h> | |
64 | # else | |
65 | # ifdef HAVE_STRINGS_H | |
66 | # include <strings.h> | |
67 | # endif | |
68 | # endif | |
69 | # endif | |
70 | #endif | |
71 | ||
1e8e9920 | 72 | #ifdef HAVE_ATTRIBUTE_VISIBILITY |
73 | # pragma GCC visibility push(hidden) | |
74 | #endif | |
75 | ||
/* If we were a C++ library, we'd get this from <std/atomic>.
   Presumably the numeric values match GCC's __ATOMIC_* builtin
   constants — confirm before adding entries.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5
};
86 | ||
/* alloc.c */

#if defined(HAVE_ALIGNED_ALLOC) \
    || defined(HAVE__ALIGNED_MALLOC) \
    || defined(HAVE_POSIX_MEMALIGN) \
    || defined(HAVE_MEMALIGN)
/* Defined if gomp_aligned_alloc doesn't use fallback version
   and free can be used instead of gomp_aligned_free.  */
#define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1
#endif

/* Allocation wrappers implemented in alloc.c.  */
extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
extern void *gomp_aligned_alloc (size_t, size_t)
  __attribute__((malloc, alloc_size (2)));
extern void gomp_aligned_free (void *);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x)  __builtin_alloca(x)
108 | ||
/* error.c */

extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
	__attribute__ ((format (printf, 2, 3)));
/* The macros below shadow the two functions above so the cheap
   gomp_debug_var test happens inline at every call site and the
   varargs call is made only when debugging is enabled.  The
   parenthesized (gomp_vdebug)/(gomp_debug) forms suppress macro
   expansion and call the real functions.  */
#define gomp_vdebug(KIND, FMT, VALIST) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_vdebug) ((KIND), (FMT), (VALIST)); \
  } while (0)
#define gomp_debug(KIND, ...) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_debug) ((KIND), __VA_ARGS__); \
  } while (0)
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
	__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
	__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
	__attribute__ ((noreturn, format (printf, 1, 2)));
131 | ||
132 | struct gomp_task; | |
133 | struct gomp_taskgroup; | |
134 | struct htab; | |
135 | ||
136 | #include "priority_queue.h" | |
1e8e9920 | 137 | #include "sem.h" |
138 | #include "mutex.h" | |
139 | #include "bar.h" | |
44a69dfd | 140 | #include "simple-bar.h" |
fd6481cf | 141 | #include "ptrlock.h" |
1e8e9920 | 142 | |
143 | ||
144 | /* This structure contains the data to control one work-sharing construct, | |
145 | either a LOOP (FOR/DO) or a SECTIONS. */ | |
146 | ||
/* Loop schedule kinds; the SECTIONS construct always uses GFS_DYNAMIC.  */
enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO,
  /* High bit — presumably or'ed into one of the kinds above as a
     monotonic-modifier flag rather than a standalone kind; confirm
     against the loop start routines.  */
  GFS_MONOTONIC = 0x80000000U
};
156 | ||
/* Bookkeeping for an OpenMP doacross (ordered depend) loop nest,
   hanging off a gomp_work_share via its doacross pointer.  */
struct gomp_doacross_work_share
{
  union {
    /* chunk_size copy, as ws->chunk_size is multiplied by incr for
       GFS_DYNAMIC.  */
    long chunk_size;
    /* Likewise, but for ull implementation.  */
    unsigned long long chunk_size_ull;
    /* For schedule(static,0) this is the number
       of iterations assigned to the last thread, i.e. number of
       iterations / number of threads.  */
    long q;
    /* Likewise, but for ull implementation.  */
    unsigned long long q_ull;
  };
  /* Size of each array entry (padded to cache line size).  */
  unsigned long elt_sz;
  /* Number of dimensions in sink vectors.  */
  unsigned int ncounts;
  /* True if the iterations can be flattened.  */
  bool flattened;
  /* Actual array (of elt_sz sized units), aligned to cache line size.
     This is indexed by team_id for GFS_STATIC and outermost iteration
     / chunk_size for other schedules.  */
  unsigned char *array;
  /* These two are only used for schedule(static,0).  */
  /* This one is number of iterations % number of threads.  */
  long t;
  union {
    /* And this one is cached t * (q + 1).  */
    long boundary;
    /* Likewise, but for the ull implementation.  */
    unsigned long long boundary_ull;
  };
  /* Pointer to extra memory if needed for lastprivate(conditional).  */
  void *extra;
  /* Array of shift counts for each dimension if they can be flattened.
     Flexible array member — allocated past the end of the struct.  */
  unsigned int shift_counts[];
};
196 | ||
struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  /* NOTE(review): encoding of this scheduling mode value is established
     where work shares are initialized — confirm in loop.c/work.c.  */
  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  union {
    /* This is a circular queue that details which threads will be allowed
       into the ordered region and in which order.  When a thread allocates
       iterations on which it is going to work, it also registers itself at
       the end of the array.  When a thread reaches the ordered region, it
       checks to see if it is the one at the head of the queue.  If not, it
       blocks on its RELEASE semaphore.  */
    unsigned *ordered_team_ids;

    /* This is a pointer to DOACROSS work share data.  */
    struct gomp_doacross_work_share *doacross;
  };

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this the iteration start point and never changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* Task reductions for this work-sharing construct.  */
  uintptr_t *task_reductions;

  /* If only few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};
301 | ||
302 | /* This structure contains all of the thread-local data associated with | |
303 | a thread team. This is the data that must be saved when a thread | |
304 | encounters a nested PARALLEL construct. */ | |
305 | ||
struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};
349 | ||
bc7bff74 | 350 | struct target_mem_desc; |
351 | ||
352 | /* These are the OpenMP 4.0 Internal Control Variables described in | |
fd6481cf | 353 | section 2.3.1. Those described as having one copy per task are |
354 | stored within the structure; those described as having one copy | |
355 | for the whole program are (naturally) global variables. */ | |
bc7bff74 | 356 | |
/* Per-task copies of the OpenMP Internal Control Variables; see the
   section comment above.  One instance is embedded in each gomp_task.  */
struct gomp_task_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_chunk_size;
  int default_device_var;
  unsigned int thread_limit_var;
  bool dyn_var;
  bool nest_var;
  char bind_var;
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};
1e8e9920 | 370 | |
/* Program-wide ICVs and other library-global state; presumably
   initialized from environment variables at startup — confirm in env.c.  */
extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
extern int gomp_debug_var;
extern bool gomp_display_affinity_var;
extern char *gomp_affinity_format_var;
extern size_t gomp_affinity_format_len;
/* OpenACC counterparts.  */
extern int goacc_device_num;
extern char *goacc_device_type;
extern int goacc_default_dims[GOMP_DIM_MAX];
1e8e9920 | 393 | |
/* Life-cycle states of a task.  */
enum gomp_task_kind
{
  /* Implicit task.  */
  GOMP_TASK_IMPLICIT,
  /* Undeferred task.  */
  GOMP_TASK_UNDEFERRED,
  /* Task created by GOMP_task and waiting to be run.  */
  GOMP_TASK_WAITING,
  /* Task currently executing or scheduled and about to execute.  */
  GOMP_TASK_TIED,
  /* Used for target tasks that have vars mapped and async run started,
     but not yet completed.  Once that completes, they will be readded
     into the queues as GOMP_TASK_WAITING in order to perform the var
     unmapping.  */
  GOMP_TASK_ASYNC_RUNNING
};
1e8e9920 | 410 | |
/* One entry in a task's depend[] trailing array; doubly linked so
   entries can be removed as dependencies are satisfied.  */
struct gomp_task_depend_entry
{
  /* Address of dependency.  */
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  /* Task that provides the dependency in ADDR.  */
  struct gomp_task *task;
  /* Depend entry is of type "IN".  */
  bool is_in;
  bool redundant;
  bool redundant_out;
};
424 | ||
/* Growable vector of tasks that depend on a given task; N_ELEM of the
   ALLOCATED slots in ELEM[] are in use.  */
struct gomp_dependers_vec
{
  size_t n_elem;
  size_t allocated;
  struct gomp_task *elem[];
};
431 | ||
96013422 | 432 | /* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */ |
433 | ||
struct gomp_taskwait
{
  bool in_taskwait;
  bool in_depend_wait;
  /* Number of tasks we are waiting for.  */
  size_t n_depend;
  /* Semaphore the waiting thread blocks on until the children/
     dependencies it is waiting for have completed.  */
  gomp_sem_t taskwait_sem;
};
442 | ||
fd6481cf | 443 | /* This structure describes a "task" to be run by a thread. */ |
1e8e9920 | 444 | |
struct gomp_task
{
  /* Parent of this task.  */
  struct gomp_task *parent;
  /* Children of this task.  */
  struct priority_queue children_queue;
  /* Taskgroup this task belongs in.  */
  struct gomp_taskgroup *taskgroup;
  /* Tasks that depend on this task.  */
  struct gomp_dependers_vec *dependers;
  struct htab *depend_hash;
  struct gomp_taskwait *taskwait;
  /* Number of items in DEPEND.  */
  size_t depend_count;
  /* Number of tasks this task depends on.  Once this counter reaches
     0, we have no unsatisfied dependencies, and this task can be put
     into the various queues to be scheduled.  */
  size_t num_dependees;

  /* Priority of this task.  */
  int priority;
  /* The priority node for this task in each of the different queues.
     We put this here to avoid allocating space for each priority
     node.  Then we play offsetof() games to convert between pnode[]
     entries and the gomp_task in which they reside.  */
  struct priority_node pnode[3];

  /* Per-task copy of the ICVs, captured at task creation.  */
  struct gomp_task_icv icv;
  /* Task body and its argument.  */
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_tied_task;
  bool final_task;
  bool copy_ctors_done;
  /* Set for undeferred tasks with unsatisfied dependencies which
     block further execution of their parent until the dependencies
     are satisfied.  */
  bool parent_depends_on;
  /* Dependencies provided and/or needed for this task.  DEPEND_COUNT
     is the number of items available.  */
  struct gomp_task_depend_entry depend[];
};
487 | ||
a9833286 | 488 | /* This structure describes a single #pragma omp taskgroup. */ |
489 | ||
struct gomp_taskgroup
{
  /* Enclosing taskgroup, forming a stack of nested taskgroups.  */
  struct gomp_taskgroup *prev;
  /* Queue of tasks that belong in this taskgroup.  */
  struct priority_queue taskgroup_queue;
  uintptr_t *reductions;
  bool in_taskgroup_wait;
  bool cancelled;
  bool workshare;
  gomp_sem_t taskgroup_sem;
  size_t num_children;
};
502 | ||
/* Various state of OpenMP async offloading tasks.  */
enum gomp_target_task_state
{
  GOMP_TARGET_TASK_DATA,
  GOMP_TARGET_TASK_BEFORE_MAP,
  GOMP_TARGET_TASK_FALLBACK,
  GOMP_TARGET_TASK_READY_TO_RUN,
  GOMP_TARGET_TASK_RUNNING,
  GOMP_TARGET_TASK_FINISHED
};
513 | ||
514 | /* This structure describes a target task. */ | |
515 | ||
/* This structure describes a target task.  */
struct gomp_target_task
{
  /* Device the target region is offloaded to.  */
  struct gomp_device_descr *devicep;
  void (*fn) (void *);
  size_t mapnum;
  size_t *sizes;
  unsigned short *kinds;
  unsigned int flags;
  enum gomp_target_task_state state;
  struct target_mem_desc *tgt;
  struct gomp_task *task;
  struct gomp_team *team;
  /* Device-specific target arguments.  */
  void **args;
  /* Trailing array of MAPNUM host addresses.  */
  void *hostaddrs[];
};
532 | ||
fd6481cf | 533 | /* This structure describes a "team" of threads. These are the threads |
534 | that are spawned by a PARALLEL constructs, as well as the work sharing | |
535 | constructs that the team encounters. */ | |
536 | ||
struct gomp_team
{
  /* This is the number of threads in the current team.  */
  unsigned nthreads;

  /* This is number of gomp_work_share structs that have been allocated
     as a block last time.  */
  unsigned work_share_chunk;

  /* This is the saved team state that applied to a master thread before
     the current thread was created.  */
  struct gomp_team_state prev_ts;

  /* This semaphore should be used by the master thread instead of its
     "native" semaphore in the thread structure.  Required for nested
     parallels, as the master is a member of two teams.  */
  gomp_sem_t master_release;

  /* This points to an array with pointers to the release semaphore
     of the threads in the team.  */
  gomp_sem_t **ordered_release;

  /* List of work shares on which gomp_fini_work_share hasn't been
     called yet.  If the team hasn't been cancelled, this should be
     equal to each thr->ts.work_share, but otherwise it can be a possibly
     long list of workshares.  */
  struct gomp_work_share *work_shares_to_free;

  /* List of gomp_work_share structs chained through next_free fields.
     This is populated and taken off only by the first thread in the
     team encountering a new work sharing construct, in a critical
     section.  */
  struct gomp_work_share *work_share_list_alloc;

  /* List of gomp_work_share structs freed by free_work_share.  New
     entries are atomically added to the start of the list, and
     alloc_work_share can safely only move all but the first entry
     to work_share_list alloc, as free_work_share can happen concurrently
     with alloc_work_share.  */
  struct gomp_work_share *work_share_list_free;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of simple single regions encountered by threads in this
     team.  */
  unsigned long single_count;
#else
  /* Mutex protecting addition of workshares to work_share_list_free.  */
  gomp_mutex_t work_share_list_free_lock;
#endif

  /* This barrier is used for most synchronization of the team.  */
  gomp_barrier_t barrier;

  /* Initial work shares, to avoid allocating any gomp_work_share
     structs in the common case.  */
  struct gomp_work_share work_shares[8];

  /* Protects the task bookkeeping fields below.  */
  gomp_mutex_t task_lock;
  /* Scheduled tasks.  */
  struct priority_queue task_queue;
  /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team.  */
  unsigned int task_count;
  /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled.  */
  unsigned int task_queued_count;
  /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
     directly in gomp_barrier_handle_tasks; tasks spawned
     from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
     that is called from a task run from gomp_barrier_handle_tasks.
     task_running_count should be always <= team->nthreads,
     and if current task isn't in_tied_task, then it will be
     even < team->nthreads.  */
  unsigned int task_running_count;
  int work_share_cancelled;
  int team_cancelled;

  /* This array contains structures for implicit tasks.  */
  struct gomp_task implicit_task[];
};
615 | ||
616 | /* This structure contains all data that is private to libgomp and is | |
617 | allocated per thread. */ | |
618 | ||
struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* Place this thread is bound to plus one, or zero if not bound
     to any place.  */
  unsigned int place;

  /* User pthread thread pool */
  struct gomp_thread_pool *thread_pool;

#if defined(LIBGOMP_USE_PTHREADS) \
    && (!defined(HAVE_TLS) \
	|| !defined(__GLIBC__) \
	|| !defined(USING_INITIAL_EXEC_TLS))
  /* pthread_t of the thread containing this gomp_thread.
     On Linux when using initial-exec TLS,
     (typeof (pthread_t)) gomp_thread () - pthread_self ()
     is constant in all threads, so we can optimize and not
     store it.  */
#define GOMP_NEEDS_THREAD_HANDLE 1
  pthread_t handle;
#endif
};
655 | ||
656 | ||
struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  /* The last team is used for non-nested teams to delay their destruction to
     make sure all the threads in the team move on to the pool's barrier before
     the team's barrier is destroyed.  */
  struct gomp_team *last_team;
  /* Number of threads running in this contention group.  */
  unsigned long threads_busy;

  /* This barrier holds and releases threads waiting in thread pools.  */
  gomp_simple_barrier_t threads_dock;
};
674 | ||
/* Construct kinds a cancellation can apply to; distinct bits so they
   can be combined, with FOR/DO as language-specific aliases of LOOP.  */
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
684 | ||
1e8e9920 | 685 | /* ... and here is that TLS data. */ |
686 | ||
/* Accessor for the current thread's gomp_thread, with three
   platform-dependent implementations: nvptx (indexed by hardware
   thread id), native/emulated TLS, and pthread TSD fallback.  */
#if defined __nvptx__
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
708 | ||
fd6481cf | 709 | extern struct gomp_task_icv *gomp_new_icv (void); |
710 | ||
711 | /* Here's how to access the current copy of the ICVs. */ | |
1e8e9920 | 712 | |
fd6481cf | 713 | static inline struct gomp_task_icv *gomp_icv (bool write) |
714 | { | |
715 | struct gomp_task *task = gomp_thread ()->task; | |
716 | if (task) | |
717 | return &task->icv; | |
718 | else if (write) | |
719 | return gomp_new_icv (); | |
720 | else | |
721 | return &gomp_global_icv; | |
722 | } | |
ba893327 | 723 | |
44a69dfd | 724 | #ifdef LIBGOMP_USE_PTHREADS |
ba893327 | 725 | /* The attributes to be used during thread creation. */ |
726 | extern pthread_attr_t gomp_thread_attr; | |
1e8e9920 | 727 | |
65cb1e66 | 728 | extern pthread_key_t gomp_thread_destructor; |
44a69dfd | 729 | #endif |
65cb1e66 | 730 | |
1e8e9920 | 731 | /* Function prototypes. */ |
732 | ||
/* affinity.c */

extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
				    long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
extern void gomp_display_affinity_place (char *, size_t, size_t *, int);

/* affinity-fmt.c */

extern void gomp_set_affinity_format (const char *, size_t);
extern void gomp_display_string (char *, size_t, size_t *, const char *,
				 size_t);
/* Opaque thread handle: a real pthread_t under pthreads, an empty
   struct otherwise so signatures stay uniform.  */
#ifdef LIBGOMP_USE_PTHREADS
typedef pthread_t gomp_thread_handle;
#else
typedef struct {} gomp_thread_handle;
#endif
extern size_t gomp_display_affinity (char *, size_t, const char *,
				     gomp_thread_handle,
				     struct gomp_team_state *, unsigned int);
extern void gomp_display_affinity_thread (gomp_thread_handle,
					  struct gomp_team_state *,
					  unsigned int) __attribute__((cold));
8f697de6 | 768 | |
/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

/* Lock-free variants, available only when the compiler provides atomic
   builtins.  */
#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
				      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
					       unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
					      unsigned long long *);

/* Lock-free unsigned-long-long variants; additionally require a 64-bit
   target (__LP64__) so the atomics cover the full iterator width.  */
#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
					unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
				       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long, size_t);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
				    unsigned long long, size_t);
1e8e9920 | 807 | |
/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
			    struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
				     void (*) (void *), size_t, void **,
				     size_t *, unsigned short *, unsigned int,
				     void **, void **,
				     enum gomp_target_task_state);
extern struct gomp_taskgroup *gomp_parallel_reduction_register (uintptr_t *,
								unsigned);
extern void gomp_workshare_taskgroup_start (void);
extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *);
fd6481cf | 833 | |
834 | static void inline | |
835 | gomp_finish_task (struct gomp_task *task) | |
836 | { | |
bc7bff74 | 837 | if (__builtin_expect (task->depend_hash != NULL, 0)) |
838 | free (task->depend_hash); | |
fd6481cf | 839 | } |
840 | ||
/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
			     unsigned, struct gomp_team *,
			     struct gomp_taskgroup *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
extern int gomp_pause_host (void);

/* target.c */

extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);

/* Splay tree definitions.  The struct bodies follow below; these typedefs
   let prototypes use the pointer types first.  */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;
861 | ||
/* Per-variable entry in a target_mem_desc's trailing LIST array (one per
   mapped variable of the region).  */
struct target_var_desc {
  /* Splay key.  */
  splay_tree_key key;
  /* True if data should be copied from device to host at the end.  */
  bool copy_from;
  /* True if data always should be copied from device to host at the end.  */
  bool always_copy_from;
  /* Relative offset against key host_start.  */
  uintptr_t offset;
  /* Actual length.  */
  uintptr_t length;
};
874 | ||
/* Descriptor for one block of target (device) memory and the set of
   variables mapped into it.  */
struct target_mem_desc {
  /* Reference count.  */
  uintptr_t refcount;
  /* All the splay nodes allocated together.  */
  splay_tree_node array;
  /* Start of the target region.  */
  uintptr_t tgt_start;
  /* End of the target region.  */
  uintptr_t tgt_end;
  /* Handle to free.  */
  void *to_free;
  /* Previous target_mem_desc.  */
  struct target_mem_desc *prev;
  /* Number of items in following list.  */
  size_t list_count;

  /* Corresponding target device descriptor.  */
  struct gomp_device_descr *device_descr;

  /* List of target items to remove (or decrease refcount)
     at the end of region.  Flexible array member; allocated with
     LIST_COUNT entries.  */
  struct target_var_desc list[];
};

/* Special value for refcount - infinity (mapping is never unmapped by
   refcount reaching zero).  */
#define REFCOUNT_INFINITY (~(uintptr_t) 0)
/* Special value for refcount - tgt_offset contains target address of the
   artificial pointer to "omp declare target link" object.  */
#define REFCOUNT_LINK (~(uintptr_t) 1)
43895be5 | 904 | |
/* One mapping entry: a half-open host address range [host_start, host_end)
   together with where it lives in target memory.  */
struct splay_tree_key_s {
  /* Address of the host object.  */
  uintptr_t host_start;
  /* Address immediately after the host object.  */
  uintptr_t host_end;
  /* Descriptor of the target memory.  */
  struct target_mem_desc *tgt;
  /* Offset from tgt->tgt_start to the start of the target object.  */
  uintptr_t tgt_offset;
  /* Reference count.  May also hold the special REFCOUNT_INFINITY or
     REFCOUNT_LINK values.  */
  uintptr_t refcount;
  /* Dynamic reference count.  */
  uintptr_t dynamic_refcount;
  /* Pointer to the original mapping of "omp declare target link" object.  */
  splay_tree_key link_key;
};
921 | ||
a9833286 | 922 | /* The comparison function. */ |
923 | ||
924 | static inline int | |
925 | splay_compare (splay_tree_key x, splay_tree_key y) | |
926 | { | |
927 | if (x->host_start == x->host_end | |
928 | && y->host_start == y->host_end) | |
929 | return 0; | |
930 | if (x->host_end <= y->host_start) | |
931 | return -1; | |
932 | if (x->host_start >= y->host_end) | |
933 | return 1; | |
934 | return 0; | |
935 | } | |
936 | ||
ca4c3545 | 937 | #include "splay-tree.h" |
938 | ||
/* Table of OpenACC plugin entry points; each member is a pointer with the
   exact type of the corresponding GOMP_OFFLOAD_openacc_* plugin function.  */
typedef struct acc_dispatch_t
{
  /* This is a linked list of data mapped using the
     acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas.
     Unlike mapped_data in the goacc_thread struct, unmapping can
     happen out-of-order with respect to mapping.  */
  /* This is guarded by the lock in the "outer" struct gomp_device_descr.  */
  struct target_mem_desc *data_environ;

  /* Execute.  */
  __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;

  /* Async cleanup callback registration.  */
  __typeof (GOMP_OFFLOAD_openacc_register_async_cleanup)
    *register_async_cleanup_func;

  /* Asynchronous routines.  */
  __typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func;
  __typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func;
  __typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func;
  __typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func;
  __typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func;
  __typeof (GOMP_OFFLOAD_openacc_async_wait_all_async)
    *async_wait_all_async_func;
  __typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func;

  /* Create/destroy TLS data.  */
  __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
  __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
    *destroy_thread_data_func;

  /* NVIDIA target specific routines.  */
  struct {
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
      *get_current_device_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
      *get_current_context_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
  } cuda;
} acc_dispatch_t;
980 | ||
/* Various state of the accelerator device.  */
enum gomp_device_state
{
  GOMP_DEVICE_UNINITIALIZED,
  GOMP_DEVICE_INITIALIZED,
  /* Terminal state reached at program shutdown; see the state member of
     struct gomp_device_descr for the allowed transitions.  */
  GOMP_DEVICE_FINALIZED
};
988 | ||
/* This structure describes accelerator device.
   It contains name of the corresponding libgomp plugin, function handlers for
   interaction with the device, ID-number of the device, and information about
   mapped memory.  */
struct gomp_device_descr
{
  /* Immutable data, which is only set during initialization, and which is not
     guarded by the lock.  */

  /* The name of the device.  */
  const char *name;

  /* Capabilities of device (supports OpenACC, OpenMP).  */
  unsigned int capabilities;

  /* This is the ID number of device among devices of the same type.  */
  int target_id;

  /* This is the TYPE of device.  */
  enum offload_target_type type;

  /* Function handlers.  Each pointer has the exact type of the matching
     GOMP_OFFLOAD_* plugin entry point, resolved at plugin load time.  */
  __typeof (GOMP_OFFLOAD_get_name) *get_name_func;
  __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
  __typeof (GOMP_OFFLOAD_get_type) *get_type_func;
  __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
  __typeof (GOMP_OFFLOAD_init_device) *init_device_func;
  __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
  __typeof (GOMP_OFFLOAD_version) *version_func;
  __typeof (GOMP_OFFLOAD_load_image) *load_image_func;
  __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
  __typeof (GOMP_OFFLOAD_alloc) *alloc_func;
  __typeof (GOMP_OFFLOAD_free) *free_func;
  __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
  __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
  __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
  __typeof (GOMP_OFFLOAD_can_run) *can_run_func;
  __typeof (GOMP_OFFLOAD_run) *run_func;
  __typeof (GOMP_OFFLOAD_async_run) *async_run_func;

  /* Splay tree containing information about mapped memory regions.  */
  struct splay_tree_s mem_map;

  /* Mutex for the mutable data.  */
  gomp_mutex_t lock;

  /* Current state of the device.  OpenACC allows to move from INITIALIZED state
     back to UNINITIALIZED state.  OpenMP allows only to move from INITIALIZED
     to FINALIZED state (at program shutdown).  */
  enum gomp_device_state state;

  /* OpenACC-specific data and functions.  */
  /* This is mutable because of its mutable data_environ and target_data
     members.  */
  acc_dispatch_t openacc;
};
1045 | ||
/* Kind of the pragma, for which gomp_map_vars () is called.  */
enum gomp_map_vars_kind
{
  GOMP_MAP_VARS_OPENACC,
  GOMP_MAP_VARS_TARGET,
  GOMP_MAP_VARS_DATA,
  GOMP_MAP_VARS_ENTER_DATA
};

extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, size_t, bool, int, int, int);
extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *,
				       unsigned short *);

extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
					      size_t, void **, void **,
					      size_t *, void *, bool,
					      enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
extern void gomp_unload_device (struct gomp_device_descr *);
extern bool gomp_remove_var (struct gomp_device_descr *, splay_tree_key);
ca4c3545 | 1069 | |
/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, size_t, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (size_t);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
1078 | ||
fd6481cf | 1079 | static inline void |
1080 | gomp_work_share_init_done (void) | |
1081 | { | |
1082 | struct gomp_thread *thr = gomp_thread (); | |
1083 | if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) | |
1084 | gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); | |
1085 | } | |
1086 | ||
1e8e9920 | 1087 | #ifdef HAVE_ATTRIBUTE_VISIBILITY |
1088 | # pragma GCC visibility pop | |
1089 | #endif | |
1090 | ||
1091 | /* Now that we're back to default visibility, include the globals. */ | |
1092 | #include "libgomp_g.h" | |
1093 | ||
1094 | /* Include omp.h by parts. */ | |
1095 | #include "omp-lock.h" | |
1096 | #define _LIBGOMP_OMP_LOCK_DEFINED 1 | |
1097 | #include "omp.h.in" | |
1098 | ||
/* GNU symbol versioning needs visibility and alias attributes, the
   assembler .symver directive, PIC, and runtime support for symbol
   renaming; fall back to plain names if any prerequisite is missing.  */
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
/* OpenMP 3.0 flavor of the lock routines (exported as FN@@OMP_3.0).  */
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

/* OpenMP 2.5 compatibility flavor (exported as FN@OMP_1.0).  */
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

/* Emit .symver directives binding gFN_30 to FN@@OMP_3.0 (the default
   version) and gFN_25 to FN@OMP_1.0 (the compat version).  */
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
/* Without symbol versioning the _30 names simply are the public names.  */
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
1145 | ||
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
/* Make AL an exported alias of FN.  */
# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));

/* Stringified __USER_LABEL_PREFIX__ (e.g. "_" on some targets), needed to
   spell raw asm symbol names below.  */
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
/* Define a hidden alias gomp_ialias_FN of FN so that intra-library calls
   bind locally instead of going through the PLT.  */
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
/* Redirect calls to FN at the asm level to the hidden gomp_ialias_FN.  */
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
/* No alias support: calls go straight to the public symbols.  */
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
1170 | ||
a9833286 | 1171 | /* Helper function for priority_node_to_task() and |
1172 | task_to_priority_node(). | |
1173 | ||
1174 | Return the offset from a task to its priority_node entry. The | |
1175 | priority_node entry is has a type of TYPE. */ | |
1176 | ||
1177 | static inline size_t | |
1178 | priority_queue_offset (enum priority_queue_type type) | |
1179 | { | |
1180 | return offsetof (struct gomp_task, pnode[(int) type]); | |
1181 | } | |
1182 | ||
1183 | /* Return the task associated with a priority NODE of type TYPE. */ | |
1184 | ||
1185 | static inline struct gomp_task * | |
1186 | priority_node_to_task (enum priority_queue_type type, | |
1187 | struct priority_node *node) | |
1188 | { | |
1189 | return (struct gomp_task *) ((char *) node - priority_queue_offset (type)); | |
1190 | } | |
1191 | ||
1192 | /* Return the priority node of type TYPE for a given TASK. */ | |
1193 | ||
1194 | static inline struct priority_node * | |
1195 | task_to_priority_node (enum priority_queue_type type, | |
1196 | struct gomp_task *task) | |
1197 | { | |
1198 | return (struct priority_node *) ((char *) task | |
1199 | + priority_queue_offset (type)); | |
1200 | } | |
7e5a76c8 | 1201 | |
#ifdef LIBGOMP_USE_PTHREADS
/* Return the handle of the calling thread.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return pthread_self ();
}

/* Return the pthread_t of the thread whose libgomp state is THR.  */
static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  struct gomp_thread *this_thr = gomp_thread ();
  if (thr == this_thr)
    return pthread_self ();
#ifdef GOMP_NEEDS_THREAD_HANDLE
  /* Targets where the TLS-distance trick below does not hold store the
     handle explicitly.  */
  return thr->handle;
#else
  /* On Linux with initial-exec TLS, the pthread_t of the thread containing
     thr can be computed from thr, this_thr and pthread_self (),
     as the distance between this_thr and pthread_self () is constant.
     NOTE: this treats pthread_t as an integer — valid only on targets
     configured to take this branch.  */
  return pthread_self () + ((uintptr_t) thr - (uintptr_t) this_thr);
#endif
}
#else
/* Threadless configuration: handles are empty placeholder values.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return (gomp_thread_handle) {};
}

static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  (void) thr;
  return gomp_thread_self ();
}
#endif
1238 | ||
1e8e9920 | 1239 | #endif /* LIBGOMP_H */ |