d353bf18 | 1 | /* Copyright (C) 2005-2015 Free Software Foundation, Inc. |
1e8e9920 | 2 | Contributed by Richard Henderson <rth@redhat.com>. |
3 | ||
c35c9a62 | 4 | This file is part of the GNU Offloading and Multi Processing Library |
5 | (libgomp). | |
1e8e9920 | 6 | |
7 | Libgomp is free software; you can redistribute it and/or modify it | |
6bc9506f | 8 | under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) | |
10 | any later version. | |
1e8e9920 | 11 | |
12 | Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
6bc9506f | 14 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
1e8e9920 | 15 | more details. |
16 | ||
6bc9506f | 17 | Under Section 7 of GPL version 3, you are granted additional |
18 | permissions described in the GCC Runtime Library Exception, version | |
19 | 3.1, as published by the Free Software Foundation. | |
1e8e9920 | 20 | |
6bc9506f | 21 | You should have received a copy of the GNU General Public License and |
22 | a copy of the GCC Runtime Library Exception along with this program; | |
23 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 | <http://www.gnu.org/licenses/>. */ | |
1e8e9920 | 25 | |
26 | /* This file contains data types and function declarations that are not | |
ca4c3545 | 27 | part of the official OpenACC or OpenMP user interfaces. There are |
28 | declarations in here that are part of the GNU Offloading and Multi | |
29 | Processing ABI, in that the compiler is required to know about them | |
30 | and use them. | |
1e8e9920 | 31 | |
32 | The convention is that the all caps prefix "GOMP" is used group items | |
33 | that are part of the external ABI, and the lower case prefix "gomp" | |
34 | is used group items that are completely private to the library. */ | |
35 | ||
36 | #ifndef LIBGOMP_H | |
37 | #define LIBGOMP_H 1 | |
38 | ||
39 | #include "config.h" | |
01473a49 | 40 | #include "gstdint.h" |
ca4c3545 | 41 | #include "libgomp-plugin.h" |
1e8e9920 | 42 | |
43 | #include <pthread.h> | |
44 | #include <stdbool.h> | |
bc7bff74 | 45 | #include <stdlib.h> |
ca4c3545 | 46 | #include <stdarg.h> |
1e8e9920 | 47 | |
48 | #ifdef HAVE_ATTRIBUTE_VISIBILITY | |
49 | # pragma GCC visibility push(hidden) | |
50 | #endif | |
51 | ||
/* Memory-order constants, mirroring the values the C++ <atomic> header
   (and GCC's __atomic builtins) use.  Defined here because this is a C
   library.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5
};
62 | ||
1e8e9920 | 63 | #include "sem.h" |
64 | #include "mutex.h" | |
65 | #include "bar.h" | |
fd6481cf | 66 | #include "ptrlock.h" |
1e8e9920 | 67 | |
68 | ||
/* This structure contains the data to control one work-sharing construct,
   either a LOOP (FOR/DO) or a SECTIONS.  */

/* The loop schedule kinds a work share can use.  A user "runtime" schedule
   is resolved to one of the concrete kinds before use.  */
enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO
};
80 | ||
81 | struct gomp_work_share | |
82 | { | |
83 | /* This member records the SCHEDULE clause to be used for this construct. | |
84 | The user specification of "runtime" will already have been resolved. | |
85 | If this is a SECTIONS construct, this value will always be DYNAMIC. */ | |
86 | enum gomp_schedule_type sched; | |
87 | ||
fd6481cf | 88 | int mode; |
1e8e9920 | 89 | |
fd6481cf | 90 | union { |
91 | struct { | |
92 | /* This is the chunk_size argument to the SCHEDULE clause. */ | |
93 | long chunk_size; | |
94 | ||
95 | /* This is the iteration end point. If this is a SECTIONS construct, | |
96 | this is the number of contained sections. */ | |
97 | long end; | |
98 | ||
99 | /* This is the iteration step. If this is a SECTIONS construct, this | |
100 | is always 1. */ | |
101 | long incr; | |
102 | }; | |
103 | ||
104 | struct { | |
105 | /* The same as above, but for the unsigned long long loop variants. */ | |
106 | unsigned long long chunk_size_ull; | |
107 | unsigned long long end_ull; | |
108 | unsigned long long incr_ull; | |
109 | }; | |
110 | }; | |
111 | ||
112 | /* This is a circular queue that details which threads will be allowed | |
113 | into the ordered region and in which order. When a thread allocates | |
114 | iterations on which it is going to work, it also registers itself at | |
115 | the end of the array. When a thread reaches the ordered region, it | |
116 | checks to see if it is the one at the head of the queue. If not, it | |
117 | blocks on its RELEASE semaphore. */ | |
118 | unsigned *ordered_team_ids; | |
119 | ||
120 | /* This is the number of threads that have registered themselves in | |
121 | the circular queue ordered_team_ids. */ | |
122 | unsigned ordered_num_used; | |
123 | ||
124 | /* This is the team_id of the currently acknowledged owner of the ordered | |
125 | section, or -1u if the ordered section has not been acknowledged by | |
126 | any thread. This is distinguished from the thread that is *allowed* | |
127 | to take the section next. */ | |
128 | unsigned ordered_owner; | |
129 | ||
130 | /* This is the index into the circular queue ordered_team_ids of the | |
131 | current thread that's allowed into the ordered reason. */ | |
132 | unsigned ordered_cur; | |
1e8e9920 | 133 | |
fd6481cf | 134 | /* This is a chain of allocated gomp_work_share blocks, valid only |
135 | in the first gomp_work_share struct in the block. */ | |
136 | struct gomp_work_share *next_alloc; | |
137 | ||
138 | /* The above fields are written once during workshare initialization, | |
139 | or related to ordered worksharing. Make sure the following fields | |
140 | are in a different cache line. */ | |
1e8e9920 | 141 | |
142 | /* This lock protects the update of the following members. */ | |
fd6481cf | 143 | gomp_mutex_t lock __attribute__((aligned (64))); |
144 | ||
145 | /* This is the count of the number of threads that have exited the work | |
146 | share construct. If the construct was marked nowait, they have moved on | |
147 | to other work; otherwise they're blocked on a barrier. The last member | |
148 | of the team to exit the work share construct must deallocate it. */ | |
149 | unsigned threads_completed; | |
1e8e9920 | 150 | |
151 | union { | |
152 | /* This is the next iteration value to be allocated. In the case of | |
153 | GFS_STATIC loops, this the iteration start point and never changes. */ | |
154 | long next; | |
155 | ||
fd6481cf | 156 | /* The same, but with unsigned long long type. */ |
157 | unsigned long long next_ull; | |
158 | ||
1e8e9920 | 159 | /* This is the returned data structure for SINGLE COPYPRIVATE. */ |
160 | void *copyprivate; | |
161 | }; | |
162 | ||
fd6481cf | 163 | union { |
164 | /* Link to gomp_work_share struct for next work sharing construct | |
165 | encountered after this one. */ | |
166 | gomp_ptrlock_t next_ws; | |
1e8e9920 | 167 | |
fd6481cf | 168 | /* gomp_work_share structs are chained in the free work share cache |
169 | through this. */ | |
170 | struct gomp_work_share *next_free; | |
171 | }; | |
1e8e9920 | 172 | |
fd6481cf | 173 | /* If only few threads are in the team, ordered_team_ids can point |
174 | to this array which fills the padding at the end of this struct. */ | |
175 | unsigned inline_ordered_team_ids[0]; | |
1e8e9920 | 176 | }; |
177 | ||
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};
225 | ||
bc7bff74 | 226 | struct target_mem_desc; |
ca4c3545 | 227 | struct gomp_memory_mapping; |
bc7bff74 | 228 | |
229 | /* These are the OpenMP 4.0 Internal Control Variables described in | |
fd6481cf | 230 | section 2.3.1. Those described as having one copy per task are |
231 | stored within the structure; those described as having one copy | |
232 | for the whole program are (naturally) global variables. */ | |
bc7bff74 | 233 | |
fd6481cf | 234 | struct gomp_task_icv |
1e8e9920 | 235 | { |
fd6481cf | 236 | unsigned long nthreads_var; |
237 | enum gomp_schedule_type run_sched_var; | |
238 | int run_sched_modifier; | |
bc7bff74 | 239 | int default_device_var; |
240 | unsigned int thread_limit_var; | |
fd6481cf | 241 | bool dyn_var; |
242 | bool nest_var; | |
bc7bff74 | 243 | char bind_var; |
244 | /* Internal ICV. */ | |
245 | struct target_mem_desc *target_data; | |
fd6481cf | 246 | }; |
1e8e9920 | 247 | |
fd6481cf | 248 | extern struct gomp_task_icv gomp_global_icv; |
fd6481cf | 249 | #ifndef HAVE_SYNC_BUILTINS |
bc7bff74 | 250 | extern gomp_mutex_t gomp_managed_threads_lock; |
fd6481cf | 251 | #endif |
252 | extern unsigned long gomp_max_active_levels_var; | |
bc7bff74 | 253 | extern bool gomp_cancel_var; |
fd6481cf | 254 | extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; |
255 | extern unsigned long gomp_available_cpus, gomp_managed_threads; | |
2169f33b | 256 | extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; |
bc7bff74 | 257 | extern char *gomp_bind_var_list; |
258 | extern unsigned long gomp_bind_var_list_len; | |
259 | extern void **gomp_places_list; | |
260 | extern unsigned long gomp_places_list_len; | |
ca4c3545 | 261 | extern int gomp_debug_var; |
262 | extern int goacc_device_num; | |
263 | extern char *goacc_device_type; | |
1e8e9920 | 264 | |
/* The scheduling state of a task.  */
enum gomp_task_kind
{
  GOMP_TASK_IMPLICIT,
  GOMP_TASK_IFFALSE,
  GOMP_TASK_WAITING,
  GOMP_TASK_TIED
};
1e8e9920 | 272 | |
struct gomp_task;
struct gomp_taskgroup;
struct htab;

/* One entry in a task's dependence list; doubly linked so entries can be
   removed when the dependence is satisfied.  */
struct gomp_task_depend_entry
{
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  struct gomp_task *task;
  bool is_in;
  bool redundant;
  bool redundant_out;
};
287 | ||
/* Growable vector of tasks that depend on a given task; `elem' is a
   C99 flexible array member sized by `allocated'.  */
struct gomp_dependers_vec
{
  size_t n_elem;
  size_t allocated;
  struct gomp_task *elem[];
};
294 | ||
96013422 | 295 | /* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */ |
296 | ||
297 | struct gomp_taskwait | |
298 | { | |
299 | bool in_taskwait; | |
300 | bool in_depend_wait; | |
301 | size_t n_depend; | |
302 | struct gomp_task *last_parent_depends_on; | |
303 | gomp_sem_t taskwait_sem; | |
304 | }; | |
305 | ||
fd6481cf | 306 | /* This structure describes a "task" to be run by a thread. */ |
1e8e9920 | 307 | |
fd6481cf | 308 | struct gomp_task |
309 | { | |
310 | struct gomp_task *parent; | |
311 | struct gomp_task *children; | |
312 | struct gomp_task *next_child; | |
313 | struct gomp_task *prev_child; | |
314 | struct gomp_task *next_queue; | |
315 | struct gomp_task *prev_queue; | |
bc7bff74 | 316 | struct gomp_task *next_taskgroup; |
317 | struct gomp_task *prev_taskgroup; | |
318 | struct gomp_taskgroup *taskgroup; | |
319 | struct gomp_dependers_vec *dependers; | |
320 | struct htab *depend_hash; | |
96013422 | 321 | struct gomp_taskwait *taskwait; |
bc7bff74 | 322 | size_t depend_count; |
323 | size_t num_dependees; | |
fd6481cf | 324 | struct gomp_task_icv icv; |
325 | void (*fn) (void *); | |
326 | void *fn_data; | |
327 | enum gomp_task_kind kind; | |
9ead5ba0 | 328 | bool in_tied_task; |
2169f33b | 329 | bool final_task; |
bc7bff74 | 330 | bool copy_ctors_done; |
96013422 | 331 | bool parent_depends_on; |
bc7bff74 | 332 | struct gomp_task_depend_entry depend[]; |
333 | }; | |
334 | ||
335 | struct gomp_taskgroup | |
336 | { | |
337 | struct gomp_taskgroup *prev; | |
338 | struct gomp_task *children; | |
339 | bool in_taskgroup_wait; | |
340 | bool cancelled; | |
341 | gomp_sem_t taskgroup_sem; | |
342 | size_t num_children; | |
fd6481cf | 343 | }; |
344 | ||
345 | /* This structure describes a "team" of threads. These are the threads | |
346 | that are spawned by a PARALLEL constructs, as well as the work sharing | |
347 | constructs that the team encounters. */ | |
348 | ||
349 | struct gomp_team | |
350 | { | |
1e8e9920 | 351 | /* This is the number of threads in the current team. */ |
352 | unsigned nthreads; | |
353 | ||
fd6481cf | 354 | /* This is number of gomp_work_share structs that have been allocated |
355 | as a block last time. */ | |
356 | unsigned work_share_chunk; | |
357 | ||
1e8e9920 | 358 | /* This is the saved team state that applied to a master thread before |
359 | the current thread was created. */ | |
360 | struct gomp_team_state prev_ts; | |
361 | ||
1e8e9920 | 362 | /* This semaphore should be used by the master thread instead of its |
363 | "native" semaphore in the thread structure. Required for nested | |
364 | parallels, as the master is a member of two teams. */ | |
365 | gomp_sem_t master_release; | |
366 | ||
fd6481cf | 367 | /* This points to an array with pointers to the release semaphore |
368 | of the threads in the team. */ | |
369 | gomp_sem_t **ordered_release; | |
370 | ||
bc7bff74 | 371 | /* List of work shares on which gomp_fini_work_share hasn't been |
372 | called yet. If the team hasn't been cancelled, this should be | |
373 | equal to each thr->ts.work_share, but otherwise it can be a possibly | |
374 | long list of workshares. */ | |
375 | struct gomp_work_share *work_shares_to_free; | |
376 | ||
fd6481cf | 377 | /* List of gomp_work_share structs chained through next_free fields. |
378 | This is populated and taken off only by the first thread in the | |
379 | team encountering a new work sharing construct, in a critical | |
380 | section. */ | |
381 | struct gomp_work_share *work_share_list_alloc; | |
382 | ||
383 | /* List of gomp_work_share structs freed by free_work_share. New | |
384 | entries are atomically added to the start of the list, and | |
385 | alloc_work_share can safely only move all but the first entry | |
386 | to work_share_list alloc, as free_work_share can happen concurrently | |
387 | with alloc_work_share. */ | |
388 | struct gomp_work_share *work_share_list_free; | |
389 | ||
390 | #ifdef HAVE_SYNC_BUILTINS | |
391 | /* Number of simple single regions encountered by threads in this | |
392 | team. */ | |
393 | unsigned long single_count; | |
394 | #else | |
395 | /* Mutex protecting addition of workshares to work_share_list_free. */ | |
396 | gomp_mutex_t work_share_list_free_lock; | |
397 | #endif | |
398 | ||
399 | /* This barrier is used for most synchronization of the team. */ | |
400 | gomp_barrier_t barrier; | |
401 | ||
402 | /* Initial work shares, to avoid allocating any gomp_work_share | |
403 | structs in the common case. */ | |
404 | struct gomp_work_share work_shares[8]; | |
405 | ||
406 | gomp_mutex_t task_lock; | |
407 | struct gomp_task *task_queue; | |
bc7bff74 | 408 | /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */ |
409 | unsigned int task_count; | |
410 | /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */ | |
411 | unsigned int task_queued_count; | |
412 | /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running | |
413 | directly in gomp_barrier_handle_tasks; tasks spawned | |
414 | from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when | |
415 | that is called from a task run from gomp_barrier_handle_tasks. | |
416 | task_running_count should be always <= team->nthreads, | |
417 | and if current task isn't in_tied_task, then it will be | |
418 | even < team->nthreads. */ | |
419 | unsigned int task_running_count; | |
420 | int work_share_cancelled; | |
421 | int team_cancelled; | |
fd6481cf | 422 | |
423 | /* This array contains structures for implicit tasks. */ | |
424 | struct gomp_task implicit_task[]; | |
1e8e9920 | 425 | }; |
426 | ||
427 | /* This structure contains all data that is private to libgomp and is | |
428 | allocated per thread. */ | |
429 | ||
430 | struct gomp_thread | |
431 | { | |
432 | /* This is the function that the thread should run upon launch. */ | |
433 | void (*fn) (void *data); | |
434 | void *data; | |
435 | ||
436 | /* This is the current team state for this thread. The ts.team member | |
437 | is NULL only if the thread is idle. */ | |
438 | struct gomp_team_state ts; | |
439 | ||
fd6481cf | 440 | /* This is the task that the thread is currently executing. */ |
441 | struct gomp_task *task; | |
442 | ||
1e8e9920 | 443 | /* This semaphore is used for ordered loops. */ |
444 | gomp_sem_t release; | |
fd6481cf | 445 | |
bc7bff74 | 446 | /* Place this thread is bound to plus one, or zero if not bound |
447 | to any place. */ | |
448 | unsigned int place; | |
449 | ||
450 | /* User pthread thread pool */ | |
fd6481cf | 451 | struct gomp_thread_pool *thread_pool; |
452 | }; | |
453 | ||
454 | ||
455 | struct gomp_thread_pool | |
456 | { | |
457 | /* This array manages threads spawned from the top level, which will | |
458 | return to the idle loop once the current PARALLEL construct ends. */ | |
459 | struct gomp_thread **threads; | |
460 | unsigned threads_size; | |
461 | unsigned threads_used; | |
462 | struct gomp_team *last_team; | |
bc7bff74 | 463 | /* Number of threads running in this contention group. */ |
464 | unsigned long threads_busy; | |
fd6481cf | 465 | |
466 | /* This barrier holds and releases threads waiting in threads. */ | |
467 | gomp_barrier_t threads_dock; | |
1e8e9920 | 468 | }; |
469 | ||
/* Bitmask identifying which construct kind a cancellation request
   targets; FOR and DO are aliases of LOOP.  */
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
479 | ||
/* ... and here is that TLS data.  Accessor for the calling thread's
   gomp_thread: native TLS when available, otherwise a pthread key.  */

#if defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
495 | ||
fd6481cf | 496 | extern struct gomp_task_icv *gomp_new_icv (void); |
497 | ||
498 | /* Here's how to access the current copy of the ICVs. */ | |
1e8e9920 | 499 | |
fd6481cf | 500 | static inline struct gomp_task_icv *gomp_icv (bool write) |
501 | { | |
502 | struct gomp_task *task = gomp_thread ()->task; | |
503 | if (task) | |
504 | return &task->icv; | |
505 | else if (write) | |
506 | return gomp_new_icv (); | |
507 | else | |
508 | return &gomp_global_icv; | |
509 | } | |
ba893327 | 510 | |
511 | /* The attributes to be used during thread creation. */ | |
512 | extern pthread_attr_t gomp_thread_attr; | |
1e8e9920 | 513 | |
/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
				    long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);

/* alloc.c */

extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x)  __builtin_alloca(x)

/* error.c */

extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
	__attribute__ ((format (printf, 2, 3)));
/* The macro wrappers skip the call entirely unless debugging was
   requested via gomp_debug_var; the parenthesized names call the real
   functions declared above.  */
#define gomp_vdebug(KIND, FMT, VALIST) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_vdebug) ((KIND), (FMT), (VALIST)); \
  } while (0)
#define gomp_debug(KIND, ...) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_debug) ((KIND), __VA_ARGS__); \
  } while (0)
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
	__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
	__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
	__attribute__ ((noreturn, format (printf, 1, 2)));
1e8e9920 | 563 | |
/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
				      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
					       unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
					      unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
					unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
				       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);

/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

fd6481cf | 609 | /* task.c */ |
610 | ||
611 | extern void gomp_init_task (struct gomp_task *, struct gomp_task *, | |
612 | struct gomp_task_icv *); | |
613 | extern void gomp_end_task (void); | |
614 | extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); | |
615 | ||
616 | static void inline | |
617 | gomp_finish_task (struct gomp_task *task) | |
618 | { | |
bc7bff74 | 619 | if (__builtin_expect (task->depend_hash != NULL, 0)) |
620 | free (task->depend_hash); | |
fd6481cf | 621 | } |
622 | ||
/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
			     unsigned, struct gomp_team *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);

/* target.c */

extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
1e8e9920 | 635 | |
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;

/* Describes one block of device memory mapped for a target region.  */
struct target_mem_desc {
  /* Reference count.  */
  uintptr_t refcount;
  /* All the splay nodes allocated together.  */
  splay_tree_node array;
  /* Start of the target region.  */
  uintptr_t tgt_start;
  /* End of the target region.  */
  uintptr_t tgt_end;
  /* Handle to free.  */
  void *to_free;
  /* Previous target_mem_desc.  */
  struct target_mem_desc *prev;
  /* Number of items in following list.  */
  size_t list_count;

  /* Corresponding target device descriptor.  */
  struct gomp_device_descr *device_descr;

  /* Memory mapping info for the thread that created this descriptor.  */
  struct gomp_memory_mapping *mem_map;

  /* List of splay keys to remove (or decrease refcount)
     at the end of region.  */
  splay_tree_key list[];
};
666 | ||
/* One host-address range mapped onto a target memory descriptor.  */
struct splay_tree_key_s {
  /* Address of the host object.  */
  uintptr_t host_start;
  /* Address immediately after the host object.  */
  uintptr_t host_end;
  /* Descriptor of the target memory.  */
  struct target_mem_desc *tgt;
  /* Offset from tgt->tgt_start to the start of the target object.  */
  uintptr_t tgt_offset;
  /* Reference count.  */
  uintptr_t refcount;
  /* Asynchronous reference count.  */
  uintptr_t async_refcount;
  /* True if data should be copied from device to host at the end.  */
  bool copy_from;
};
683 | ||
684 | #include "splay-tree.h" | |
685 | ||
686 | /* Information about mapped memory regions (per device/context). */ | |
687 | ||
688 | struct gomp_memory_mapping | |
689 | { | |
690 | /* Mutex for operating with the splay tree and other shared structures. */ | |
691 | gomp_mutex_t lock; | |
692 | ||
693 | /* True when tables have been added to this memory map. */ | |
694 | bool is_initialized; | |
695 | ||
696 | /* Splay tree containing information about mapped memory regions. */ | |
697 | struct splay_tree_s splay_tree; | |
698 | }; | |
699 | ||
/* Table of OpenACC entry points a device plugin provides, plus the
   OpenACC-specific mutable state for one device instance.  */
typedef struct acc_dispatch_t
{
  /* This is a linked list of data mapped using the
     acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas.
     Unlike mapped_data in the goacc_thread struct, unmapping can
     happen out-of-order with respect to mapping.  */
  /* This is guarded by the lock in the "outer" struct gomp_device_descr.  */
  struct target_mem_desc *data_environ;

  /* Extra information required for a device instance by a given target.  */
  /* This is guarded by the lock in the "outer" struct gomp_device_descr.  */
  void *target_data;

  /* Open or close a device instance.  */
  void *(*open_device_func) (int n);
  int (*close_device_func) (void *h);

  /* Set or get the device number.  */
  int (*get_device_num_func) (void);
  void (*set_device_num_func) (int);

  /* Execute.  */
  void (*exec_func) (void (*) (void *), size_t, void **, void **, size_t *,
		     unsigned short *, int, int, int, int, void *);

  /* Async cleanup callback registration.  */
  void (*register_async_cleanup_func) (void *);

  /* Asynchronous routines.  */
  int (*async_test_func) (int);
  int (*async_test_all_func) (void);
  void (*async_wait_func) (int);
  void (*async_wait_async_func) (int, int);
  void (*async_wait_all_func) (void);
  void (*async_wait_all_async_func) (int);
  void (*async_set_async_func) (int);

  /* Create/destroy TLS data.  */
  void *(*create_thread_data_func) (void *);
  void (*destroy_thread_data_func) (void *);

  /* NVIDIA target specific routines.  */
  struct {
    void *(*get_current_device_func) (void);
    void *(*get_current_context_func) (void);
    void *(*get_stream_func) (int);
    int (*set_stream_func) (int, void *);
  } cuda;
} acc_dispatch_t;
749 | ||
/* This structure describes accelerator device.
   It contains name of the corresponding libgomp plugin, function handlers for
   interaction with the device, ID-number of the device, and information about
   mapped memory.  */
struct gomp_device_descr
{
  /* Immutable data, which is only set during initialization, and which is not
     guarded by the lock.  */

  /* The name of the device.  */
  const char *name;

  /* Capabilities of device (supports OpenACC, OpenMP).  */
  unsigned int capabilities;

  /* This is the ID number of device among devices of the same type.  */
  int target_id;

  /* This is the TYPE of device.  */
  enum offload_target_type type;

  /* Function handlers.  These are entry points resolved from the device's
     plugin.  Where an int parameter appears it is presumably the plugin's
     device number (cf. target_id above) — confirm against the plugin ABI.  */
  const char *(*get_name_func) (void);
  unsigned int (*get_caps_func) (void);
  int (*get_type_func) (void);
  int (*get_num_devices_func) (void);
  void (*register_image_func) (void *, void *);
  void (*init_device_func) (int);
  void (*fini_device_func) (int);
  int (*get_table_func) (int, struct mapping_table **);
  /* Device memory management: allocate/free SIZE bytes on the device.  */
  void *(*alloc_func) (int, size_t);
  void (*free_func) (int, void *);
  /* Bulk copies between host and device address spaces
     (destination pointer first, then source, then byte count).  */
  void *(*dev2host_func) (int, void *, const void *, size_t);
  void *(*host2dev_func) (int, void *, const void *, size_t);
  /* Launch an offloaded region on the device.  */
  void (*run_func) (int, void *, void *);

  /* Memory-mapping info for this device instance.  */
  /* Uses a separate lock.  */
  struct gomp_memory_mapping mem_map;

  /* Mutex for the mutable data.  */
  gomp_mutex_t lock;

  /* Set to true when device is initialized.  */
  bool is_initialized;

  /* True when offload regions have been registered with this device.  */
  bool offload_regions_registered;

  /* OpenACC-specific data and functions.  */
  /* This is mutable because of its mutable data_environ and target_data
     members.  */
  acc_dispatch_t openacc;
};
804 | ||
/* target.c / oacc-mem.c — device data-environment management.
   (Prototypes only; semantics live in the implementation files.)  */

/* Map/unmap an OpenACC pointer (plus its pointed-to block) on the
   current device.  */
extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, bool, int, int);

/* Map host variables onto DEVICE; returns the resulting descriptor.
   NOTE(review): the two trailing bools look like "is_target"/"is_openacc"
   style flags — confirm against the definition in target.c.  */
extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
					      size_t, void **, void **,
					      size_t *, void *, bool, bool);
/* Copy back "from"-mapped data for an asynchronous region.  */
extern void gomp_copy_from_async (struct target_mem_desc *);
/* Tear down a mapping; the bool presumably selects whether device->host
   copy-back happens — verify at the definition.  */
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
/* One-time per-device initialization / finalization.  */
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_init_tables (struct gomp_device_descr *,
			      struct gomp_memory_mapping *);
extern void gomp_free_memmap (struct gomp_memory_mapping *);
extern void gomp_fini_device (struct gomp_device_descr *);
818 | ||
/* work.c — work-share (sections/for/single) bookkeeping.  */

extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
/* Returns true in the thread that must initialize the new work share.  */
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
827 | ||
fd6481cf | 828 | static inline void |
829 | gomp_work_share_init_done (void) | |
830 | { | |
831 | struct gomp_thread *thr = gomp_thread (); | |
832 | if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) | |
833 | gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); | |
834 | } | |
835 | ||
1e8e9920 | 836 | #ifdef HAVE_ATTRIBUTE_VISIBILITY |
837 | # pragma GCC visibility pop | |
838 | #endif | |
839 | ||
840 | /* Now that we're back to default visibility, include the globals. */ | |
841 | #include "libgomp_g.h" | |
842 | ||
843 | /* Include omp.h by parts. */ | |
844 | #include "omp-lock.h" | |
845 | #define _LIBGOMP_OMP_LOCK_DEFINED 1 | |
846 | #include "omp.h.in" | |
847 | ||
/* GNU symbol versioning for the omp_*_lock API is only usable when the
   toolchain and runtime support every required piece; otherwise fall back
   to plain (unversioned) symbols below.  */
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
/* OpenMP 3.0 lock implementations — exported as the default version
   (OMP_3.0, see omp_lock_symver below).  */
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

/* Legacy OpenMP 2.5 lock implementations, kept for binaries linked
   against the old OMP_1.0 symbol version (note the distinct
   omp_lock_25_t layout).  */
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

/* Create an alias AL for FN (same visibility as FN).  */
# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));
/* Bind g<fn>_30 to the default version fn@@OMP_3.0 and g<fn>_25 to the
   compat version fn@OMP_1.0.  */
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
/* No symbol versioning: the 3.0 implementations ARE the public names.  */
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
896 | ||
/* Hidden visibility keeps internal symbols out of the dynamic symbol
   table (and avoids PLT indirection for intra-library calls).  */
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
/* Stringified __USER_LABEL_PREFIX__ (e.g. "_" on some targets), needed
   to spell assembler-level names in __asm__ below.  */
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
/* Define a hidden alias gomp_ialias_<fn> for FN, so internal callers can
   bypass the exported (possibly interposable) symbol.  */
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
/* Redirect a declaration of FN so calls go to the hidden alias.  */
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
/* Call through the hidden alias explicitly.  */
# define ialias_call(fn) gomp_ialias_ ## fn
#else
/* Without alias support the public symbols are used directly.  */
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
918 | ||
919 | #endif /* LIBGOMP_H */ |