/* Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the (bare) PARALLEL construct.  */

#include "libgomp.h"
#include <limits.h>

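/* For illustration only (a hedged sketch, not part of libgomp): with
   -fopenmp, GCC outlines the body of a parallel construct into a separate
   function and calls the GOMP_parallel entry point defined below, so

       #pragma omp parallel num_threads (4)
	 do_work (x);

   is lowered to roughly

       struct omp_data_s data = { x };
       GOMP_parallel (outlined_fn, &data, 4, 0);

   where omp_data_s and outlined_fn stand for the compiler-generated
   shared-data record and outlined body; a NUM_THREADS argument of 0 means
   the clause was not given.  */
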
/* Determine the number of threads to be launched for a PARALLEL construct.
   This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
   SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
   If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
   is not present, SPECIFIED is 0.  */

unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long busy;
  struct gomp_thread_pool *pool;

  icv = gomp_icv (false);

  if (specified == 1)
    return 1;
  else if (thr->ts.active_level >= 1 && !icv->nest_var)
    return 1;
  else if (thr->ts.active_level >= gomp_max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
        max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
        max_num_threads = count;
    }

  /* UINT_MAX stands for infinity.  */
  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* The threads_busy counter lives in thread_pool; if there
     isn't a thread_pool yet, there must be just one thread
     in the contention group.  If thr->ts.team is NULL, this isn't
     a nested parallel, so there is just one thread in the
     contention group as well, and there is no need to handle it
     atomically.  */
  pool = thr->thread_pool;
  if (thr->ts.team == NULL || pool == NULL)
    {
      num_threads = max_num_threads;
      if (num_threads > icv->thread_limit_var)
        num_threads = icv->thread_limit_var;
      if (pool)
        pool->threads_busy = num_threads;
      return num_threads;
    }

#ifdef HAVE_SYNC_BUILTINS
  do
    {
      busy = pool->threads_busy;
      num_threads = max_num_threads;
      if (icv->thread_limit_var - busy + 1 < num_threads)
        num_threads = icv->thread_limit_var - busy + 1;
    }
  while (__sync_val_compare_and_swap (&pool->threads_busy,
                                      busy, busy + num_threads - 1)
         != busy);
#else
  gomp_mutex_lock (&gomp_managed_threads_lock);
  num_threads = max_num_threads;
  busy = pool->threads_busy;
  if (icv->thread_limit_var - busy + 1 < num_threads)
    num_threads = icv->thread_limit_var - busy + 1;
  pool->threads_busy += num_threads - 1;
  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif

  return num_threads;
}
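
/* Worked example (an illustration, not from the original source): assume
   nested parallelism is enabled, icv->dyn_var is false, nthreads_var is 8
   and thread_limit_var is 4.  An outermost "#pragma omp parallel" takes the
   thr->ts.team == NULL path above and resolves to MIN (8, 4) = 4 threads;
   once those 4 threads are accounted for in threads_busy, a parallel region
   nested inside one of them resolves to thread_limit_var - busy + 1
   = 4 - 4 + 1 = 1 thread.  */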

void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads),
                   NULL);
}
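
/* Illustration only (a sketch, assuming the older split ABI used before the
   combined GOMP_parallel entry point existed): the compiler used to emit

       GOMP_parallel_start (outlined_fn, &data, 0);
       outlined_fn (&data);
       GOMP_parallel_end ();

   so the thread that encounters the construct runs the outlined body itself
   between the two calls; outlined_fn is a hypothetical name for the
   compiler-generated function.  */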

void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
        {
          /* If not nested, there is just one thread in the
             contention group left, no need for atomicity.  */
          if (thr->ts.team == NULL)
            thr->thread_pool->threads_busy = 1;
          else
            {
#ifdef HAVE_SYNC_BUILTINS
              __sync_fetch_and_add (&thr->thread_pool->threads_busy,
                                    1UL - nthreads);
#else
              gomp_mutex_lock (&gomp_managed_threads_lock);
              thr->thread_pool->threads_busy -= nthreads - 1;
              gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
            }
        }
    }
  else
    gomp_team_end ();
}
ialias (GOMP_parallel_end)

void
GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads,
               unsigned int flags)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads),
                   NULL);
  fn (data);
  ialias_call (GOMP_parallel_end) ();
}

unsigned
GOMP_parallel_reductions (void (*fn) (void *), void *data,
                          unsigned num_threads, unsigned int flags)
{
  struct gomp_taskgroup *taskgroup;
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  uintptr_t *rdata = *(uintptr_t **)data;
  taskgroup = gomp_parallel_reduction_register (rdata, num_threads);
  gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads),
                   taskgroup);
  fn (data);
  ialias_call (GOMP_parallel_end) ();
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
  return num_threads;
}

bool
GOMP_cancellation_point (int which)
{
  if (!gomp_cancel_var)
    return false;

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      if (team == NULL)
        return false;
      return team->work_share_cancelled != 0;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup)
        {
          if (thr->task->taskgroup->cancelled)
            return true;
          if (thr->task->taskgroup->workshare
              && thr->task->taskgroup->prev
              && thr->task->taskgroup->prev->cancelled)
            return true;
        }
      /* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
         as #pragma omp cancel parallel also cancels all explicit
         tasks.  */
    }
  if (team)
    return gomp_team_barrier_cancelled (&team->barrier);
  return false;
}
ialias (GOMP_cancellation_point)

bool
GOMP_cancel (int which, bool do_cancel)
{
  if (!gomp_cancel_var)
    return false;

  if (!do_cancel)
    return ialias_call (GOMP_cancellation_point) (which);

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      /* In an orphaned worksharing region, all we want to cancel
         is the current thread.  */
      if (team != NULL)
        team->work_share_cancelled = 1;
      return true;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup)
        {
          struct gomp_taskgroup *taskgroup = thr->task->taskgroup;
          if (taskgroup->workshare && taskgroup->prev)
            taskgroup = taskgroup->prev;
          if (!taskgroup->cancelled)
            {
              gomp_mutex_lock (&team->task_lock);
              taskgroup->cancelled = true;
              gomp_mutex_unlock (&team->task_lock);
            }
        }
      return true;
    }
  team->team_cancelled = 1;
  gomp_team_barrier_cancel (team);
  return true;
}
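
/* Illustration only (a sketch of the expected lowering, not taken from this
   file): with cancellation enabled (gomp_cancel_var set, e.g. via
   OMP_CANCELLATION=true), "#pragma omp cancel parallel if (err)" becomes
   roughly

       GOMP_cancel (GOMP_CANCEL_PARALLEL, err != 0);

   and "#pragma omp cancellation point for" becomes

       GOMP_cancellation_point (GOMP_CANCEL_LOOP);

   where the boolean result tells the generated code whether to branch to
   the end of the cancelled region.  */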

/* The public OpenMP API for thread and team related inquiries.  */

int
omp_get_num_threads (void)
{
  struct gomp_team *team = gomp_thread ()->ts.team;
  return team ? team->nthreads : 1;
}

int
omp_get_thread_num (void)
{
  return gomp_thread ()->ts.team_id;
}

/* This wasn't right for OpenMP 2.5.  Under 2.5 the region counted as active
   whenever the IF clause didn't evaluate to false; starting with OpenMP 3.0
   it is active only when the team has more than one thread.  */

int
omp_in_parallel (void)
{
  return gomp_thread ()->ts.active_level > 0;
}
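
/* Worked example (not from the original source): inside
   "#pragma omp parallel num_threads (1)" the team has a single thread, so
   active_level stays 0 and omp_in_parallel () returns 0 under the OpenMP 3.0
   rule above, whereas the OpenMP 2.5 wording would have counted the region
   because its IF clause did not evaluate to false.  */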

int
omp_get_level (void)
{
  return gomp_thread ()->ts.level;
}

int
omp_get_ancestor_thread_num (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  return ts->team_id;
}

int
omp_get_team_size (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  if (ts->team == NULL)
    return 1;
  else
    return ts->team->nthreads;
}
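
/* Worked example (not from the original source): with nested parallelism
   enabled and both requests granted, a thread in an inner
   "#pragma omp parallel num_threads (2)" nested inside an outer
   "#pragma omp parallel num_threads (4)" sees omp_get_level () == 2,
   omp_get_team_size (2) == 2, omp_get_team_size (1) == 4,
   omp_get_team_size (0) == 1 and omp_get_ancestor_thread_num (0) == 0.  */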

int
omp_get_active_level (void)
{
  return gomp_thread ()->ts.active_level;
}

ialias (omp_get_num_threads)
ialias (omp_get_thread_num)
ialias (omp_in_parallel)
ialias (omp_get_level)
ialias (omp_get_ancestor_thread_num)
ialias (omp_get_team_size)
ialias (omp_get_active_level)