/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  */
pthread_key_t gomp_thread_destructor;


/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif


/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  void (*fn) (void *);
  void *fn_data;
  struct gomp_team_state ts;
  struct gomp_task *task;
  struct gomp_thread_pool *thread_pool;
  bool nested;
};

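/* Note that gomp_team_start below allocates these on its own stack via
   gomp_alloca and hands one element to each pthread_create call.  The new
   thread copies everything it needs out of the structure before its first
   barrier wait, which is what keeps the alloca'd storage live long enough;
   see gomp_thread_start.  */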

/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local.  */
  pool = thr->thread_pool;

  if (data->nested)
    {
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      pool->threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&pool->threads_dock);
      do
        {
          struct gomp_team *team = thr->ts.team;
          struct gomp_task *task = thr->task;

          local_fn (local_data);
          gomp_team_barrier_wait (&team->barrier);
          gomp_finish_task (task);

          gomp_barrier_wait (&pool->threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
          thr->fn = NULL;
        }
      while (local_fn);
    }

  return NULL;
}
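
/* A sketch of the docking protocol above, for orientation.  A pooled
   (non-nested) thread parks on pool->threads_dock between parallel
   regions; each trip around the do/while loop is one region:

     master                               worker
     ------                               ------
     fill thr->fn, thr->data
     barrier_wait (threads_dock)  <--->   barrier_wait (threads_dock)
     ... runs its own share ...           local_fn (local_data)
                                          team_barrier_wait (team->barrier)
                                          barrier_wait (threads_dock), redock

   The worker's redock pairs with the release of the *next* team, by which
   time the master has refilled thr->fn.  A worker whose thr->fn is still
   NULL after redocking (dropped from a shrinking team, or shut down by
   gomp_free_thread) falls out of the loop and exits.  */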

/* Create a new team data structure.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
                                      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}
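
/* The single gomp_malloc in gomp_new_team is effectively three allocations
   in one.  Assuming implicit_task[] is the flexible array at the end of
   struct gomp_team (which is what the size computation implies), the block
   is laid out as

     [struct gomp_team][implicit_task[0..nthreads-1]][ordered_release[0..nthreads-1]]

   which is why ordered_release is simply pointed just past
   implicit_task[nthreads] rather than allocated separately.  */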


/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  gomp_barrier_destroy (&team->barrier);
  gomp_mutex_destroy (&team->task_lock);
  free (team);
}

/* Allocate and initialize a thread pool.  */

static struct gomp_thread_pool *
gomp_new_thread_pool (void)
{
  struct gomp_thread_pool *pool
    = gomp_malloc (sizeof (struct gomp_thread_pool));
  pool->threads = NULL;
  pool->threads_size = 0;
  pool->threads_used = 0;
  pool->last_team = NULL;
  return pool;
}

/* Shut down one pooled thread; installed as thr->fn by gomp_free_thread
   below so that docked threads terminate themselves.  */

static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;
  gomp_barrier_wait_last (&pool->threads_dock);
  pthread_exit (NULL);
}

/* Free a thread pool and release its threads.  */

static void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool = thr->thread_pool;
  if (pool)
    {
      if (pool->threads_used > 0)
        {
          int i;
          for (i = 1; i < pool->threads_used; i++)
            {
              struct gomp_thread *nthr = pool->threads[i];
              nthr->fn = gomp_free_pool_helper;
              nthr->data = pool;
            }
          /* This barrier undocks threads docked on pool->threads_dock.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* And this waits till all threads have called gomp_barrier_wait_last
             in gomp_free_pool_helper.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* Now it is safe to destroy the barrier and free the pool.  */
          gomp_barrier_destroy (&pool->threads_dock);
        }
      free (pool->threads);
      if (pool->last_team)
        free_team (pool->last_team);
      free (pool);
      thr->thread_pool = NULL;
    }
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }
}
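
/* Note that gomp_free_thread is not called directly: initialize_team below
   installs it as the destructor of gomp_thread_destructor, and any thread
   that acquires a pool or task arms it via pthread_setspecific.  It thus
   runs automatically when such a thread exits.  */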

/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;
  if (__builtin_expect (thr->thread_pool == NULL, 0))
    {
      thr->thread_pool = gomp_new_thread_pool ();
      pthread_setspecific (gomp_thread_destructor, thr);
    }
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  gomp_init_task (thr->task, task, icv);

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify pool->threads.  */
  if (!nested)
    {
      old_threads_used = pool->threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&pool->threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&pool->threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      pool->threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = pool->threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = &team->work_shares[0];
          nthr->ts.last_work_share = NULL;
          nthr->ts.team_id = i;
          nthr->ts.level = team->prev_ts.level + 1;
          nthr->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
          nthr->ts.single_count = 0;
#endif
          nthr->ts.static_trip = 0;
          nthr->task = &team->implicit_task[i];
          gomp_init_task (nthr->task, task, icv);
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the pool->threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand pool->threads_size geometrically.  */
      if (nthreads >= pool->threads_size)
        {
          pool->threads_size = nthreads + 1;
          pool->threads
            = gomp_realloc (pool->threads,
                            pool->threads_size
                            * sizeof (struct gomp_thread *));
        }
    }

  if (__builtin_expect (nthreads > old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      if (old_threads_used == 0)
        --diff;

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
        pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads - i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      gomp_init_task (start_data->task, task, icv);
      start_data->thread_pool = pool;
      start_data->nested = nested;

      if (gomp_cpu_affinity != NULL)
        gomp_init_thread_affinity (attr);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    pthread_attr_destroy (&thread_attr);

 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (__builtin_expect (nthreads < old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      gomp_barrier_reinit (&pool->threads_dock, nthreads);

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }
}
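
/* For orientation, the expected usage from the compiler-facing entry
   points (GOMP_parallel_start and friends in parallel.c) is roughly:

     struct gomp_team *team = gomp_new_team (nthreads);
     gomp_team_start (fn, data, nthreads, team);
     fn (data);      /+ master runs its own share of the region +/
     gomp_team_end ();

   gomp_team_start returns as soon as the team is released; the master is
   already counted as thread 0 and never blocks here waiting for the
   region to finish.  */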


/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit tasks.  */
  gomp_team_barrier_wait (&team->barrier);
  gomp_fini_work_share (thr->ts.work_share);

  gomp_end_task ();
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
         and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
        {
          struct gomp_work_share *next_ws = ws->next_alloc;
          free (ws);
          ws = next_ws;
        }
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif

  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
        free_team (pool->last_team);
      pool->last_team = team;
    }
}
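
/* Note the asymmetry at the end of gomp_team_end: a non-nested team is
   parked in pool->last_team rather than freed immediately, and is only
   reclaimed on the next gomp_team_end or in gomp_free_thread.  The workers
   redocking on pool->threads_dock may still be inside the team's barrier
   when the master gets here, so freeing the team at once could be a
   use-after-free.  */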


/* Constructors for this file.  */

static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor.");

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}

static void __attribute__((destructor))
team_destructor (void)
{
  /* Without this, dlclose on libgomp could lead to subsequent
     crashes.  */
  pthread_key_delete (gomp_thread_destructor);
}

/* Allocate a task and its ICVs for a thread that does not have one yet,
   initialized from the global ICVs, and arm the thread destructor.  */

struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
  gomp_init_task (task, NULL, &gomp_global_icv);
  thr->task = task;
  pthread_setspecific (gomp_thread_destructor, thr);
  return &task->icv;
}