/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
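/* A rough sketch of how callers are expected to drive the entry points
   below; GOMP_parallel_start/GOMP_parallel_end in parallel.c do
   approximately this, with nthreads already resolved:

     struct gomp_team *team = gomp_new_team (nthreads);
     gomp_team_start (fn, data, nthreads, team);
     fn (data);         the master runs its own share of the work
     gomp_team_end ();  join workers; idle threads return to the dock

   The details are illustrative, not a copy of parallel.c.  */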
/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  */
pthread_key_t gomp_thread_destructor;
/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  void (*fn) (void *);
  void *fn_data;
  struct gomp_team_state ts;
  struct gomp_task *task;
  struct gomp_thread_pool *thread_pool;
  bool nested;
};
/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local.  */
  pool = thr->thread_pool;

  if (data->nested)
    {
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      pool->threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&pool->threads_dock);
      do
        {
          struct gomp_team *team = thr->ts.team;
          struct gomp_task *task = thr->task;

          local_fn (local_data);
          gomp_team_barrier_wait (&team->barrier);
          gomp_finish_task (task);

          gomp_barrier_wait (&pool->threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
          thr->fn = NULL;
        }
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  return NULL;
}
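/* Master-side counterpart of the idle loop above, as it appears in
   gomp_team_start below (a sketch; nthr is an idle pool thread):

     nthr->fn = fn;       re-arm the docked thread with new work
     nthr->data = data;
     ...
     gomp_barrier_wait (&pool->threads_dock);   undock everyone

   A woken worker loads thr->fn and thr->data into locals and runs
   another iteration; since thr->fn is cleared at the end of every
   cycle, a thread that wakes without being re-armed sees local_fn
   == NULL, leaves the loop and terminates.  */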
/* Create a new team data structure.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
                                      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}
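/* Layout of the single block returned by gomp_malloc above
   (illustrative):

     struct gomp_team header, including work_shares[8]
     implicit_task[0] .. implicit_task[nthreads-1]
     ordered_release[0] .. ordered_release[nthreads-1]

   ordered_release is never allocated separately; the cast in
   gomp_new_team simply points it just past the implicit_task array,
   which is why `size' accounts for both trailing arrays.  */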
/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  gomp_barrier_destroy (&team->barrier);
  gomp_mutex_destroy (&team->task_lock);
  free (team);
}
/* Allocate and initialize a thread pool.  */

static struct gomp_thread_pool *gomp_new_thread_pool (void)
{
  struct gomp_thread_pool *pool
    = gomp_malloc (sizeof (struct gomp_thread_pool));
  pool->threads = NULL;
  pool->threads_size = 0;
  pool->threads_used = 0;
  pool->last_team = NULL;
  return pool;
}
static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;
  gomp_barrier_wait_last (&pool->threads_dock);
  pthread_exit (NULL);
}
/* Free a thread pool and release its threads.  */

static void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool = thr->thread_pool;
  if (pool)
    {
      if (pool->threads_used > 0)
        {
          int i;
          for (i = 1; i < pool->threads_used; i++)
            {
              struct gomp_thread *nthr = pool->threads[i];
              nthr->fn = gomp_free_pool_helper;
              nthr->data = pool;
            }
          /* This barrier undocks threads docked on pool->threads_dock.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* And this waits till all threads have called gomp_barrier_wait_last
             in gomp_free_pool_helper.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* Now it is safe to destroy the barrier and free the pool.  */
          gomp_barrier_destroy (&pool->threads_dock);
        }
      free (pool->threads);
      if (pool->last_team)
        free_team (pool->last_team);
      free (pool);
      thr->thread_pool = NULL;
    }
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }
}
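/* Timeline sketch for the two dock waits in gomp_free_thread
   (illustrative):

     master                               idle worker i
     nthr->fn = gomp_free_pool_helper
     nthr->data = pool
     barrier_wait (threads_dock)  ---->   wakes, local_fn = helper
                                          helper: barrier_wait_last
     barrier_wait (threads_dock)  <----   (arrives, then pthread_exit)
     barrier_destroy; free pool

   The second wait guarantees no worker can still be touching the
   barrier when it is destroyed.  */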
/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;
  if (__builtin_expect (thr->thread_pool == NULL, 0))
    {
      thr->thread_pool = gomp_new_thread_pool ();
      pthread_setspecific (gomp_thread_destructor, thr);
    }
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  gomp_init_task (thr->task, task, icv);

  if (nthreads == 1)
    return;

  i = 1;
  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = pool->threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&pool->threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&pool->threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      pool->threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = pool->threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = &team->work_shares[0];
          nthr->ts.last_work_share = NULL;
          nthr->ts.team_id = i;
          nthr->ts.level = team->prev_ts.level + 1;
          nthr->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
          nthr->ts.single_count = 0;
#endif
          nthr->ts.static_trip = 0;
          nthr->task = &team->implicit_task[i];
          gomp_init_task (nthr->task, task, icv);
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= pool->threads_size)
        {
          pool->threads_size = nthreads + 1;
          pool->threads
            = gomp_realloc (pool->threads,
                            pool->threads_size
                            * sizeof (struct gomp_thread *));
        }
    }
  if (__builtin_expect (nthreads > old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      if (old_threads_used == 0)
        --diff;

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
        pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads - i));
  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      gomp_init_task (start_data->task, task, icv);
      start_data->thread_pool = pool;
      start_data->nested = nested;

      if (gomp_cpu_affinity != NULL)
        gomp_init_thread_affinity (attr);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    pthread_attr_destroy (&thread_attr);
 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (__builtin_expect (nthreads < old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      gomp_barrier_reinit (&pool->threads_dock, nthreads);

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }
}
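/* Worked example for the shrink path above (illustrative): a first
   region ran with old_threads_used == 4 and the next asks for
   nthreads == 2.  All four docked threads are released at do_release;
   threads 2 and 3 wake with thr->fn still NULL and leave their idle
   loop, while threads_dock is re-armed for 2 arrivals so the next
   dock cycle matches the surviving team.  gomp_managed_threads is
   adjusted by diff == -2 accordingly.  */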
/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit threads.  */
  gomp_team_barrier_wait (&team->barrier);
  gomp_fini_work_share (thr->ts.work_share);

  gomp_end_task ();
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
         and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
        {
          struct gomp_work_share *next_ws = ws->next_alloc;
          free (ws);
          ws = next_ws;
        }
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif

  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
        free_team (pool->last_team);
      pool->last_team = team;
    }
}
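/* Note on pool->last_team above: the finished team cannot be freed
   immediately in the non-nested case, because its workers may not yet
   have left their final wait on team->barrier when the master returns.
   Parking it in last_team defers the free to the next gomp_team_end
   (or to gomp_free_thread), by which point the old team is quiescent.  */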
/* Constructors for this file.  */

static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor.");

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}
static void __attribute__((destructor))
team_destructor (void)
{
  /* Without this dlclose on libgomp could lead to subsequent
     crashes.  */
  pthread_key_delete (gomp_thread_destructor);
}
struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));

  gomp_init_task (task, NULL, &gomp_global_icv);
  thr->task = task;
  pthread_setspecific (gomp_thread_destructor, thr);

  return &task->icv;
}
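/* Expected caller (a sketch, assuming the inline gomp_icv helper in
   libgomp.h looks roughly like this): ICVs are created lazily on the
   first write by a thread that has no task yet.

     static inline struct gomp_task_icv *
     gomp_icv (bool write)
     {
       struct gomp_task *task = gomp_thread ()->task;
       if (task)
         return &task->icv;
       else if (write)
         return gomp_new_icv ();
       else
         return &gomp_global_icv;
     }
*/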