/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

28#include "libgomp.h"
a68ab351 29#include <stddef.h>
953ff289
DN
30#include <stdlib.h>
31#include <string.h>
32
33
/* Allocate a new work share structure, preferably from the current
   team's free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }

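  /* Added note (editorial, not in the original source): the team keeps
     two free lists threaded through the next_free field.
     work_share_list_alloc is only ever touched here, inside the
     work-share critical section, so the fast path above needs no
     synchronization; work_share_list_free is pushed onto concurrently
     by free_work_share and is drained with the guarded code below.  */
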
#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));

  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
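
  /* Added note (editorial): the empty asm with a "+r" constraint is an
     optimization barrier that forces work_share_list_free to be loaded
     exactly once.  Concurrent pushes in free_work_share only ever
     rewrite the list head, so the nodes reachable from ws->next_free
     are read by this allocator alone and can be claimed without an
     atomic operation; the head node itself is deliberately left on the
     list because an in-flight push may already point to it.  */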
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif

  team->work_share_chunk *= 2;
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}

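/* Added note (editorial): when both caches are empty the chunk size is
   doubled and a single gomp_malloc covers the whole new chunk; element
   0 is returned to the caller while elements 1 .. chunk-1 are chained
   through next_free onto work_share_list_alloc.  The next_alloc links
   thread the chunks themselves together, starting at
   team->work_shares[0], so team teardown can walk the chain and free
   every chunk in one pass.  */
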
/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_CNT \
  ((sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
   / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))

      if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
	ws->ordered_team_ids
	  = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
      else
	ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0',
	      nthreads * sizeof (*ws->ordered_team_ids));
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = NULL;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}

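/* Added note (editorial): INLINE_ORDERED_TEAM_IDS_CNT is the number of
   team ids that fit in the spare tail of struct gomp_work_share itself,
   measured from the offset of inline_ordered_team_ids to the end of the
   struct.  Teams no larger than that reuse the struct's own storage for
   the ordered bookkeeping and skip the separate gomp_malloc.  */
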
/* Do any needed destruction of gomp_work_share fields before it
   is put back into the free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct; if not orphaned, put it into the current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
#ifdef HAVE_SYNC_BUILTINS
      do
	{
	  next_ws = team->work_share_list_free;
	  ws->next_free = next_ws;
	}
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
					    next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}

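/* Added note (editorial): the HAVE_SYNC_BUILTINS branch above is a
   standard lock-free stack push, retrying until the head pointer has
   not changed between the read and the compare-and-swap.  Only pushes
   use the CAS; alloc_work_share deliberately never unlinks the head
   node, which keeps the scheme clear of the classic ABA problem.  */
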
/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  Return true if this was the first thread to reach this
   point.  */

bool
gomp_work_share_start (bool ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}

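/* Usage sketch (editorial addition, simplified from the real callers in
   loop.c and sections.c; gomp_loop_init and GFS_DYNAMIC come from those
   files): a worksharing entry point pairs the calls roughly like

     if (gomp_work_share_start (false))
       {
	 gomp_loop_init (thr->ts.work_share, start, end, incr,
			 GFS_DYNAMIC, chunk_size);
	 gomp_work_share_init_done ();
       }
     ... each thread pulls iterations from thr->ts.work_share ...
     gomp_work_share_end_nowait ();

   gomp_work_share_init_done (libgomp.h) stores the new work share into
   the previous share's next_ws ptrlock, releasing the threads blocked
   in gomp_ptrlock_get above.  */
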
/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
	free_work_share (team, thr->ts.last_work_share);
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}

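/* Added note (editorial): what gets recycled here is
   thr->ts.last_work_share, the share from the previous construct, not
   the current one.  Arriving last at the team barrier proves every
   thread has already followed the previous share's next_ws link in
   gomp_work_share_start, so only then is it safe to reuse; the current
   share is reclaimed the same way one construct later, or when the
   team ends.  */
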
/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;

#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    free_work_share (team, thr->ts.last_work_share);
  thr->ts.last_work_share = NULL;
}
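
/* Added note (editorial): with no barrier available, the finish count
   lives in the current share's threads_completed field.  The thread
   that raises it to team->nthreads knows all threads have finished this
   construct, hence have long since chained past the previous share, so
   freeing thr->ts.last_work_share is safe even though no thread waits
   here.  */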