/* Copyright (C) 2005, 2008 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */
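
/* Overview of the scheme implemented below (a summary, not normative):
   each gomp_team keeps a small cache of gomp_work_share structures.
   work_share_list_alloc is a free list consumed only inside a critical
   section, so by one thread at a time; work_share_list_free receives
   structures released by any thread, concurrently when sync builtins
   are available.  Chunks of structures are chained via next_alloc off
   team->work_shares[0] so that the whole cache can be released when
   the team is destroyed (the teardown in team.c is assumed to walk
   this chain).  */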

#include "libgomp.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>


/* Allocate a new work share structure, preferably from the current team's
   free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }

#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));

  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif

  team->work_share_chunk *= 2;
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}
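
/* Illustrative growth example, assuming a team starts with
   work_share_chunk == 8 and the initial chunk embedded in struct
   gomp_team (as team.c appears to set it up): the first time both free
   lists are exhausted, the code above doubles the chunk to 16 and
   mallocs 16 structures; ws[0] is returned to the caller while
   ws[1] .. ws[15] go onto work_share_list_alloc via next_free.  The
   next overflow allocates 32, and so on.  */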

/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_CNT \
  ((sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
   / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))
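
/* INLINE_ORDERED_TEAM_IDS_CNT is the number of team ids that fit in
   the otherwise unused space between the start of the
   inline_ordered_team_ids array and the end of struct gomp_work_share:
   (struct size - field offset) / element size.  With made-up numbers,
   a 512-byte struct whose array starts at byte 480 holds
   (512 - 480) / 4 == 8 four-byte ids; the real layout is defined in
   libgomp.h.  */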

      if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
        ws->ordered_team_ids
          = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0',
              nthreads * sizeof (*ws->ordered_team_ids));
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = NULL;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}

/* Do any needed destruction of gomp_work_share fields before it
   is put back into the free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct; if not orphaned, put it into the current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
#ifdef HAVE_SYNC_BUILTINS
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}
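
/* Design note: the HAVE_SYNC_BUILTINS branch above is a lock-free stack
   push, and it pairs with the pop in alloc_work_share.  The pop
   deliberately leaves the head node in place and detaches the chain
   hanging off it; because concurrent pushes only ever write the head
   pointer and the (not yet published) new node's next_free, the
   serialized allocator can then use the detached nodes without further
   synchronization.  */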

/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */

bool
gomp_work_share_start (bool ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}
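
/* A minimal usage sketch (not a real caller; it assumes the
   gomp_work_share_init_done helper from libgomp.h, which publishes
   thr->ts.work_share through the predecessor's next_ws ptrlock so
   threads blocked in gomp_ptrlock_get above can proceed):

     if (gomp_work_share_start (false))
       {
         struct gomp_thread *thr = gomp_thread ();
         fill_in_bounds (thr->ts.work_share);   // hypothetical init
         gomp_work_share_init_done ();
       }
     take_iterations_from_work_share ();        // hypothetical work
     gomp_work_share_end_nowait ();

   The actual work-sharing entry points in loop.c, sections.c and
   single.c follow this pattern.  */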

/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        free_work_share (team, thr->ts.last_work_share);
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}
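
/* Note on the deferred free in this function and the nowait variant
   below: the gomp_work_share for construct N is how threads arriving
   late find construct N+1 (through its next_ws ptrlock), so it cannot
   be reclaimed when N itself ends.  Each thread instead records it in
   thr->ts.last_work_share on starting N+1, and it is freed at the end
   of N+1 by whichever thread is last to get there.  */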

/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;

#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    free_work_share (team, thr->ts.last_work_share);
  thr->ts.last_work_share = NULL;
}