/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is a Linux specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the futex syscall.  */
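
/* A minimal usage sketch (an editorial illustration, not code from this
   file): gomp_barrier_init and gomp_barrier_destroy are declared in
   bar.h; NTHREADS stands in for whatever arrival count the caller
   chooses.

     gomp_barrier_t bar;
     gomp_barrier_init (&bar, nthreads);  // expect NTHREADS arrivals
     ...                                  // in each of the NTHREADS threads:
     gomp_barrier_wait (&bar);            // blocks until all have arrived
     ...
     gomp_barrier_destroy (&bar);
*/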

#include <limits.h>
#include "wait.h"


void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                        MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      do
        do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}

void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
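
/* An illustrative sketch (an editorial illustration, not code from this
   file): the start/end split above lets a caller learn whether it was
   the last thread to arrive before completing the barrier.
   gomp_barrier_wait_start in bar.h sets BAR_WAS_LAST in the returned
   state for exactly one arriving thread per round.

     gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
     if (state & BAR_WAS_LAST)
       do_last_arriver_work ();         // hypothetical helper
     gomp_barrier_wait_end (bar, state);
*/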

/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed; a sketch of this pattern
   follows the function below.  */

void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  if (state & BAR_WAS_LAST)
    gomp_barrier_wait_end (bar, state);
}
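
/* A sketch of the destruction pattern described above (an editorial
   illustration, not code from this file):

     // every thread except the one tearing the barrier down:
     gomp_barrier_wait_last (&bar);

     // the single thread that owns teardown:
     gomp_barrier_wait (&bar);      // returns once all threads have arrived
     gomp_barrier_destroy (&bar);   // now safe: no thread still touches BAR
*/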

void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
}

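/* Team barriers extend the plain barrier with task handling and
   cancellation.  The protocol below relies on the layout of
   bar->generation described in bar.h: the low bits carry flags
   (BAR_TASK_PENDING, BAR_WAITING_FOR_TASK, BAR_CANCELLED) while the
   counter above them advances by BAR_INCR per barrier round.  The last
   thread to arrive either runs pending tasks via
   gomp_barrier_handle_tasks or bumps the generation and wakes everyone;
   the other threads sleep on the generation word, helping with tasks
   whenever BAR_TASK_PENDING appears, until the counter moves on.  */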
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state &= ~BAR_CANCELLED;
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return;
        }
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}

void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Like gomp_team_barrier_wait, but for the team's final barrier: arrivals
   are counted in bar->awaited_final (see gomp_barrier_wait_final_start in
   bar.h), which the last thread to arrive replenishes here.  */

void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}

bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
                                   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
         cancellation means that at least one of the threads has been
         cancelled, thus on a cancellable barrier we should never see
         all threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return false;
        }
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
        return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}

bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}
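
/* An illustrative sketch (an editorial illustration, not code from this
   file): a cancellable barrier reports cancellation through its return
   value, so a worker can bail out to the end of the region instead of
   blocking:

     if (gomp_team_barrier_wait_cancel (&team->barrier))
       return;   // barrier was cancelled, e.g. via OpenMP cancellation
*/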

void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}
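
/* Sketch of the cancelling side (an editorial illustration, not code from
   this file): gomp_team_barrier_cancel pairs with
   gomp_team_barrier_wait_cancel above.  One thread flags the barrier;
   every thread parked in the futex wait then wakes, observes
   BAR_CANCELLED, and returns true:

     if (cancel_requested)                  // hypothetical condition
       gomp_team_barrier_cancel (team);     // sets BAR_CANCELLED, wakes all
*/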