/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
   Contributed by Alexander Monakov <amonakov@ispras.ru>

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
25
/* This is an NVPTX specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and bar.sync instruction.  */

#ifndef GOMP_BARRIER_H
#define GOMP_BARRIER_H 1

#include "mutex.h"
34
/* Per-team barrier state.  Layout is shared with the out-of-line
   routines declared below; do not reorder fields.  */
typedef struct
{
  unsigned total;		/* Number of threads in the team.  */
  unsigned generation;		/* Barrier generation counter in the high
				   bits plus BAR_* flags in the low bits
				   (see comment below the typedef).  */
  unsigned awaited;		/* Countdown of threads yet to arrive at
				   the current barrier.  */
  unsigned awaited_final;	/* Separate countdown used only by the
				   team-end barrier (see
				   gomp_barrier_wait_final_start).  */
} gomp_barrier_t;
42
/* Snapshot of the generation word returned by the *_start routines and
   consumed by the *_end routines.  */
typedef unsigned int gomp_barrier_state_t;

/* The generation field contains a counter in the high bits, with a few
   low bits dedicated to flags.  Note that TASK_PENDING and WAS_LAST can
   share space because WAS_LAST is never stored back to generation.  */
#define BAR_TASK_PENDING 1	/* Tasks are queued on this barrier.  */
#define BAR_WAS_LAST 1		/* Caller was last to arrive (only ever set
				   in a gomp_barrier_state_t, never stored
				   back into bar->generation).  */
#define BAR_WAITING_FOR_TASK 2	/* Barrier is waiting for tasks to drain.  */
#define BAR_CANCELLED 4		/* The barrier region was cancelled.  */
#define BAR_INCR 8		/* Step of the generation counter; also the
				   mask boundary between flags and counter.  */
53
54static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
55{
56 bar->total = count;
57 bar->awaited = count;
58 bar->awaited_final = count;
59 bar->generation = 0;
60}
61
/* Resize the barrier to COUNT threads.  AWAITED is adjusted atomically
   by the signed delta (COUNT - TOTAL, computed modulo 2^32, which is
   well-defined for unsigned) so that arrivals already recorded against
   the old size remain accounted for.  */
static inline void gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
{
  __atomic_add_fetch (&bar->awaited, count - bar->total, MEMMODEL_ACQ_REL);
  bar->total = count;
}
67
/* Nothing to release: the barrier is plain memory with no OS handles
   (see the struct above), so destruction is a no-op.  */
static inline void gomp_barrier_destroy (gomp_barrier_t *bar)
{
}
71
/* Out-of-line barrier operations; definitions are not in this header.
   The plain gomp_barrier_* forms synchronize threads only.  */
extern void gomp_barrier_wait (gomp_barrier_t *);
extern void gomp_barrier_wait_last (gomp_barrier_t *);
extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
/* The gomp_team_barrier_* forms additionally interact with tasking and
   cancellation via the BAR_* flags above.  */
extern void gomp_team_barrier_wait (gomp_barrier_t *);
extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
					gomp_barrier_state_t);
/* NOTE(review): the _cancel variants presumably return whether the
   barrier was cancelled (cf. BAR_CANCELLED) -- confirm against the
   out-of-line definitions.  */
extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
					       gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
struct gomp_team;		/* Opaque here; only passed by pointer.  */
extern void gomp_team_barrier_cancel (struct gomp_team *);
85
/* Record the caller's arrival at the barrier and return the state to be
   passed to the matching *_end routine.  BAR_WAS_LAST is set in the
   returned state iff the caller was the last thread to arrive.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  /* -BAR_INCR is ~(BAR_INCR - 1), i.e. a mask of the generation-counter
     bits; OR-ing in BAR_CANCELLED keeps that flag while dropping the
     task-related low bits.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* A memory barrier is needed before exiting from the various forms
     of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
     2.8.6 flush Construct, which says there is an implicit flush during
     a barrier region.  This is a convenient place to add the barrier,
     so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE.  */
  if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
100
101static inline gomp_barrier_state_t
102gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
103{
104 return gomp_barrier_wait_start (bar);
105}
106
/* This is like gomp_barrier_wait_start, except it decrements
   bar->awaited_final rather than bar->awaited and should be used
   for the gomp_team_end barrier only.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_final_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  /* Keep the generation-counter bits and BAR_CANCELLED; drop the
     task-related low flags (same masking as gomp_barrier_wait_start).  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* See above gomp_barrier_wait_start comment.  */
  if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
120
121static inline bool
122gomp_barrier_last_thread (gomp_barrier_state_t state)
123{
124 return state & BAR_WAS_LAST;
125}
126
127/* All the inlines below must be called with team->task_lock
128 held. */
129
130static inline void
131gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
132{
133 bar->generation |= BAR_TASK_PENDING;
134}
135
136static inline void
137gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
138{
139 bar->generation &= ~BAR_TASK_PENDING;
140}
141
142static inline void
143gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
144{
145 bar->generation |= BAR_WAITING_FOR_TASK;
146}
147
148static inline bool
149gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
150{
151 return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
152}
153
154static inline bool
155gomp_team_barrier_cancelled (gomp_barrier_t *bar)
156{
157 return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
158}
159
/* Advance the barrier to the next generation.  Masking STATE with
   -BAR_INCR (== ~(BAR_INCR - 1)) drops all low flag bits -- including
   BAR_CANCELLED -- and adding BAR_INCR bumps the counter by one step.
   Caller must hold team->task_lock (see comment block above).  */
static inline void
gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  bar->generation = (state & -BAR_INCR) + BAR_INCR;
}
165
#endif /* GOMP_BARRIER_H */