/* Copyright (C) 2015-2022 Free Software Foundation, Inc.
   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is an AMD GCN specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the s_barrier instruction.  It
   uses MEMMODEL_RELAXED here because barriers are within workgroups and
   therefore don't need to flush caches.  */
32 | #ifndef GOMP_BARRIER_H | |
33 | #define GOMP_BARRIER_H 1 | |
34 | ||
35 | #include "mutex.h" | |
36 | ||
typedef struct
{
  unsigned total;		/* Number of threads the barrier expects.  */
  unsigned generation;		/* Cycle counter in the high bits plus the
				   BAR_* flag bits (see below).  */
  unsigned awaited;		/* Threads still to arrive this generation;
				   counts down to zero.  */
  unsigned awaited_final;	/* Separate countdown used only by the final
				   (team-end) barrier.  */
} gomp_barrier_t;

/* Snapshot of the generation word (plus BAR_WAS_LAST) taken at arrival.  */
typedef unsigned int gomp_barrier_state_t;
46 | ||
/* The generation field contains a counter in the high bits, with a few
   low bits dedicated to flags.  Note that TASK_PENDING and WAS_LAST can
   share space because WAS_LAST is never stored back to generation.  */
#define BAR_TASK_PENDING 1	/* Tasks are queued on the team barrier.  */
#define BAR_WAS_LAST 1		/* Returned-state-only: last thread to arrive.  */
#define BAR_WAITING_FOR_TASK 2	/* Threads are waiting for tasks to finish.  */
#define BAR_CANCELLED 4		/* The barrier's region was cancelled.  */
#define BAR_INCR 8		/* First counter bit; one generation step.  */
55 | ||
56 | static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count) | |
57 | { | |
58 | bar->total = count; | |
59 | bar->awaited = count; | |
60 | bar->awaited_final = count; | |
61 | bar->generation = 0; | |
62 | } | |
63 | ||
/* Resize BAR for a team of COUNT threads.  */
static inline void gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
{
  /* Adjust the arrival countdown by the change in team size.  Unsigned
     wrap-around makes the delta correct even when COUNT < bar->total.  */
  __atomic_add_fetch (&bar->awaited, count - bar->total, MEMMODEL_RELAXED);
  bar->total = count;
}
69 | ||
70 | static inline void gomp_barrier_destroy (gomp_barrier_t *bar) | |
71 | { | |
72 | } | |
73 | ||
/* Out-of-line barrier entry points (simple thread barrier).  */
extern void gomp_barrier_wait (gomp_barrier_t *);
extern void gomp_barrier_wait_last (gomp_barrier_t *);
extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
/* Team-barrier variants; the "cancel" forms return a flag (presumably
   whether the barrier was cancelled — confirm against the definitions).  */
extern void gomp_team_barrier_wait (gomp_barrier_t *);
extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
					gomp_barrier_state_t);
extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
					       gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
struct gomp_team;
extern void gomp_team_barrier_cancel (struct gomp_team *);
87 | ||
/* Record this thread's arrival at BAR and return the barrier state:
   the current generation (plus CANCELLED flag), with BAR_WAS_LAST set
   iff this thread was the last to arrive.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
  /* Keep the generation counter and the CANCELLED flag; mask off the
     other low flag bits.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* OpenMP API 3.1 section 2.8.6 (flush Construct) requires an implicit
     flush during a barrier region.  On this target barriers stay within a
     single workgroup, which doesn't need cache flushes (see the comment at
     the top of this file), so MEMMODEL_RELAXED suffices here.  */
  if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_RELAXED) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
102 | ||
103 | static inline gomp_barrier_state_t | |
104 | gomp_barrier_wait_cancel_start (gomp_barrier_t *bar) | |
105 | { | |
106 | return gomp_barrier_wait_start (bar); | |
107 | } | |
108 | ||
/* This is like gomp_barrier_wait_start, except it decrements
   bar->awaited_final rather than bar->awaited and should be used
   for the gomp_team_end barrier only.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_final_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
  /* Keep the generation counter and the CANCELLED flag only.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* See above gomp_barrier_wait_start comment.  */
  if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_RELAXED) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
122 | ||
123 | static inline bool | |
124 | gomp_barrier_last_thread (gomp_barrier_state_t state) | |
125 | { | |
126 | return state & BAR_WAS_LAST; | |
127 | } | |
128 | ||
/* All the inlines below must be called with team->task_lock
   held.  */
131 | ||
132 | static inline void | |
133 | gomp_team_barrier_set_task_pending (gomp_barrier_t *bar) | |
134 | { | |
135 | bar->generation |= BAR_TASK_PENDING; | |
136 | } | |
137 | ||
138 | static inline void | |
139 | gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar) | |
140 | { | |
141 | bar->generation &= ~BAR_TASK_PENDING; | |
142 | } | |
143 | ||
144 | static inline void | |
145 | gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar) | |
146 | { | |
147 | bar->generation |= BAR_WAITING_FOR_TASK; | |
148 | } | |
149 | ||
150 | static inline bool | |
151 | gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar) | |
152 | { | |
153 | return (bar->generation & BAR_WAITING_FOR_TASK) != 0; | |
154 | } | |
155 | ||
156 | static inline bool | |
157 | gomp_team_barrier_cancelled (gomp_barrier_t *bar) | |
158 | { | |
159 | return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0); | |
160 | } | |
161 | ||
162 | static inline void | |
163 | gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state) | |
164 | { | |
165 | bar->generation = (state & -BAR_INCR) + BAR_INCR; | |
166 | } | |
167 | ||
#endif /* GOMP_BARRIER_H */