// <barrier> -*- C++ -*-

// Copyright (C) 2020-2025 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// This implementation is based on libcxx/include/barrier
//===-- barrier.h --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------===//

/** @file include/barrier
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_BARRIER
#define _GLIBCXX_BARRIER 1

#ifdef _GLIBCXX_SYSHDR
#pragma GCC system_header
#endif

#include <bits/requires_hosted.h> // threading primitive

#define __glibcxx_want_barrier
#include <bits/version.h>

#ifdef __cpp_lib_barrier // C++ >= 20 && __cpp_aligned_new && lib_atomic_wait
#include <bits/atomic_base.h>
#include <bits/std_thread.h>
#include <bits/unique_ptr.h>

#include <array>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

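  // The default completion function for std::barrier<>: invoking it at the
  // end of each phase is a no-op.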
  struct __empty_completion
  {
    _GLIBCXX_ALWAYS_INLINE void
    operator()() noexcept
    { }
  };

  /*

     The default implementation of __tree_barrier is a classic tree barrier.

     It looks different from literature pseudocode for two main reasons:
      1. Threads that call into std::barrier functions do not provide indices,
	 so a numbering step is added before the actual barrier algorithm,
	 appearing as an N+1 round to the N rounds of the tree barrier.
      2. A great deal of attention has been paid to avoid cache line thrashing
	 by flattening the tree structure into cache-line sized arrays that
	 are indexed in an efficient way.

  */
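
  /* As an illustration (a worked example, not part of the algorithm): with
     an expected count of 5, _M_alloc_state() below allocates (5+1)/2 == 3
     __state_t nodes.  In round 0 the five arrivals pair off on nodes 0-2;
     node 2 is the odd one out and its single arriver advances alone.  The
     three round-0 winners then compete on nodes 0-1 in round 1, the two
     round-1 winners meet on node 0 in round 2, and the single round-2
     winner completes the phase.  Each node's 64-byte ticket array holds
     one phase byte per round, so a node fits in one cache line and
     successive rounds never contend on the same ticket.  */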

  enum class __barrier_phase_t : unsigned char { };

  struct __tree_barrier_base
  {
    static constexpr ptrdiff_t
    max() noexcept
    { return __PTRDIFF_MAX__ - 1; }

  protected:
    using __atomic_phase_ref_t = std::__atomic_ref<__barrier_phase_t>;
    using __atomic_phase_const_ref_t = std::__atomic_ref<const __barrier_phase_t>;
    static constexpr auto __phase_alignment =
      __atomic_phase_ref_t::required_alignment;

    using __tickets_t = std::array<__barrier_phase_t, 64>;
    struct alignas(64) /* naturally-align the heap state */ __state_t
    {
      alignas(__phase_alignment) __tickets_t __tickets;
    };

    ptrdiff_t _M_expected;
    __atomic_base<__state_t*> _M_state{nullptr};
    __atomic_base<ptrdiff_t> _M_expected_adjustment{0};
    alignas(__phase_alignment) __barrier_phase_t _M_phase{};

    explicit constexpr
    __tree_barrier_base(ptrdiff_t __expected)
    : _M_expected(__expected)
    {
      __glibcxx_assert(__expected >= 0 && __expected <= max());

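      // Allocation is not possible during constant evaluation, so a
      // constexpr-constructed barrier defers creating the tree state to
      // the first call to arrive().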
      if (!std::is_constant_evaluated())
	_M_state.store(_M_alloc_state().release(), memory_order_release);
    }

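    // One tree node serves a pair of arrivals, so ceil(_M_expected / 2)
    // nodes cover round 0; later rounds use progressively fewer nodes.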
    unique_ptr<__state_t[]>
    _M_alloc_state()
    {
      size_t const __count = (_M_expected + 1) >> 1;
      return std::make_unique<__state_t[]>(__count);
    }

    bool
    _M_arrive(__barrier_phase_t __old_phase, size_t __current)
    {
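      // Phases advance by 2 per completed barrier cycle: __old_phase + 1
      // marks a ticket with one arrival, __old_phase + 2 with both.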
      const auto __old_phase_val = static_cast<unsigned char>(__old_phase);
      const auto __half_step =
	static_cast<__barrier_phase_t>(__old_phase_val + 1);
      const auto __full_step =
	static_cast<__barrier_phase_t>(__old_phase_val + 2);

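      // Reduce the caller-supplied slot to a valid node index; the tree
      // has ceil(_M_expected / 2) nodes.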
      size_t __current_expected = _M_expected;
      __current %= ((_M_expected + 1) >> 1);

      __state_t* const __state = _M_state.load(memory_order_relaxed);

      for (int __round = 0; ; ++__round)
	{
	  if (__current_expected <= 1)
	    return true;
	  size_t const __end_node = ((__current_expected + 1) >> 1),
		       __last_node = __end_node - 1;
	  for ( ; ; ++__current)
	    {
	      if (__current == __end_node)
		__current = 0;
	      auto __expect = __old_phase;
	      __atomic_phase_ref_t __phase(__state[__current]
					   .__tickets[__round]);
	      if (__current == __last_node && (__current_expected & 1))
		{
		  if (__phase.compare_exchange_strong(__expect, __full_step,
						      memory_order_acq_rel))
		    break;     // I'm 1 in 1, go to next __round
		}
	      else if (__phase.compare_exchange_strong(__expect, __half_step,
						       memory_order_acq_rel))
		{
		  return false; // I'm 1 in 2, done with arrival
		}
	      else if (__expect == __half_step)
		{
		  if (__phase.compare_exchange_strong(__expect, __full_step,
						      memory_order_acq_rel))
		    break;     // I'm 2 in 2, go to next __round
		}
	    }
	  __current_expected = __last_node + 1;
	  __current >>= 1;
	}
    }
  };

  template<typename _CompletionF>
    class __tree_barrier : public __tree_barrier_base
    {
      [[no_unique_address]] _CompletionF _M_completion;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3898. Possibly unintended preconditions for completion functions
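      // The completion is invoked from a noexcept member, so an exception
      // escaping the completion function terminates the program.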
      void _M_invoke_completion() noexcept { _M_completion(); }

    public:
      using arrival_token = __barrier_phase_t;

      constexpr
      __tree_barrier(ptrdiff_t __expected, _CompletionF __completion)
      : __tree_barrier_base(__expected), _M_completion(std::move(__completion))
      { }

      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update)
      {
	__glibcxx_assert(__update > 0);
	// FIXME: Check that update is less than or equal to the expected count
	// for the current barrier phase.

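	// Numbering step: callers do not supply an index, so hash the
	// thread id to choose a starting node in the flattened tree.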
	std::hash<std::thread::id> __hasher;
	size_t __current = __hasher(std::this_thread::get_id());
	__atomic_phase_ref_t __phase(_M_phase);
	const auto __old_phase = __phase.load(memory_order_relaxed);
	const auto __cur = static_cast<unsigned char>(__old_phase);

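	// If the barrier was constructed during constant evaluation the
	// tree state does not exist yet; the first arrival allocates it.
	// A thread that loses the CAS frees its copy via unique_ptr.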
	if (__cur == 0 && !_M_state.load(memory_order_relaxed)) [[unlikely]]
	  {
	    auto __p = _M_alloc_state();
	    __state_t* __val = nullptr;
	    if (_M_state.compare_exchange_strong(__val, __p.get(),
						 memory_order_seq_cst,
						 memory_order_acquire))
	      __p.release();
	  }

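	// Each unit of __update is a separate arrival; whichever arrival
	// turns out to be the last of the phase runs the completion below.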
	for (; __update; --__update)
	  {
	    if (_M_arrive(__old_phase, __current))
	      {
		_M_invoke_completion();
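		// Fold in pending arrive_and_drop() adjustments before
		// publishing the new phase to waiters.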
		_M_expected += _M_expected_adjustment.load(memory_order_relaxed);
		_M_expected_adjustment.store(0, memory_order_relaxed);
		auto __new_phase = static_cast<__barrier_phase_t>(__cur + 2);
		__phase.store(__new_phase, memory_order_release);
		__phase.notify_all();
	      }
	  }
	return __old_phase;
      }

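      // Blocks until the phase advances past the token's value, i.e. until
      // some thread completes the phase and calls notify_all above.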
      void
      wait(arrival_token&& __old_phase) const
      {
	__atomic_phase_const_ref_t __phase(_M_phase);
	__phase.wait(__old_phase, memory_order_acquire);
      }

      void
      arrive_and_drop()
      {
	_M_expected_adjustment.fetch_sub(1, memory_order_relaxed);
	(void)arrive(1);
      }
    };

  template<typename _CompletionF = __empty_completion>
    class barrier
    {
      static_assert(is_invocable_v<_CompletionF&>);

      // Note: we may introduce a "central" barrier algorithm at some point
      // for more space-constrained targets.
      using __algorithm_t = __tree_barrier<_CompletionF>;
      __algorithm_t _M_b;

    public:
      class arrival_token final
      {
      public:
	arrival_token(arrival_token&&) = default;
	arrival_token& operator=(arrival_token&&) = default;
	~arrival_token() = default;

      private:
	friend class barrier;
	using __token = typename __algorithm_t::arrival_token;
	explicit arrival_token(__token __tok) noexcept : _M_tok(__tok) { }
	__token _M_tok;
      };

      static constexpr ptrdiff_t
      max() noexcept
      { return __algorithm_t::max(); }

      constexpr explicit
      barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF())
      : _M_b(__count, std::move(__completion))
      { }

      barrier(barrier const&) = delete;
      barrier& operator=(barrier const&) = delete;

      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update = 1)
      { return arrival_token{_M_b.arrive(__update)}; }

      void
      wait(arrival_token&& __phase) const
      { _M_b.wait(std::move(__phase._M_tok)); }

      void
      arrive_and_wait()
      { wait(arrive()); }

      void
      arrive_and_drop()
      { _M_b.arrive_and_drop(); }
    };
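
  /* A minimal usage sketch (illustrative test code only, not part of this
     header; the names sync, work, and t1..t3 are hypothetical):

	 #include <barrier>
	 #include <thread>

	 std::barrier sync(3);            // three participating threads
	 auto work = [&] {
	   // ... per-thread phase 1 work ...
	   sync.arrive_and_wait();        // block until all three arrive
	   // ... per-thread phase 2 work ...
	 };
	 std::thread t1(work), t2(work), t3(work);
	 t1.join(); t2.join(); t3.join();
  */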

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // __cpp_lib_barrier
#endif // _GLIBCXX_BARRIER