// <barrier> -*- C++ -*-

// Copyright (C) 2020-2025 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

// This implementation is based on libcxx/include/barrier
//===-- barrier.h --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------===//

/** @file include/barrier
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_BARRIER
#define _GLIBCXX_BARRIER 1

#ifdef _GLIBCXX_SYSHDR
#pragma GCC system_header
#endif

#include <bits/requires_hosted.h> // threading primitive

#define __glibcxx_want_barrier
#include <bits/version.h>

#ifdef __cpp_lib_barrier // C++ >= 20 && __cpp_aligned_new && lib_atomic_wait
#include <bits/atomic_base.h>
#include <bits/std_thread.h>
#include <bits/unique_ptr.h>

#include <array> // for std::array (used by __tree_barrier_base::__tickets_t)

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  struct __empty_completion
  {
    _GLIBCXX_ALWAYS_INLINE void
    operator()() noexcept
    { }
  };

/*

The default implementation of __tree_barrier is a classic tree barrier.

It looks different from literature pseudocode for two main reasons:
 1. Threads that call into std::barrier functions do not provide indices,
    so a numbering step is added before the actual barrier algorithm,
    appearing as an N+1 round to the N rounds of the tree barrier.
 2. A great deal of attention has been paid to avoid cache line thrashing
    by flattening the tree structure into cache-line sized arrays that
    are indexed in an efficient way.

*/

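/*
 Illustrative walk-through (informal; derived from the code below, not a
 normative description): with an expected count of 5, _M_alloc_state()
 allocates (5 + 1) / 2 == 3 __state_t cells, and an arriving thread picks a
 starting cell from hash(this_thread::get_id()) % 3.  In round 0 each cell
 pairs up two arrivals (the last cell accepts a single arrival when the
 count is odd); the first of a pair publishes a "half step" phase value and
 is done, while the second advances to round 1, where the expected count
 has shrunk to 3, then 2, then 1.  The one thread that wins every round is
 the last to arrive for the phase: _M_arrive() returns true for it, and
 arrive() then runs the completion function and bumps the stored phase by 2
 (the intermediate odd value serves only as the half-step marker).
*/
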
  enum class __barrier_phase_t : unsigned char { };

  struct __tree_barrier_base
  {
    static constexpr ptrdiff_t
    max() noexcept
    { return __PTRDIFF_MAX__ - 1; }

  protected:
    using __atomic_phase_ref_t = std::__atomic_ref<__barrier_phase_t>;
    using __atomic_phase_const_ref_t = std::__atomic_ref<const __barrier_phase_t>;
    static constexpr auto __phase_alignment =
      __atomic_phase_ref_t::required_alignment;

    using __tickets_t = std::array<__barrier_phase_t, 64>;
    struct alignas(64) /* naturally-align the heap state */ __state_t
    {
      alignas(__phase_alignment) __tickets_t __tickets;
    };

    ptrdiff_t _M_expected;
    __atomic_base<__state_t*> _M_state{nullptr};
    __atomic_base<ptrdiff_t> _M_expected_adjustment{0};
    alignas(__phase_alignment) __barrier_phase_t _M_phase{};
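
    // Layout note (informal, derived from the declarations above): each
    // __state_t occupies one 64-byte-aligned cache line holding 64 one-byte
    // phase tickets, one slot per tree round, and _M_alloc_state() below
    // allocates (_M_expected + 1) / 2 such cells, so threads meeting at a
    // given tree node contend only on that node's cache line.
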
    explicit constexpr
    __tree_barrier_base(ptrdiff_t __expected)
    : _M_expected(__expected)
    {
      __glibcxx_assert(__expected >= 0 && __expected <= max());

      if (!std::is_constant_evaluated())
        _M_state.store(_M_alloc_state().release(), memory_order_release);
    }

    unique_ptr<__state_t[]>
    _M_alloc_state()
    {
      size_t const __count = (_M_expected + 1) >> 1;
      return std::make_unique<__state_t[]>(__count);
    }

    bool
    _M_arrive(__barrier_phase_t __old_phase, size_t __current)
    {
      const auto __old_phase_val = static_cast<unsigned char>(__old_phase);
      const auto __half_step =
        static_cast<__barrier_phase_t>(__old_phase_val + 1);
      const auto __full_step =
        static_cast<__barrier_phase_t>(__old_phase_val + 2);

      size_t __current_expected = _M_expected;
      __current %= ((_M_expected + 1) >> 1);

      __state_t* const __state = _M_state.load(memory_order_relaxed);

      for (int __round = 0; ; ++__round)
        {
          if (__current_expected <= 1)
            return true;
          size_t const __end_node = ((__current_expected + 1) >> 1),
            __last_node = __end_node - 1;
          for ( ; ; ++__current)
            {
              if (__current == __end_node)
                __current = 0;
              auto __expect = __old_phase;
              __atomic_phase_ref_t __phase(__state[__current]
                                           .__tickets[__round]);
              if (__current == __last_node && (__current_expected & 1))
                {
                  if (__phase.compare_exchange_strong(__expect, __full_step,
                                                      memory_order_acq_rel))
                    break;     // I'm 1 in 1, go to next __round
                }
              else if (__phase.compare_exchange_strong(__expect, __half_step,
                                                       memory_order_acq_rel))
                {
                  return false; // I'm 1 in 2, done with arrival
                }
              else if (__expect == __half_step)
                {
                  if (__phase.compare_exchange_strong(__expect, __full_step,
                                                      memory_order_acq_rel))
                    break;    // I'm 2 in 2, go to next __round
                }
            }
          __current_expected = __last_node + 1;
          __current /= 2;
        }
    }
  };

  template<typename _CompletionF>
    class __tree_barrier : public __tree_barrier_base
    {
      [[no_unique_address]] _CompletionF _M_completion;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3898. Possibly unintended preconditions for completion functions
      void _M_invoke_completion() noexcept { _M_completion(); }

    public:
      using arrival_token = __barrier_phase_t;

      constexpr
      __tree_barrier(ptrdiff_t __expected, _CompletionF __completion)
      : __tree_barrier_base(__expected), _M_completion(std::move(__completion))
      { }

      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update)
      {
        __glibcxx_assert(__update > 0);
        // FIXME: Check that update is less than or equal to the expected count
        // for the current barrier phase.

        std::hash<std::thread::id> __hasher;
        size_t __current = __hasher(std::this_thread::get_id());
        __atomic_phase_ref_t __phase(_M_phase);
        const auto __old_phase = __phase.load(memory_order_relaxed);
        const auto __cur = static_cast<unsigned char>(__old_phase);

        if (__cur == 0 && !_M_state.load(memory_order_relaxed)) [[unlikely]]
          {
            auto __p = _M_alloc_state();
            __state_t* __val = nullptr;
            if (_M_state.compare_exchange_strong(__val, __p.get(),
                                                 memory_order_seq_cst,
                                                 memory_order_acquire))
              __p.release();
          }

        for (; __update; --__update)
          {
            if (_M_arrive(__old_phase, __current))
              {
                _M_invoke_completion();
                _M_expected += _M_expected_adjustment.load(memory_order_relaxed);
                _M_expected_adjustment.store(0, memory_order_relaxed);
                auto __new_phase = static_cast<__barrier_phase_t>(__cur + 2);
                __phase.store(__new_phase, memory_order_release);
                __phase.notify_all();
              }
          }
        return __old_phase;
      }

      void
      wait(arrival_token&& __old_phase) const
      {
        __atomic_phase_const_ref_t __phase(_M_phase);
        __phase.wait(__old_phase, memory_order_acquire);
      }

      void
      arrive_and_drop()
      {
        _M_expected_adjustment.fetch_sub(1, memory_order_relaxed);
        (void)arrive(1);
      }
    };

  template<typename _CompletionF = __empty_completion>
    class barrier
    {
      static_assert(is_invocable_v<_CompletionF&>);

      // Note, we may introduce a "central" barrier algorithm at some point
      // for more space constrained targets
      using __algorithm_t = __tree_barrier<_CompletionF>;
      __algorithm_t _M_b;

    public:
      class arrival_token final
      {
      public:
        arrival_token(arrival_token&&) = default;
        arrival_token& operator=(arrival_token&&) = default;
        ~arrival_token() = default;

      private:
        friend class barrier;
        using __token = typename __algorithm_t::arrival_token;
        explicit arrival_token(__token __tok) noexcept : _M_tok(__tok) { }
        __token _M_tok;
      };

      static constexpr ptrdiff_t
      max() noexcept
      { return __algorithm_t::max(); }

      explicit constexpr
      barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF())
      : _M_b(__count, std::move(__completion))
      { }

      barrier(barrier const&) = delete;
      barrier& operator=(barrier const&) = delete;

      [[nodiscard]] arrival_token
      arrive(ptrdiff_t __update = 1)
      { return arrival_token{_M_b.arrive(__update)}; }

      void
      wait(arrival_token&& __phase) const
      { _M_b.wait(std::move(__phase._M_tok)); }

      void
      arrive_and_wait()
      { wait(arrive()); }

      void
      arrive_and_drop()
      { _M_b.arrive_and_drop(); }
    };

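  /*
   Usage sketch (illustrative only; not part of this header).  A minimal
   program in which three worker threads synchronize at two phase
   boundaries; the completion function runs exactly once per phase, after
   the last thread arrives and before any waiting thread is released:

     #include <barrier>
     #include <thread>
     #include <vector>

     int main()
     {
       std::barrier sync(3, []() noexcept { });   // no-op completion step
       std::vector<std::jthread> workers;
       for (int i = 0; i < 3; ++i)
         workers.emplace_back([&sync]
         {
           // ... phase 0 work ...
           sync.arrive_and_wait();   // equivalent to sync.wait(sync.arrive())
           // ... phase 1 work ...
           sync.arrive_and_wait();
         });
     }
  */
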
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // __cpp_lib_barrier
#endif // _GLIBCXX_BARRIER