// -*- C++ -*- header.

// Copyright (C) 2020-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_wait.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_WAIT_H
#define _GLIBCXX_ATOMIC_WAIT_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
#include <bits/functional_hash.h>
#include <bits/gthr.h>
#include <ext/numeric_traits.h>

#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
# include <cerrno>
# include <climits>
# include <unistd.h>
# include <syscall.h>
# include <bits/functexcept.h>
#endif

# include <bits/std_mutex.h> // std::mutex, std::__condvar

#define __cpp_lib_atomic_wait 201907L

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    using __platform_wait_t = int;
    static constexpr size_t __platform_wait_alignment = 4;
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
    using __platform_wait_t = uint64_t;
    static constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
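
// An illustrative sketch only, not part of this header: a port that can do
// better than the mutex/condvar fallback would mirror the futex branch above,
// for example (the __ulock_* names come from the comment above; the bodies
// here are placeholders, not a real implementation):
//
//   #define _GLIBCXX_HAVE_PLATFORM_WAIT 1
//   using __platform_wait_t = /* type the native primitive operates on */;
//   static constexpr size_t __platform_wait_alignment
//     = alignof(__platform_wait_t);
//
//   template<typename _Tp>
//     void
//     __platform_wait(const _Tp* __addr, __platform_wait_t __old) noexcept
//     { /* block while *__addr still equals __old, e.g. via __ulock_wait() */ }
//
//   template<typename _Tp>
//     void
//     __platform_notify(const _Tp* __addr, bool __all) noexcept
//     { /* wake one or all waiters on __addr, e.g. via __ulock_wake() */ }
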
  } // namespace __detail

  template<typename _Tp>
    inline constexpr bool __platform_wait_uses_type
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      = is_scalar_v<_Tp>
        && ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
        && (alignof(_Tp) >= __detail::__platform_wait_alignment));
#else
      = false;
#endif

  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
    enum class __futex_wait_flags : int
    {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
      __private_flag = 128,
#else
      __private_flag = 0,
#endif
      __wait = 0,
      __wake = 1,
      __wait_bitset = 9,
      __wake_bitset = 10,
      __wait_private = __wait | __private_flag,
      __wake_private = __wake | __private_flag,
      __wait_bitset_private = __wait_bitset | __private_flag,
      __wake_bitset_private = __wake_bitset | __private_flag,
      __bitset_match_any = -1
    };

    template<typename _Tp>
      void
      __platform_wait(const _Tp* __addr, __platform_wait_t __val) noexcept
      {
        auto __e = syscall (SYS_futex, static_cast<const void*>(__addr),
                            static_cast<int>(__futex_wait_flags::__wait_private),
                            __val, nullptr);
        if (!__e || errno == EAGAIN)
          return;
        if (errno != EINTR)
          __throw_system_error(errno);
      }

    template<typename _Tp>
      void
      __platform_notify(const _Tp* __addr, bool __all) noexcept
      {
        syscall (SYS_futex, static_cast<const void*>(__addr),
                 static_cast<int>(__futex_wait_flags::__wake_private),
                 __all ? INT_MAX : 1);
      }
#endif

    inline void
    __thread_yield() noexcept
    {
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
      __gthread_yield();
#endif
    }

    inline void
    __thread_relax() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#else
      __thread_yield();
#endif
    }

    constexpr auto __atomic_spin_count_1 = 12;
    constexpr auto __atomic_spin_count_2 = 4;

    struct __default_spin_policy
    {
      bool
      operator()() const noexcept
      { return false; }
    };

    template<typename _Pred,
             typename _Spin = __default_spin_policy>
      bool
      __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
      {
        for (auto __i = 0; __i < __atomic_spin_count_1; ++__i)
          {
            if (__pred())
              return true;
            __detail::__thread_relax();
          }

        for (auto __i = 0; __i < __atomic_spin_count_2; ++__i)
          {
            if (__pred())
              return true;
            __detail::__thread_yield();
          }

        while (__spin())
          {
            if (__pred())
              return true;
          }

        return false;
      }

    // return true if equal
    template<typename _Tp>
      bool __atomic_compare(const _Tp& __a, const _Tp& __b)
      {
        // TODO make this do the correct padding bit ignoring comparison
        return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;
      }

    struct __waiter_pool_base
    {
      // Don't use std::hardware_destructive_interference_size here because we
      // don't want the layout of library types to depend on compiler options.
      static constexpr auto _S_align = 64;

      alignas(_S_align) __platform_wait_t _M_wait = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      mutex _M_mtx;
#endif

      alignas(_S_align) __platform_wait_t _M_ver = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      __condvar _M_cv;
#endif
      __waiter_pool_base() = default;

      void
      _M_enter_wait() noexcept
      { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_ACQ_REL); }

      void
      _M_leave_wait() noexcept
      { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_ACQ_REL); }

      bool
      _M_waiting() const noexcept
      {
        __platform_wait_t __res;
        __atomic_load(&_M_wait, &__res, __ATOMIC_ACQUIRE);
        return __res > 0;
      }

      void
      _M_notify(const __platform_wait_t* __addr, bool __all, bool __bare) noexcept
      {
        if (!(__bare || _M_waiting()))
          return;

#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
        __platform_notify(__addr, __all);
#else
        if (__all)
          _M_cv.notify_all();
        else
          _M_cv.notify_one();
#endif
      }

      static __waiter_pool_base&
      _S_for(const void* __addr) noexcept
      {
        constexpr uintptr_t __ct = 16;
        static __waiter_pool_base __w[__ct];
        auto __key = (uintptr_t(__addr) >> 2) % __ct;
        return __w[__key];
      }
    };

    struct __waiter_pool : __waiter_pool_base
    {
      void
      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
        __platform_wait(__addr, __old);
#else
        __platform_wait_t __val;
        __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
        if (__val == __old)
          {
            lock_guard<mutex> __l(_M_mtx);
            _M_cv.wait(_M_mtx);
          }
#endif // _GLIBCXX_HAVE_PLATFORM_WAIT
      }
    };

    template<typename _Tp>
      struct __waiter_base
      {
        using __waiter_type = _Tp;

        __waiter_type& _M_w;
        __platform_wait_t* _M_addr;

        template<typename _Up>
          static __platform_wait_t*
          _S_wait_addr(const _Up* __a, __platform_wait_t* __b)
          {
            if constexpr (__platform_wait_uses_type<_Up>)
              return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));
            else
              return __b;
          }

        static __waiter_type&
        _S_for(const void* __addr) noexcept
        {
          static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));
          auto& res = __waiter_pool_base::_S_for(__addr);
          return reinterpret_cast<__waiter_type&>(res);
        }

        template<typename _Up>
          explicit __waiter_base(const _Up* __addr) noexcept
          : _M_w(_S_for(__addr))
          , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
          { }

        bool
        _M_laundered() const
        { return _M_addr == &_M_w._M_ver; }

        void
        _M_notify(bool __all, bool __bare = false)
        {
          if (_M_laundered())
            {
              __atomic_fetch_add(_M_addr, 1, __ATOMIC_ACQ_REL);
              __all = true;
            }
          _M_w._M_notify(_M_addr, __all, __bare);
        }

        template<typename _Up, typename _ValFn,
                 typename _Spin = __default_spin_policy>
          static bool
          _S_do_spin_v(__platform_wait_t* __addr,
                       const _Up& __old, _ValFn __vfn,
                       __platform_wait_t& __val,
                       _Spin __spin = _Spin{ })
          {
            auto const __pred = [=]
              { return !__detail::__atomic_compare(__old, __vfn()); };

            if constexpr (__platform_wait_uses_type<_Up>)
              {
                __builtin_memcpy(&__val, &__old, sizeof(__val));
              }
            else
              {
                __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
              }
            return __atomic_spin(__pred, __spin);
          }

        template<typename _Up, typename _ValFn,
                 typename _Spin = __default_spin_policy>
          bool
          _M_do_spin_v(const _Up& __old, _ValFn __vfn,
                       __platform_wait_t& __val,
                       _Spin __spin = _Spin{ })
          { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }

        template<typename _Pred,
                 typename _Spin = __default_spin_policy>
          static bool
          _S_do_spin(const __platform_wait_t* __addr,
                     _Pred __pred,
                     __platform_wait_t& __val,
                     _Spin __spin = _Spin{ })
          {
            __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
            return __atomic_spin(__pred, __spin);
          }

        template<typename _Pred,
                 typename _Spin = __default_spin_policy>
          bool
          _M_do_spin(_Pred __pred, __platform_wait_t& __val,
                     _Spin __spin = _Spin{ })
          { return _S_do_spin(_M_addr, __pred, __val, __spin); }
      };

    template<typename _EntersWait>
      struct __waiter : __waiter_base<__waiter_pool>
      {
        using __base_type = __waiter_base<__waiter_pool>;

        template<typename _Tp>
          explicit __waiter(const _Tp* __addr) noexcept
          : __base_type(__addr)
          {
            if constexpr (_EntersWait::value)
              _M_w._M_enter_wait();
          }

        ~__waiter()
        {
          if constexpr (_EntersWait::value)
            _M_w._M_leave_wait();
        }

        template<typename _Tp, typename _ValFn>
          void
          _M_do_wait_v(_Tp __old, _ValFn __vfn)
          {
            __platform_wait_t __val;
            if (__base_type::_M_do_spin_v(__old, __vfn, __val))
              return;

            do
              {
                __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
              }
            while (__detail::__atomic_compare(__old, __vfn()));
          }

        template<typename _Pred>
          void
          _M_do_wait(_Pred __pred) noexcept
          {
            do
              {
                __platform_wait_t __val;
                if (__base_type::_M_do_spin(__pred, __val))
                  return;
                __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
              }
            while (!__pred());
          }
      };

    using __enters_wait = __waiter<std::true_type>;
    using __bare_wait = __waiter<std::false_type>;
  } // namespace __detail

  template<typename _Tp, typename _ValFn>
    void
    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,
                            _ValFn __vfn) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait_v(__old, __vfn);
    }

  template<typename _Tp, typename _Pred>
    void
    __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait(__pred);
    }

  // This call is to be used by atomic types which track contention externally
  template<typename _Pred>
    void
    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,
                               _Pred __pred) noexcept
    {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      do
        {
          __detail::__platform_wait_t __val;
          if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
            return;
          __detail::__platform_wait(__addr, __val);
        }
      while (!__pred());
#else // !_GLIBCXX_HAVE_PLATFORM_WAIT
      __detail::__bare_wait __w(__addr);
      __w._M_do_wait(__pred);
#endif
    }

  template<typename _Tp>
    void
    __atomic_notify_address(const _Tp* __addr, bool __all) noexcept
    {
      __detail::__bare_wait __w(__addr);
      __w._M_notify(__all);
    }

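  // An illustrative sketch, not part of this header: a type holding a
  // waitable value (the member name _M_val below is hypothetical) would
  // typically pair __atomic_wait_address_v and __atomic_notify_address like
  // this, re-loading the current value in the _ValFn callback:
  //
  //   void _M_wait(int __old) const noexcept
  //   {
  //     std::__atomic_wait_address_v(&_M_val, __old,
  //         [this] { return __atomic_load_n(&_M_val, __ATOMIC_ACQUIRE); });
  //   }
  //
  //   void _M_notify(bool __all) noexcept
  //   { std::__atomic_notify_address(&_M_val, __all); }
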
  // This call is to be used by atomic types which track contention externally
  inline void
  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,
                               bool __all) noexcept
  {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
    __detail::__platform_notify(__addr, __all);
#else
    __detail::__bare_wait __w(__addr);
    __w._M_notify(__all, true);
#endif
  }
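
  // An illustrative sketch, not part of this header: a type that counts its
  // waiters itself (so the shared __waiter_pool_base bookkeeping is not
  // needed) uses the bare variants on a __platform_wait_t member; the
  // _M_counter member below is hypothetical:
  //
  //   void _M_wait_for_nonzero() noexcept
  //   {
  //     std::__atomic_wait_address_bare(&_M_counter,
  //         [this] { return __atomic_load_n(&_M_counter, __ATOMIC_ACQUIRE) != 0; });
  //   }
  //
  //   void _M_release_one() noexcept
  //   {
  //     __atomic_fetch_add(&_M_counter, 1, __ATOMIC_RELEASE);
  //     std::__atomic_notify_address_bare(&_M_counter, false);
  //   }
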
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // GTHREADS || LINUX_FUTEX
#endif // _GLIBCXX_ATOMIC_WAIT_H