3 // Copyright (C) 2015-2021 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file bits/atomic_futex.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly.
30 #ifndef _GLIBCXX_ATOMIC_FUTEX_H
31 #define _GLIBCXX_ATOMIC_FUTEX_H 1
33 #pragma GCC system_header
35 #include <bits/c++config.h>
38 #if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
40 #include <condition_variable>
43 #ifndef _GLIBCXX_ALWAYS_INLINE
44 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
47 namespace std
_GLIBCXX_VISIBILITY(default)
49 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 #ifdef _GLIBCXX_HAS_GTHREADS
52 #if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
// Low-level futex operations used by __atomic_futex_unsigned. The member
// functions are only declared here; their definitions live elsewhere in the
// library (they wrap the Linux futex syscall).
struct __atomic_futex_unsigned_base
{
  // Wait on *__addr while it equals __val. If __has_timeout is true,
  // __s and __ns form an absolute timeout measured against CLOCK_REALTIME.
  // Returns false iff a timeout occurred.
  bool
  _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
		      std::chrono::seconds __s, std::chrono::nanoseconds __ns);

  // As above, but __s and __ns are measured against CLOCK_MONOTONIC.
  // Returns false iff a timeout occurred.
  bool
  _M_futex_wait_until_steady(unsigned *__addr, unsigned __val,
			     bool __has_timeout,
			     std::chrono::seconds __s,
			     std::chrono::nanoseconds __ns);

  // Wake all threads waiting on __addr.
  // This can be executed after the object has been destroyed.
  static void _M_futex_notify_all(unsigned* __addr);
};
71 template <unsigned _Waiter_bit
= 0x80000000>
72 class __atomic_futex_unsigned
: __atomic_futex_unsigned_base
74 typedef chrono::steady_clock __clock_t
;
76 // This must be lock-free and at offset 0.
77 atomic
<unsigned> _M_data
;
81 __atomic_futex_unsigned(unsigned __data
) : _M_data(__data
)
84 _GLIBCXX_ALWAYS_INLINE
unsigned
85 _M_load(memory_order __mo
)
87 return _M_data
.load(__mo
) & ~_Waiter_bit
;
91 // If a timeout occurs, returns a current value after the timeout;
92 // otherwise, returns the operand's value if equal is true or a different
93 // value if equal is false.
94 // The assumed value is the caller's assumption about the current value
95 // when making the call.
96 // __s and __ns are measured against CLOCK_REALTIME.
98 _M_load_and_test_until(unsigned __assumed
, unsigned __operand
,
99 bool __equal
, memory_order __mo
, bool __has_timeout
,
100 chrono::seconds __s
, chrono::nanoseconds __ns
)
104 // Don't bother checking the value again because we expect the caller
105 // to have done it recently.
106 // memory_order_relaxed is sufficient because we can rely on just the
107 // modification order (store_notify uses an atomic RMW operation too),
108 // and the futex syscalls synchronize between themselves.
109 _M_data
.fetch_or(_Waiter_bit
, memory_order_relaxed
);
110 bool __ret
= _M_futex_wait_until((unsigned*)(void*)&_M_data
,
111 __assumed
| _Waiter_bit
,
112 __has_timeout
, __s
, __ns
);
113 // Fetch the current value after waiting (clears _Waiter_bit).
114 __assumed
= _M_load(__mo
);
115 if (!__ret
|| ((__operand
== __assumed
) == __equal
))
117 // TODO adapt wait time
121 // If a timeout occurs, returns a current value after the timeout;
122 // otherwise, returns the operand's value if equal is true or a different
123 // value if equal is false.
124 // The assumed value is the caller's assumption about the current value
125 // when making the call.
126 // __s and __ns are measured against CLOCK_MONOTONIC.
128 _M_load_and_test_until_steady(unsigned __assumed
, unsigned __operand
,
129 bool __equal
, memory_order __mo
, bool __has_timeout
,
130 chrono::seconds __s
, chrono::nanoseconds __ns
)
134 // Don't bother checking the value again because we expect the caller
135 // to have done it recently.
136 // memory_order_relaxed is sufficient because we can rely on just the
137 // modification order (store_notify uses an atomic RMW operation too),
138 // and the futex syscalls synchronize between themselves.
139 _M_data
.fetch_or(_Waiter_bit
, memory_order_relaxed
);
140 bool __ret
= _M_futex_wait_until_steady((unsigned*)(void*)&_M_data
,
141 __assumed
| _Waiter_bit
,
142 __has_timeout
, __s
, __ns
);
143 // Fetch the current value after waiting (clears _Waiter_bit).
144 __assumed
= _M_load(__mo
);
145 if (!__ret
|| ((__operand
== __assumed
) == __equal
))
147 // TODO adapt wait time
151 // Returns the operand's value if equal is true or a different value if
153 // The assumed value is the caller's assumption about the current value
154 // when making the call.
156 _M_load_and_test(unsigned __assumed
, unsigned __operand
,
157 bool __equal
, memory_order __mo
)
159 return _M_load_and_test_until(__assumed
, __operand
, __equal
, __mo
,
163 // If a timeout occurs, returns a current value after the timeout;
164 // otherwise, returns the operand's value if equal is true or a different
165 // value if equal is false.
166 // The assumed value is the caller's assumption about the current value
167 // when making the call.
168 template<typename _Dur
>
170 _M_load_and_test_until_impl(unsigned __assumed
, unsigned __operand
,
171 bool __equal
, memory_order __mo
,
172 const chrono::time_point
<std::chrono::system_clock
, _Dur
>& __atime
)
174 auto __s
= chrono::time_point_cast
<chrono::seconds
>(__atime
);
175 auto __ns
= chrono::duration_cast
<chrono::nanoseconds
>(__atime
- __s
);
177 return _M_load_and_test_until(__assumed
, __operand
, __equal
, __mo
,
178 true, __s
.time_since_epoch(), __ns
);
181 template<typename _Dur
>
183 _M_load_and_test_until_impl(unsigned __assumed
, unsigned __operand
,
184 bool __equal
, memory_order __mo
,
185 const chrono::time_point
<std::chrono::steady_clock
, _Dur
>& __atime
)
187 auto __s
= chrono::time_point_cast
<chrono::seconds
>(__atime
);
188 auto __ns
= chrono::duration_cast
<chrono::nanoseconds
>(__atime
- __s
);
190 return _M_load_and_test_until_steady(__assumed
, __operand
, __equal
, __mo
,
191 true, __s
.time_since_epoch(), __ns
);
196 _GLIBCXX_ALWAYS_INLINE
unsigned
197 _M_load_when_not_equal(unsigned __val
, memory_order __mo
)
199 unsigned __i
= _M_load(__mo
);
200 if ((__i
& ~_Waiter_bit
) != __val
)
201 return (__i
& ~_Waiter_bit
);
202 // TODO Spin-wait first.
203 return _M_load_and_test(__i
, __val
, false, __mo
);
206 _GLIBCXX_ALWAYS_INLINE
void
207 _M_load_when_equal(unsigned __val
, memory_order __mo
)
209 unsigned __i
= _M_load(__mo
);
210 if ((__i
& ~_Waiter_bit
) == __val
)
212 // TODO Spin-wait first.
213 _M_load_and_test(__i
, __val
, true, __mo
);
216 // Returns false iff a timeout occurred.
217 template<typename _Rep
, typename _Period
>
218 _GLIBCXX_ALWAYS_INLINE
bool
219 _M_load_when_equal_for(unsigned __val
, memory_order __mo
,
220 const chrono::duration
<_Rep
, _Period
>& __rtime
)
222 using __dur
= typename
__clock_t::duration
;
223 return _M_load_when_equal_until(__val
, __mo
,
224 __clock_t::now() + chrono::__detail::ceil
<__dur
>(__rtime
));
227 // Returns false iff a timeout occurred.
228 template<typename _Clock
, typename _Duration
>
229 _GLIBCXX_ALWAYS_INLINE
bool
230 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
231 const chrono::time_point
<_Clock
, _Duration
>& __atime
)
233 typename
_Clock::time_point __c_entry
= _Clock::now();
235 const __clock_t::time_point __s_entry
= __clock_t::now();
236 const auto __delta
= __atime
- __c_entry
;
237 const auto __s_atime
= __s_entry
+
238 chrono::__detail::ceil
<__clock_t::duration
>(__delta
);
239 if (_M_load_when_equal_until(__val
, __mo
, __s_atime
))
241 __c_entry
= _Clock::now();
242 } while (__c_entry
< __atime
);
246 // Returns false iff a timeout occurred.
247 template<typename _Duration
>
248 _GLIBCXX_ALWAYS_INLINE
bool
249 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
250 const chrono::time_point
<std::chrono::system_clock
, _Duration
>& __atime
)
252 unsigned __i
= _M_load(__mo
);
253 if ((__i
& ~_Waiter_bit
) == __val
)
255 // TODO Spin-wait first. Ignore effect on timeout.
256 __i
= _M_load_and_test_until_impl(__i
, __val
, true, __mo
, __atime
);
257 return (__i
& ~_Waiter_bit
) == __val
;
260 // Returns false iff a timeout occurred.
261 template<typename _Duration
>
262 _GLIBCXX_ALWAYS_INLINE
bool
263 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
264 const chrono::time_point
<std::chrono::steady_clock
, _Duration
>& __atime
)
266 unsigned __i
= _M_load(__mo
);
267 if ((__i
& ~_Waiter_bit
) == __val
)
269 // TODO Spin-wait first. Ignore effect on timeout.
270 __i
= _M_load_and_test_until_impl(__i
, __val
, true, __mo
, __atime
);
271 return (__i
& ~_Waiter_bit
) == __val
;
274 _GLIBCXX_ALWAYS_INLINE
void
275 _M_store_notify_all(unsigned __val
, memory_order __mo
)
277 unsigned* __futex
= (unsigned *)(void *)&_M_data
;
278 if (_M_data
.exchange(__val
, __mo
) & _Waiter_bit
)
279 _M_futex_notify_all(__futex
);
283 #else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
// If futexes are not available, use a mutex and a condvar to wait.
// Because we access the data only within critical sections, all accesses
// are sequentially consistent; thus, we satisfy any provided memory_order.
template <unsigned _Waiter_bit = 0x80000000>
class __atomic_futex_unsigned
{
  typedef std::chrono::system_clock __clock_t;

  // The waited-on value. Always accessed under _M_mutex, which is why the
  // memory_order parameters below can be ignored.
  unsigned _M_data;
  std::mutex _M_mutex;
  std::condition_variable _M_condvar;

public:
  explicit
  __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
  { }

  // Returns the current value.
  _GLIBCXX_ALWAYS_INLINE unsigned
  _M_load(std::memory_order __mo)
  {
    std::unique_lock<std::mutex> __lock(_M_mutex);
    return _M_data;
  }

  // Blocks until the value differs from __val, then returns the new value.
  _GLIBCXX_ALWAYS_INLINE unsigned
  _M_load_when_not_equal(unsigned __val, std::memory_order __mo)
  {
    std::unique_lock<std::mutex> __lock(_M_mutex);
    while (_M_data == __val)
      _M_condvar.wait(__lock);
    return _M_data;
  }

  // Blocks until the value equals __val.
  _GLIBCXX_ALWAYS_INLINE void
  _M_load_when_equal(unsigned __val, std::memory_order __mo)
  {
    std::unique_lock<std::mutex> __lock(_M_mutex);
    while (_M_data != __val)
      _M_condvar.wait(__lock);
  }

  // Returns false iff a timeout occurred.
  template<typename _Rep, typename _Period>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_for(unsigned __val, std::memory_order __mo,
	const std::chrono::duration<_Rep, _Period>& __rtime)
    {
      std::unique_lock<std::mutex> __lock(_M_mutex);
      return _M_condvar.wait_for(__lock, __rtime,
				 [&] { return _M_data == __val;});
    }

  // Returns false iff a timeout occurred.
  template<typename _Clock, typename _Duration>
    _GLIBCXX_ALWAYS_INLINE bool
    _M_load_when_equal_until(unsigned __val, std::memory_order __mo,
	const std::chrono::time_point<_Clock, _Duration>& __atime)
    {
      std::unique_lock<std::mutex> __lock(_M_mutex);
      return _M_condvar.wait_until(__lock, __atime,
				   [&] { return _M_data == __val;});
    }

  // Stores __val and wakes all waiters.
  _GLIBCXX_ALWAYS_INLINE void
  _M_store_notify_all(unsigned __val, std::memory_order __mo)
  {
    std::unique_lock<std::mutex> __lock(_M_mutex);
    _M_data = __val;
    _M_condvar.notify_all();
  }
};
355 #endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
356 #endif // _GLIBCXX_HAS_GTHREADS
358 _GLIBCXX_END_NAMESPACE_VERSION