// Copyright (C) 2015-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_futex.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly.
 */
30 #ifndef _GLIBCXX_ATOMIC_FUTEX_H
31 #define _GLIBCXX_ATOMIC_FUTEX_H 1
33 #pragma GCC system_header
#include <atomic>
#if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
#include <mutex>
#include <condition_variable>
#endif
#include <bits/chrono.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
46 namespace std
_GLIBCXX_VISIBILITY(default)
48 _GLIBCXX_BEGIN_NAMESPACE_VERSION
50 #ifdef _GLIBCXX_HAS_GTHREADS
51 #if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
52 struct __atomic_futex_unsigned_base
54 // __s and __ns are measured against CLOCK_REALTIME. Returns false
55 // iff a timeout occurred.
57 _M_futex_wait_until(unsigned *__addr
, unsigned __val
, bool __has_timeout
,
58 chrono::seconds __s
, chrono::nanoseconds __ns
);
60 // __s and __ns are measured against CLOCK_MONOTONIC. Returns
61 // false iff a timeout occurred.
63 _M_futex_wait_until_steady(unsigned *__addr
, unsigned __val
,
64 bool __has_timeout
, chrono::seconds __s
, chrono::nanoseconds __ns
);
66 // This can be executed after the object has been destroyed.
67 static void _M_futex_notify_all(unsigned* __addr
);
70 template <unsigned _Waiter_bit
= 0x80000000>
71 class __atomic_futex_unsigned
: __atomic_futex_unsigned_base
73 typedef chrono::steady_clock __clock_t
;
75 // This must be lock-free and at offset 0.
76 atomic
<unsigned> _M_data
;
80 __atomic_futex_unsigned(unsigned __data
) : _M_data(__data
)
83 _GLIBCXX_ALWAYS_INLINE
unsigned
84 _M_load(memory_order __mo
)
86 return _M_data
.load(__mo
) & ~_Waiter_bit
;
90 // If a timeout occurs, returns a current value after the timeout;
91 // otherwise, returns the operand's value if equal is true or a different
92 // value if equal is false.
93 // The assumed value is the caller's assumption about the current value
94 // when making the call.
95 // __s and __ns are measured against CLOCK_REALTIME.
97 _M_load_and_test_until(unsigned __assumed
, unsigned __operand
,
98 bool __equal
, memory_order __mo
, bool __has_timeout
,
99 chrono::seconds __s
, chrono::nanoseconds __ns
)
103 // Don't bother checking the value again because we expect the caller
104 // to have done it recently.
105 // memory_order_relaxed is sufficient because we can rely on just the
106 // modification order (store_notify uses an atomic RMW operation too),
107 // and the futex syscalls synchronize between themselves.
108 _M_data
.fetch_or(_Waiter_bit
, memory_order_relaxed
);
109 bool __ret
= _M_futex_wait_until((unsigned*)(void*)&_M_data
,
110 __assumed
| _Waiter_bit
,
111 __has_timeout
, __s
, __ns
);
112 // Fetch the current value after waiting (clears _Waiter_bit).
113 __assumed
= _M_load(__mo
);
114 if (!__ret
|| ((__operand
== __assumed
) == __equal
))
116 // TODO adapt wait time
120 // If a timeout occurs, returns a current value after the timeout;
121 // otherwise, returns the operand's value if equal is true or a different
122 // value if equal is false.
123 // The assumed value is the caller's assumption about the current value
124 // when making the call.
125 // __s and __ns are measured against CLOCK_MONOTONIC.
127 _M_load_and_test_until_steady(unsigned __assumed
, unsigned __operand
,
128 bool __equal
, memory_order __mo
, bool __has_timeout
,
129 chrono::seconds __s
, chrono::nanoseconds __ns
)
133 // Don't bother checking the value again because we expect the caller
134 // to have done it recently.
135 // memory_order_relaxed is sufficient because we can rely on just the
136 // modification order (store_notify uses an atomic RMW operation too),
137 // and the futex syscalls synchronize between themselves.
138 _M_data
.fetch_or(_Waiter_bit
, memory_order_relaxed
);
139 bool __ret
= _M_futex_wait_until_steady((unsigned*)(void*)&_M_data
,
140 __assumed
| _Waiter_bit
,
141 __has_timeout
, __s
, __ns
);
142 // Fetch the current value after waiting (clears _Waiter_bit).
143 __assumed
= _M_load(__mo
);
144 if (!__ret
|| ((__operand
== __assumed
) == __equal
))
146 // TODO adapt wait time
150 // Returns the operand's value if equal is true or a different value if
152 // The assumed value is the caller's assumption about the current value
153 // when making the call.
155 _M_load_and_test(unsigned __assumed
, unsigned __operand
,
156 bool __equal
, memory_order __mo
)
158 return _M_load_and_test_until(__assumed
, __operand
, __equal
, __mo
,
162 // If a timeout occurs, returns a current value after the timeout;
163 // otherwise, returns the operand's value if equal is true or a different
164 // value if equal is false.
165 // The assumed value is the caller's assumption about the current value
166 // when making the call.
167 template<typename _Dur
>
169 _M_load_and_test_until_impl(unsigned __assumed
, unsigned __operand
,
170 bool __equal
, memory_order __mo
,
171 const chrono::time_point
<std::chrono::system_clock
, _Dur
>& __atime
)
173 auto __s
= chrono::time_point_cast
<chrono::seconds
>(__atime
);
174 auto __ns
= chrono::duration_cast
<chrono::nanoseconds
>(__atime
- __s
);
176 return _M_load_and_test_until(__assumed
, __operand
, __equal
, __mo
,
177 true, __s
.time_since_epoch(), __ns
);
180 template<typename _Dur
>
182 _M_load_and_test_until_impl(unsigned __assumed
, unsigned __operand
,
183 bool __equal
, memory_order __mo
,
184 const chrono::time_point
<std::chrono::steady_clock
, _Dur
>& __atime
)
186 auto __s
= chrono::time_point_cast
<chrono::seconds
>(__atime
);
187 auto __ns
= chrono::duration_cast
<chrono::nanoseconds
>(__atime
- __s
);
189 return _M_load_and_test_until_steady(__assumed
, __operand
, __equal
, __mo
,
190 true, __s
.time_since_epoch(), __ns
);
195 _GLIBCXX_ALWAYS_INLINE
unsigned
196 _M_load_when_not_equal(unsigned __val
, memory_order __mo
)
198 unsigned __i
= _M_load(__mo
);
199 if ((__i
& ~_Waiter_bit
) != __val
)
200 return (__i
& ~_Waiter_bit
);
201 // TODO Spin-wait first.
202 return _M_load_and_test(__i
, __val
, false, __mo
);
205 _GLIBCXX_ALWAYS_INLINE
void
206 _M_load_when_equal(unsigned __val
, memory_order __mo
)
208 unsigned __i
= _M_load(__mo
);
209 if ((__i
& ~_Waiter_bit
) == __val
)
211 // TODO Spin-wait first.
212 _M_load_and_test(__i
, __val
, true, __mo
);
215 // Returns false iff a timeout occurred.
216 template<typename _Rep
, typename _Period
>
217 _GLIBCXX_ALWAYS_INLINE
bool
218 _M_load_when_equal_for(unsigned __val
, memory_order __mo
,
219 const chrono::duration
<_Rep
, _Period
>& __rtime
)
221 using __dur
= typename
__clock_t::duration
;
222 return _M_load_when_equal_until(__val
, __mo
,
223 __clock_t::now() + chrono::__detail::ceil
<__dur
>(__rtime
));
226 // Returns false iff a timeout occurred.
227 template<typename _Clock
, typename _Duration
>
228 _GLIBCXX_ALWAYS_INLINE
bool
229 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
230 const chrono::time_point
<_Clock
, _Duration
>& __atime
)
232 typename
_Clock::time_point __c_entry
= _Clock::now();
234 const __clock_t::time_point __s_entry
= __clock_t::now();
235 const auto __delta
= __atime
- __c_entry
;
236 const auto __s_atime
= __s_entry
+
237 chrono::__detail::ceil
<__clock_t::duration
>(__delta
);
238 if (_M_load_when_equal_until(__val
, __mo
, __s_atime
))
240 __c_entry
= _Clock::now();
241 } while (__c_entry
< __atime
);
245 // Returns false iff a timeout occurred.
246 template<typename _Duration
>
247 _GLIBCXX_ALWAYS_INLINE
bool
248 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
249 const chrono::time_point
<std::chrono::system_clock
, _Duration
>& __atime
)
251 unsigned __i
= _M_load(__mo
);
252 if ((__i
& ~_Waiter_bit
) == __val
)
254 // TODO Spin-wait first. Ignore effect on timeout.
255 __i
= _M_load_and_test_until_impl(__i
, __val
, true, __mo
, __atime
);
256 return (__i
& ~_Waiter_bit
) == __val
;
259 // Returns false iff a timeout occurred.
260 template<typename _Duration
>
261 _GLIBCXX_ALWAYS_INLINE
bool
262 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
263 const chrono::time_point
<std::chrono::steady_clock
, _Duration
>& __atime
)
265 unsigned __i
= _M_load(__mo
);
266 if ((__i
& ~_Waiter_bit
) == __val
)
268 // TODO Spin-wait first. Ignore effect on timeout.
269 __i
= _M_load_and_test_until_impl(__i
, __val
, true, __mo
, __atime
);
270 return (__i
& ~_Waiter_bit
) == __val
;
273 _GLIBCXX_ALWAYS_INLINE
void
274 _M_store_notify_all(unsigned __val
, memory_order __mo
)
276 unsigned* __futex
= (unsigned *)(void *)&_M_data
;
277 if (_M_data
.exchange(__val
, __mo
) & _Waiter_bit
)
278 _M_futex_notify_all(__futex
);
282 #else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
284 // If futexes are not available, use a mutex and a condvar to wait.
285 // Because we access the data only within critical sections, all accesses
286 // are sequentially consistent; thus, we satisfy any provided memory_order.
287 template <unsigned _Waiter_bit
= 0x80000000>
288 class __atomic_futex_unsigned
290 typedef chrono::system_clock __clock_t
;
294 condition_variable _M_condvar
;
298 __atomic_futex_unsigned(unsigned __data
) : _M_data(__data
)
301 _GLIBCXX_ALWAYS_INLINE
unsigned
302 _M_load(memory_order __mo
)
304 unique_lock
<mutex
> __lock(_M_mutex
);
308 _GLIBCXX_ALWAYS_INLINE
unsigned
309 _M_load_when_not_equal(unsigned __val
, memory_order __mo
)
311 unique_lock
<mutex
> __lock(_M_mutex
);
312 while (_M_data
== __val
)
313 _M_condvar
.wait(__lock
);
317 _GLIBCXX_ALWAYS_INLINE
void
318 _M_load_when_equal(unsigned __val
, memory_order __mo
)
320 unique_lock
<mutex
> __lock(_M_mutex
);
321 while (_M_data
!= __val
)
322 _M_condvar
.wait(__lock
);
325 template<typename _Rep
, typename _Period
>
326 _GLIBCXX_ALWAYS_INLINE
bool
327 _M_load_when_equal_for(unsigned __val
, memory_order __mo
,
328 const chrono::duration
<_Rep
, _Period
>& __rtime
)
330 unique_lock
<mutex
> __lock(_M_mutex
);
331 return _M_condvar
.wait_for(__lock
, __rtime
,
332 [&] { return _M_data
== __val
;});
335 template<typename _Clock
, typename _Duration
>
336 _GLIBCXX_ALWAYS_INLINE
bool
337 _M_load_when_equal_until(unsigned __val
, memory_order __mo
,
338 const chrono::time_point
<_Clock
, _Duration
>& __atime
)
340 unique_lock
<mutex
> __lock(_M_mutex
);
341 return _M_condvar
.wait_until(__lock
, __atime
,
342 [&] { return _M_data
== __val
;});
345 _GLIBCXX_ALWAYS_INLINE
void
346 _M_store_notify_all(unsigned __val
, memory_order __mo
)
348 unique_lock
<mutex
> __lock(_M_mutex
);
350 _M_condvar
.notify_all();
354 #endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
355 #endif // _GLIBCXX_HAS_GTHREADS
357 _GLIBCXX_END_NAMESPACE_VERSION