// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m & int(__mod));
  }

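  // The __memory_order_hle_* modifiers are a GCC extension for Intel TSX
  // hardware lock elision.  A hedged illustration (caller code, not part of
  // this header): a modifier is OR-ed onto a standard order with the
  // operator| above, and split back out again with the two masks, e.g.
  //
  //   std::atomic<int> __lock{0};
  //   __lock.exchange(1, std::memory_order_acquire
  //                      | std::__memory_order_hle_acquire);
  //   // base order:    __m & std::__memory_order_mask
  //   // modifier bits: __m & std::__memory_order_modifier_mask
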
  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
                        | (__m & __memory_order_modifier_mask));
  }

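  // Illustrative compile-time check of the mapping above (assumed caller
  // code, not part of this header): acq_rel degrades to acquire, and release
  // to relaxed, when derived as a failure order; modifier bits pass through.
  //
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
  //                 == std::memory_order_acquire, "");
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_release)
  //                 == std::memory_order_relaxed, "");
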
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(__m); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(__m); }

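  // Hedged usage sketch (caller code, not part of this header): a release
  // fence before a relaxed store synchronizes with an acquire fence placed
  // after a relaxed load that observes that store.
  //
  //   // writer thread
  //   __data = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   __flag.store(true, std::memory_order_relaxed);
  //
  //   // reader thread
  //   if (__flag.load(std::memory_order_relaxed))
  //     {
  //       std::atomic_thread_fence(std::memory_order_acquire);
  //       // __data is guaranteed to read 42 here.
  //     }
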
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

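  // Hedged illustration (assumed caller code): kill_dependency ends a
  // memory_order_consume dependency chain, so the returned value no longer
  // carries a dependency into subsequent memory accesses.
  //
  //   int* __p = __ptr.load(std::memory_order_consume);
  //   int __i = std::kill_dependency(__p[0]);  // dependency chain ends here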

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;


#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };

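  // Hedged usage sketch (caller code, not part of this header): atomic_flag
  // is the building block for a minimal test-and-set spinlock.
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void __acquire()
  //   { while (__lock.test_and_set(std::memory_order_acquire)) { } }
  //
  //   void __release()
  //   { __lock.clear(std::memory_order_release); }
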
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }
    };

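  // Hedged usage sketch (caller code, not part of this header): the members
  // above give std::atomic<int> and the other integral atomics their
  // arithmetic and bitwise operations, each forwarding to one GCC
  // __atomic_* built-in.
  //
  //   std::atomic<int> __counter{0};
  //   __counter.fetch_add(2, std::memory_order_relaxed); // __atomic_fetch_add
  //   __counter |= 0x4;                                  // __atomic_or_fetch
  //   int __seen = __counter.load(std::memory_order_acquire);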

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
    };

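  // Hedged usage sketch (caller code, not part of this header): the pointer
  // specialization scales arithmetic by sizeof(_PTp) through _M_type_size,
  // so fetch_add(1) on an atomic<int*> advances the pointer by one element,
  // just like ordinary pointer arithmetic.
  //
  //   int __buf[4] = { 0, 1, 2, 3 };
  //   std::atomic<int*> __cursor{__buf};
  //   int* __prev = __cursor.fetch_add(1);   // __prev == __buf
  //   // __cursor.load() now equals __buf + 1
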
  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif