// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m & int(__mod));
  }
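
  // Editor's note (illustrative sketch, not part of the upstream header):
  // the __memory_order_hle_* bits are meant to be OR'ed into a standard
  // memory order so that, on targets with hardware lock elision (e.g.
  // Intel TSX), operations such as exchange() or test_and_set() can pass
  // an elision hint down to the underlying __atomic built-ins.  Assuming
  // a hypothetical std::atomic<int> named __lock used as a mutex:
  //
  //   while (__lock.exchange(1, std::memory_order_acquire
  //                             | std::__memory_order_hle_acquire))
  //     ; // spin until the (possibly elided) lock is acquired
  //   __lock.store(0, std::memory_order_release
  //                   | std::__memory_order_hle_release);
  //
  // On targets without lock elision the modifier bits are simply ignored.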

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
                        | (__m & __memory_order_modifier_mask));
  }
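
  // Editor's note: a minimal compile-time check of the rule above, added
  // here for illustration only (it is not part of the upstream header).
  // An acquire-release success order yields an acquire failure order, and
  // a plain release success order decays to relaxed.
  static_assert(__cmpexch_failure_order2(memory_order_acq_rel)
                == memory_order_acquire, "acq_rel failure order is acquire");
  static_assert(__cmpexch_failure_order2(memory_order_release)
                == memory_order_relaxed, "release failure order is relaxed");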

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(__m); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(__m); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }


  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;


#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1. */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Converting constructor from bool; this is what an initialization
    // with ATOMIC_FLAG_INIT invokes.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
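
  // Editor's note: an illustrative use of atomic_flag as a spin lock, a
  // hedged sketch for documentation purposes only (it is not part of the
  // upstream header).  test_and_set(acquire) spins until the previous
  // holder calls clear(release):
  //
  //   static std::atomic_flag __guard = ATOMIC_FLAG_INIT;
  //
  //   void __critical_section()
  //   {
  //     while (__guard.test_and_set(std::memory_order_acquire))
  //       ; // spin until the flag is clear
  //     // ... exclusive work ...
  //     __guard.clear(std::memory_order_release);
  //   }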


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }
    };
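
  // Editor's note: the std::atomic integral types defined in <atomic> are
  // built on this base and expose these members directly.  A hedged sketch
  // of the usual compare_exchange_weak retry loop (illustration only, not
  // part of the upstream header), assuming an object std::atomic<int>
  // named __counter:
  //
  //   int __old = __counter.load(std::memory_order_relaxed);
  //   while (!__counter.compare_exchange_weak(__old, __old + 1,
  //                                           std::memory_order_acq_rel,
  //                                           std::memory_order_relaxed))
  //     { } // on failure __old is reloaded with the current value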


  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }


      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;

        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
    };
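
  // Editor's note: fetch_add/fetch_sub on the pointer specialization scale
  // the ptrdiff_t argument by sizeof(_PTp) via _M_type_size, mirroring
  // ordinary pointer arithmetic.  A hedged sketch (illustration only, not
  // part of the upstream header), assuming a std::atomic<int*> named
  // __cursor that points into an int array:
  //
  //   int* __prev = __cursor.fetch_add(4); // advances by 4 * sizeof(int)
  //   // __prev holds the element the cursor referenced before the add.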

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif