// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>
// Annotations for the custom locking in atomic<shared_ptr<T>>.
// When building under ThreadSanitizer these expand to calls into the TSAN
// mutex interface so the hand-rolled spin lock is understood by the tool;
// otherwise they expand to nothing.  Without the #else/#endif the second
// set of definitions would unconditionally redefine the first.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
// No-op annotations when not building with ThreadSanitizer support.
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @{
   */
  /// @relates shared_ptr @{

  /// @cond undocumented
75 _Sp_locker(const _Sp_locker
&) = delete;
76 _Sp_locker
& operator=(const _Sp_locker
&) = delete;
80 _Sp_locker(const void*) noexcept
;
81 _Sp_locker(const void*, const void*) noexcept
;
85 unsigned char _M_key1
;
86 unsigned char _M_key2
;
88 explicit _Sp_locker(const void*, const void* = nullptr) { }
95 * @brief Report whether shared_ptr atomic operations are lock-free.
96 * @param __p A non-null pointer to a shared_ptr object.
97 * @return True if atomic access to @c *__p is lock-free, false otherwise.
100 template<typename _Tp
, _Lock_policy _Lp
>
102 atomic_is_lock_free(const __shared_ptr
<_Tp
, _Lp
>* __p
)
105 return __gthread_active_p() == 0;
111 template<typename _Tp
>
113 atomic_is_lock_free(const shared_ptr
<_Tp
>* __p
)
114 { return std::atomic_is_lock_free
<_Tp
, __default_lock_policy
>(__p
); }
119 * @brief Atomic load for shared_ptr objects.
120 * @param __p A non-null pointer to a shared_ptr object.
123 * The memory order shall not be @c memory_order_release or
124 * @c memory_order_acq_rel.
127 template<typename _Tp
>
128 inline shared_ptr
<_Tp
>
129 atomic_load_explicit(const shared_ptr
<_Tp
>* __p
, memory_order
)
131 _Sp_locker __lock
{__p
};
135 template<typename _Tp
>
136 inline shared_ptr
<_Tp
>
137 atomic_load(const shared_ptr
<_Tp
>* __p
)
138 { return std::atomic_load_explicit(__p
, memory_order_seq_cst
); }
140 template<typename _Tp
, _Lock_policy _Lp
>
141 inline __shared_ptr
<_Tp
, _Lp
>
142 atomic_load_explicit(const __shared_ptr
<_Tp
, _Lp
>* __p
, memory_order
)
144 _Sp_locker __lock
{__p
};
148 template<typename _Tp
, _Lock_policy _Lp
>
149 inline __shared_ptr
<_Tp
, _Lp
>
150 atomic_load(const __shared_ptr
<_Tp
, _Lp
>* __p
)
151 { return std::atomic_load_explicit(__p
, memory_order_seq_cst
); }
155 * @brief Atomic store for shared_ptr objects.
156 * @param __p A non-null pointer to a shared_ptr object.
157 * @param __r The value to store.
159 * The memory order shall not be @c memory_order_acquire or
160 * @c memory_order_acq_rel.
163 template<typename _Tp
>
165 atomic_store_explicit(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
> __r
,
168 _Sp_locker __lock
{__p
};
169 __p
->swap(__r
); // use swap so that **__p not destroyed while lock held
172 template<typename _Tp
>
174 atomic_store(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
> __r
)
175 { std::atomic_store_explicit(__p
, std::move(__r
), memory_order_seq_cst
); }
177 template<typename _Tp
, _Lock_policy _Lp
>
179 atomic_store_explicit(__shared_ptr
<_Tp
, _Lp
>* __p
,
180 __shared_ptr
<_Tp
, _Lp
> __r
,
183 _Sp_locker __lock
{__p
};
184 __p
->swap(__r
); // use swap so that **__p not destroyed while lock held
187 template<typename _Tp
, _Lock_policy _Lp
>
189 atomic_store(__shared_ptr
<_Tp
, _Lp
>* __p
, __shared_ptr
<_Tp
, _Lp
> __r
)
190 { std::atomic_store_explicit(__p
, std::move(__r
), memory_order_seq_cst
); }
194 * @brief Atomic exchange for shared_ptr objects.
195 * @param __p A non-null pointer to a shared_ptr object.
196 * @param __r New value to store in @c *__p.
197 * @return The original value of @c *__p
200 template<typename _Tp
>
201 inline shared_ptr
<_Tp
>
202 atomic_exchange_explicit(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
> __r
,
205 _Sp_locker __lock
{__p
};
210 template<typename _Tp
>
211 inline shared_ptr
<_Tp
>
212 atomic_exchange(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
> __r
)
214 return std::atomic_exchange_explicit(__p
, std::move(__r
),
215 memory_order_seq_cst
);
218 template<typename _Tp
, _Lock_policy _Lp
>
219 inline __shared_ptr
<_Tp
, _Lp
>
220 atomic_exchange_explicit(__shared_ptr
<_Tp
, _Lp
>* __p
,
221 __shared_ptr
<_Tp
, _Lp
> __r
,
224 _Sp_locker __lock
{__p
};
229 template<typename _Tp
, _Lock_policy _Lp
>
230 inline __shared_ptr
<_Tp
, _Lp
>
231 atomic_exchange(__shared_ptr
<_Tp
, _Lp
>* __p
, __shared_ptr
<_Tp
, _Lp
> __r
)
233 return std::atomic_exchange_explicit(__p
, std::move(__r
),
234 memory_order_seq_cst
);
239 * @brief Atomic compare-and-swap for shared_ptr objects.
240 * @param __p A non-null pointer to a shared_ptr object.
241 * @param __v A non-null pointer to a shared_ptr object.
242 * @param __w A non-null pointer to a shared_ptr object.
243 * @return True if @c *__p was equivalent to @c *__v, false otherwise.
245 * The memory order for failure shall not be @c memory_order_release or
246 * @c memory_order_acq_rel, or stronger than the memory order for success.
249 template<typename _Tp
>
251 atomic_compare_exchange_strong_explicit(shared_ptr
<_Tp
>* __p
,
252 shared_ptr
<_Tp
>* __v
,
257 shared_ptr
<_Tp
> __x
; // goes out of scope after __lock
258 _Sp_locker __lock
{__p
, __v
};
259 owner_less
<shared_ptr
<_Tp
>> __less
;
260 if (*__p
== *__v
&& !__less(*__p
, *__v
) && !__less(*__v
, *__p
))
262 __x
= std::move(*__p
);
263 *__p
= std::move(__w
);
266 __x
= std::move(*__v
);
271 template<typename _Tp
>
273 atomic_compare_exchange_strong(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
>* __v
,
276 return std::atomic_compare_exchange_strong_explicit(__p
, __v
,
277 std::move(__w
), memory_order_seq_cst
, memory_order_seq_cst
);
280 template<typename _Tp
>
282 atomic_compare_exchange_weak_explicit(shared_ptr
<_Tp
>* __p
,
283 shared_ptr
<_Tp
>* __v
,
285 memory_order __success
,
286 memory_order __failure
)
288 return std::atomic_compare_exchange_strong_explicit(__p
, __v
,
289 std::move(__w
), __success
, __failure
);
292 template<typename _Tp
>
294 atomic_compare_exchange_weak(shared_ptr
<_Tp
>* __p
, shared_ptr
<_Tp
>* __v
,
297 return std::atomic_compare_exchange_weak_explicit(__p
, __v
,
298 std::move(__w
), memory_order_seq_cst
, memory_order_seq_cst
);
301 template<typename _Tp
, _Lock_policy _Lp
>
303 atomic_compare_exchange_strong_explicit(__shared_ptr
<_Tp
, _Lp
>* __p
,
304 __shared_ptr
<_Tp
, _Lp
>* __v
,
305 __shared_ptr
<_Tp
, _Lp
> __w
,
309 __shared_ptr
<_Tp
, _Lp
> __x
; // goes out of scope after __lock
310 _Sp_locker __lock
{__p
, __v
};
311 owner_less
<__shared_ptr
<_Tp
, _Lp
>> __less
;
312 if (*__p
== *__v
&& !__less(*__p
, *__v
) && !__less(*__v
, *__p
))
314 __x
= std::move(*__p
);
315 *__p
= std::move(__w
);
318 __x
= std::move(*__v
);
323 template<typename _Tp
, _Lock_policy _Lp
>
325 atomic_compare_exchange_strong(__shared_ptr
<_Tp
, _Lp
>* __p
,
326 __shared_ptr
<_Tp
, _Lp
>* __v
,
327 __shared_ptr
<_Tp
, _Lp
> __w
)
329 return std::atomic_compare_exchange_strong_explicit(__p
, __v
,
330 std::move(__w
), memory_order_seq_cst
, memory_order_seq_cst
);
333 template<typename _Tp
, _Lock_policy _Lp
>
335 atomic_compare_exchange_weak_explicit(__shared_ptr
<_Tp
, _Lp
>* __p
,
336 __shared_ptr
<_Tp
, _Lp
>* __v
,
337 __shared_ptr
<_Tp
, _Lp
> __w
,
338 memory_order __success
,
339 memory_order __failure
)
341 return std::atomic_compare_exchange_strong_explicit(__p
, __v
,
342 std::move(__w
), __success
, __failure
);
345 template<typename _Tp
, _Lock_policy _Lp
>
347 atomic_compare_exchange_weak(__shared_ptr
<_Tp
, _Lp
>* __p
,
348 __shared_ptr
<_Tp
, _Lp
>* __v
,
349 __shared_ptr
<_Tp
, _Lp
> __w
)
351 return std::atomic_compare_exchange_weak_explicit(__p
, __v
,
352 std::move(__w
), memory_order_seq_cst
, memory_order_seq_cst
);
356 #if __cplusplus >= 202002L
357 # define __cpp_lib_atomic_shared_ptr 201711L
358 template<typename _Tp
>
361 template<typename _Up
>
362 static constexpr bool __is_shared_ptr
= false;
363 template<typename _Up
>
364 static constexpr bool __is_shared_ptr
<shared_ptr
<_Up
>> = true;
366 template<typename _Tp
>
369 using value_type
= _Tp
;
371 friend class atomic
<_Tp
>;
373 // An atomic version of __shared_count<> and __weak_count<>.
374 // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
377 // Either __shared_count<> or __weak_count<>
378 using __count_type
= decltype(_Tp::_M_refcount
);
380 // _Sp_counted_base<>*
381 using pointer
= decltype(__count_type::_M_pi
);
383 // Ensure we can use the LSB as the lock bit.
384 static_assert(alignof(remove_pointer_t
<pointer
>) > 1);
386 constexpr _Atomic_count() noexcept
= default;
389 _Atomic_count(__count_type
&& __c
) noexcept
390 : _M_val(reinterpret_cast<uintptr_t>(__c
._M_pi
))
397 auto __val
= _M_val
.load(memory_order_relaxed
);
398 _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val
);
399 __glibcxx_assert(!(__val
& _S_lock_bit
));
400 if (auto __pi
= reinterpret_cast<pointer
>(__val
))
402 if constexpr (__is_shared_ptr
<_Tp
>)
405 __pi
->_M_weak_release();
409 _Atomic_count(const _Atomic_count
&) = delete;
410 _Atomic_count
& operator=(const _Atomic_count
&) = delete;
412 // Precondition: Caller does not hold lock!
413 // Returns the raw pointer value without the lock bit set.
415 lock(memory_order __o
) const noexcept
417 // To acquire the lock we flip the LSB from 0 to 1.
419 auto __current
= _M_val
.load(memory_order_relaxed
);
420 while (__current
& _S_lock_bit
)
422 #if __cpp_lib_atomic_wait
423 __detail::__thread_relax();
425 __current
= _M_val
.load(memory_order_relaxed
);
428 _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val
);
430 while (!_M_val
.compare_exchange_strong(__current
,
431 __current
| _S_lock_bit
,
433 memory_order_relaxed
))
435 _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val
);
436 #if __cpp_lib_atomic_wait
437 __detail::__thread_relax();
439 __current
= __current
& ~_S_lock_bit
;
440 _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val
);
442 _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val
);
443 return reinterpret_cast<pointer
>(__current
);
446 // Precondition: caller holds lock!
448 unlock(memory_order __o
) const noexcept
450 _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val
);
451 _M_val
.fetch_sub(1, __o
);
452 _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val
);
455 // Swaps the values of *this and __c, and unlocks *this.
456 // Precondition: caller holds lock!
458 _M_swap_unlock(__count_type
& __c
, memory_order __o
) noexcept
460 if (__o
!= memory_order_seq_cst
)
461 __o
= memory_order_release
;
462 auto __x
= reinterpret_cast<uintptr_t>(__c
._M_pi
);
463 _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val
);
464 __x
= _M_val
.exchange(__x
, __o
);
465 _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val
);
466 __c
._M_pi
= reinterpret_cast<pointer
>(__x
& ~_S_lock_bit
);
469 #if __cpp_lib_atomic_wait
470 // Precondition: caller holds lock!
472 _M_wait_unlock(memory_order __o
) const noexcept
474 _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val
);
475 auto __v
= _M_val
.fetch_sub(1, memory_order_relaxed
);
476 _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val
);
477 _M_val
.wait(__v
& ~_S_lock_bit
, __o
);
481 notify_one() noexcept
483 _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val
);
485 _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val
);
489 notify_all() noexcept
491 _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val
);
493 _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val
);
498 mutable __atomic_base
<uintptr_t> _M_val
{0};
499 static constexpr uintptr_t _S_lock_bit
{1};
502 typename
_Tp::element_type
* _M_ptr
= nullptr;
503 _Atomic_count _M_refcount
;
505 static typename
_Atomic_count::pointer
506 _S_add_ref(typename
_Atomic_count::pointer __p
)
510 if constexpr (__is_shared_ptr
<_Tp
>)
511 __p
->_M_add_ref_copy();
513 __p
->_M_weak_add_ref();
518 constexpr _Sp_atomic() noexcept
= default;
521 _Sp_atomic(value_type __r
) noexcept
522 : _M_ptr(__r
._M_ptr
), _M_refcount(std::move(__r
._M_refcount
))
525 ~_Sp_atomic() = default;
527 _Sp_atomic(const _Sp_atomic
&) = delete;
528 void operator=(const _Sp_atomic
&) = delete;
531 load(memory_order __o
) const noexcept
533 __glibcxx_assert(__o
!= memory_order_release
534 && __o
!= memory_order_acq_rel
);
535 // Ensure that the correct value of _M_ptr is visible after locking.,
536 // by upgrading relaxed or consume to acquire.
537 if (__o
!= memory_order_seq_cst
)
538 __o
= memory_order_acquire
;
541 auto __pi
= _M_refcount
.lock(__o
);
542 __ret
._M_ptr
= _M_ptr
;
543 __ret
._M_refcount
._M_pi
= _S_add_ref(__pi
);
544 _M_refcount
.unlock(memory_order_relaxed
);
549 swap(value_type
& __r
, memory_order __o
) noexcept
551 _M_refcount
.lock(memory_order_acquire
);
552 std::swap(_M_ptr
, __r
._M_ptr
);
553 _M_refcount
._M_swap_unlock(__r
._M_refcount
, __o
);
557 compare_exchange_strong(value_type
& __expected
, value_type __desired
,
558 memory_order __o
, memory_order __o2
) noexcept
560 bool __result
= true;
561 auto __pi
= _M_refcount
.lock(memory_order_acquire
);
562 if (_M_ptr
== __expected
._M_ptr
563 && __pi
== __expected
._M_refcount
._M_pi
)
565 _M_ptr
= __desired
._M_ptr
;
566 _M_refcount
._M_swap_unlock(__desired
._M_refcount
, __o
);
570 _Tp __sink
= std::move(__expected
);
571 __expected
._M_ptr
= _M_ptr
;
572 __expected
._M_refcount
._M_pi
= _S_add_ref(__pi
);
573 _M_refcount
.unlock(__o2
);
579 #if __cpp_lib_atomic_wait
581 wait(value_type __old
, memory_order __o
) const noexcept
583 auto __pi
= _M_refcount
.lock(memory_order_acquire
);
584 if (_M_ptr
== __old
._M_ptr
&& __pi
== __old
._M_refcount
._M_pi
)
585 _M_refcount
._M_wait_unlock(__o
);
587 _M_refcount
.unlock(memory_order_relaxed
);
591 notify_one() noexcept
593 _M_refcount
.notify_one();
597 notify_all() noexcept
599 _M_refcount
.notify_all();
604 template<typename _Tp
>
605 class atomic
<shared_ptr
<_Tp
>>
608 using value_type
= shared_ptr
<_Tp
>;
610 static constexpr bool is_always_lock_free
= false;
613 is_lock_free() const noexcept
616 constexpr atomic() noexcept
= default;
618 // _GLIBCXX_RESOLVE_LIB_DEFECTS
619 // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
620 constexpr atomic(nullptr_t
) noexcept
: atomic() { }
622 atomic(shared_ptr
<_Tp
> __r
) noexcept
623 : _M_impl(std::move(__r
))
626 atomic(const atomic
&) = delete;
627 void operator=(const atomic
&) = delete;
630 load(memory_order __o
= memory_order_seq_cst
) const noexcept
631 { return _M_impl
.load(__o
); }
633 operator shared_ptr
<_Tp
>() const noexcept
634 { return _M_impl
.load(memory_order_seq_cst
); }
637 store(shared_ptr
<_Tp
> __desired
,
638 memory_order __o
= memory_order_seq_cst
) noexcept
639 { _M_impl
.swap(__desired
, __o
); }
642 operator=(shared_ptr
<_Tp
> __desired
) noexcept
643 { _M_impl
.swap(__desired
, memory_order_seq_cst
); }
645 // _GLIBCXX_RESOLVE_LIB_DEFECTS
646 // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
648 operator=(nullptr_t
) noexcept
652 exchange(shared_ptr
<_Tp
> __desired
,
653 memory_order __o
= memory_order_seq_cst
) noexcept
655 _M_impl
.swap(__desired
, __o
);
660 compare_exchange_strong(shared_ptr
<_Tp
>& __expected
,
661 shared_ptr
<_Tp
> __desired
,
662 memory_order __o
, memory_order __o2
) noexcept
664 return _M_impl
.compare_exchange_strong(__expected
, __desired
, __o
, __o2
);
668 compare_exchange_strong(value_type
& __expected
, value_type __desired
,
669 memory_order __o
= memory_order_seq_cst
) noexcept
674 case memory_order_acq_rel
:
675 __o2
= memory_order_acquire
;
677 case memory_order_release
:
678 __o2
= memory_order_relaxed
;
683 return compare_exchange_strong(__expected
, std::move(__desired
),
688 compare_exchange_weak(value_type
& __expected
, value_type __desired
,
689 memory_order __o
, memory_order __o2
) noexcept
691 return compare_exchange_strong(__expected
, std::move(__desired
),
696 compare_exchange_weak(value_type
& __expected
, value_type __desired
,
697 memory_order __o
= memory_order_seq_cst
) noexcept
699 return compare_exchange_strong(__expected
, std::move(__desired
), __o
);
702 #if __cpp_lib_atomic_wait
704 wait(value_type __old
,
705 memory_order __o
= memory_order_seq_cst
) const noexcept
707 _M_impl
.wait(std::move(__old
), __o
);
711 notify_one() noexcept
713 _M_impl
.notify_one();
717 notify_all() noexcept
719 _M_impl
.notify_all();
724 _Sp_atomic
<shared_ptr
<_Tp
>> _M_impl
;
727 template<typename _Tp
>
728 class atomic
<weak_ptr
<_Tp
>>
731 using value_type
= weak_ptr
<_Tp
>;
733 static constexpr bool is_always_lock_free
= false;
736 is_lock_free() const noexcept
739 constexpr atomic() noexcept
= default;
741 atomic(weak_ptr
<_Tp
> __r
) noexcept
745 atomic(const atomic
&) = delete;
746 void operator=(const atomic
&) = delete;
749 load(memory_order __o
= memory_order_seq_cst
) const noexcept
750 { return _M_impl
.load(__o
); }
752 operator weak_ptr
<_Tp
>() const noexcept
753 { return _M_impl
.load(memory_order_seq_cst
); }
756 store(weak_ptr
<_Tp
> __desired
,
757 memory_order __o
= memory_order_seq_cst
) noexcept
758 { _M_impl
.swap(__desired
, __o
); }
761 operator=(weak_ptr
<_Tp
> __desired
) noexcept
762 { _M_impl
.swap(__desired
, memory_order_seq_cst
); }
765 exchange(weak_ptr
<_Tp
> __desired
,
766 memory_order __o
= memory_order_seq_cst
) noexcept
768 _M_impl
.swap(__desired
, __o
);
773 compare_exchange_strong(weak_ptr
<_Tp
>& __expected
,
774 weak_ptr
<_Tp
> __desired
,
775 memory_order __o
, memory_order __o2
) noexcept
777 return _M_impl
.compare_exchange_strong(__expected
, __desired
, __o
, __o2
);
781 compare_exchange_strong(value_type
& __expected
, value_type __desired
,
782 memory_order __o
= memory_order_seq_cst
) noexcept
787 case memory_order_acq_rel
:
788 __o2
= memory_order_acquire
;
790 case memory_order_release
:
791 __o2
= memory_order_relaxed
;
796 return compare_exchange_strong(__expected
, std::move(__desired
),
801 compare_exchange_weak(value_type
& __expected
, value_type __desired
,
802 memory_order __o
, memory_order __o2
) noexcept
804 return compare_exchange_strong(__expected
, std::move(__desired
),
809 compare_exchange_weak(value_type
& __expected
, value_type __desired
,
810 memory_order __o
= memory_order_seq_cst
) noexcept
812 return compare_exchange_strong(__expected
, std::move(__desired
), __o
);
815 #if __cpp_lib_atomic_wait
817 wait(value_type __old
,
818 memory_order __o
= memory_order_seq_cst
) const noexcept
820 _M_impl
.wait(std::move(__old
), __o
);
824 notify_one() noexcept
826 _M_impl
.notify_one();
830 notify_all() noexcept
832 _M_impl
.notify_all();
837 _Sp_atomic
<weak_ptr
<_Tp
>> _M_impl
;
#endif // C++20

  /// @} relates shared_ptr
  /// @} group pointer_abstractions

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H