Update copyright years.
diff --git a/libstdc++-v3/include/std/shared_mutex b/libstdc++-v3/include/std/shared_mutex
index 47cfc6433d96f4268c53fc83a8e46b76f62bd9d2..1b6478f30c39e1d526e6c757ea5c7ab33b3a7090 100644
--- a/libstdc++-v3/include/std/shared_mutex
+++ b/libstdc++-v3/include/std/shared_mutex
@@ -1,6 +1,6 @@
 // <shared_mutex> -*- C++ -*-
 
-// Copyright (C) 2013-2015 Free Software Foundation, Inc.
+// Copyright (C) 2013-2024 Free Software Foundation, Inc.
 //
 // This file is part of the GNU ISO C++ Library.  This library is free
 // software; you can redistribute it and/or modify it under the
 
 #pragma GCC system_header
 
-#if __cplusplus <= 201103L
-# include <bits/c++14_warning.h>
-#else
+#include <bits/requires_hosted.h> // concurrency
+
+#if __cplusplus >= 201402L
 
-#include <bits/c++config.h>
-#include <mutex>
-#include <condition_variable>
+#include <bits/chrono.h>
+#include <bits/error_constants.h>
 #include <bits/functexcept.h>
+#include <bits/move.h>        // move, __exchange
+#include <bits/std_mutex.h>   // defer_lock_t
+
+#define __glibcxx_want_shared_mutex
+#define __glibcxx_want_shared_timed_mutex
+#include <bits/version.h>
+
+#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+# include <condition_variable>
+#endif
 
 namespace std _GLIBCXX_VISIBILITY(default)
 {
 _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
   /**
-   * @ingroup mutexes
+   * @addtogroup mutexes
    * @{
    */
 
-#ifdef _GLIBCXX_USE_C99_STDINT_TR1
 #ifdef _GLIBCXX_HAS_GTHREADS
 
-#define __cpp_lib_shared_timed_mutex 201402
+#ifdef __cpp_lib_shared_mutex // C++ >= 17 && hosted && gthread
+  class shared_mutex;
+#endif
 
-  /// shared_timed_mutex
-  class shared_timed_mutex
+  class shared_timed_mutex;
+
+  /// @cond undocumented
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+#ifdef __gthrw
+#define _GLIBCXX_GTHRW(name) \
+  __gthrw(pthread_ ## name); \
+  static inline int \
+  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
+  { \
+    if (__gthread_active_p ()) \
+      return __gthrw_(pthread_ ## name) (__rwlock); \
+    else \
+      return 0; \
+  }
+  _GLIBCXX_GTHRW(rwlock_rdlock)
+  _GLIBCXX_GTHRW(rwlock_tryrdlock)
+  _GLIBCXX_GTHRW(rwlock_wrlock)
+  _GLIBCXX_GTHRW(rwlock_trywrlock)
+  _GLIBCXX_GTHRW(rwlock_unlock)
+# ifndef PTHREAD_RWLOCK_INITIALIZER
+  _GLIBCXX_GTHRW(rwlock_destroy)
+  __gthrw(pthread_rwlock_init);
+  static inline int
+  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
+  {
+    if (__gthread_active_p ())
+      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
+    else
+      return 0;
+  }
+# endif
+# if _GTHREAD_USE_MUTEX_TIMEDLOCK
+   __gthrw(pthread_rwlock_timedrdlock);
+  static inline int
+  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
+                               const timespec *__ts)
+  {
+    if (__gthread_active_p ())
+      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
+    else
+      return 0;
+  }
+   __gthrw(pthread_rwlock_timedwrlock);
+  static inline int
+  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
+                               const timespec *__ts)
+  {
+    if (__gthread_active_p ())
+      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
+    else
+      return 0;
+  }
+# endif
+#else
+  static inline int
+  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_rdlock (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_tryrdlock (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_wrlock (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_trywrlock (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_unlock (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_destroy (__rwlock); }
+  static inline int
+  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
+  { return pthread_rwlock_init (__rwlock, NULL); }
+# if _GTHREAD_USE_MUTEX_TIMEDLOCK
+  static inline int
+  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
+                               const timespec *__ts)
+  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
+  static inline int
+  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
+                               const timespec *__ts)
+  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
+# endif
+#endif
+
+  /// A shared mutex type implemented using pthread_rwlock_t.
+  class __shared_mutex_pthread
   {
-#if defined(__GTHREADS_CXX0X)
-    typedef chrono::system_clock       __clock_t;
+    friend class shared_timed_mutex;
 
-    pthread_rwlock_t                   _M_rwlock;
+#ifdef PTHREAD_RWLOCK_INITIALIZER
+    pthread_rwlock_t   _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
+
+  public:
+    __shared_mutex_pthread() = default;
+    ~__shared_mutex_pthread() = default;
+#else
+    pthread_rwlock_t   _M_rwlock;
 
   public:
-    shared_timed_mutex()
+    __shared_mutex_pthread()
     {
-      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
+      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
       if (__ret == ENOMEM)
-       throw bad_alloc();
+       __throw_bad_alloc();
       else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
       else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
       // Errors not handled: EBUSY, EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
     }
 
-    ~shared_timed_mutex()
+    ~__shared_mutex_pthread()
     {
-      int __ret __attribute((unused)) = pthread_rwlock_destroy(&_M_rwlock);
+      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
       // Errors not handled: EBUSY, EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
     }
+#endif
 
-    shared_timed_mutex(const shared_timed_mutex&) = delete;
-    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
-
-    // Exclusive ownership
+    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
+    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;
 
     void
     lock()
     {
-      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
+      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
       if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
       // Errors not handled: EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
     }
 
     bool
     try_lock()
     {
-      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
+      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
       if (__ret == EBUSY) return false;
       // Errors not handled: EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
       return true;
     }
 
-#if _GTHREAD_USE_MUTEX_TIMEDLOCK
-    template<typename _Rep, typename _Period>
-      bool
-      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
-      {
-       return try_lock_until(__clock_t::now() + __rel_time);
-      }
-
-    template<typename _Duration>
-      bool
-      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
-      {
-       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
-       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
-       __gthread_time_t __ts =
-         {
-           static_cast<std::time_t>(__s.time_since_epoch().count()),
-           static_cast<long>(__ns.count())
-         };
-
-       int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
-       // On self-deadlock, we just fail to acquire the lock.  Technically,
-       // the program violated the precondition.
-       if (__ret == ETIMEDOUT || __ret == EDEADLK)
-         return false;
-       // Errors not handled: EINVAL
-       _GLIBCXX_DEBUG_ASSERT(__ret == 0);
-       return true;
-      }
-
-    template<typename _Clock, typename _Duration>
-      bool
-      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
-      {
-       // DR 887 - Sync unknown clock to known clock.
-       const typename _Clock::time_point __c_entry = _Clock::now();
-       const __clock_t::time_point __s_entry = __clock_t::now();
-       const auto __delta = __abs_time - __c_entry;
-       const auto __s_atime = __s_entry + __delta;
-       return try_lock_until(__s_atime);
-      }
-#endif
-
     void
     unlock()
     {
-      int __ret __attribute((unused)) = pthread_rwlock_unlock(&_M_rwlock);
+      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
       // Errors not handled: EPERM, EBUSY, EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
     }
 
     // Shared ownership
@@ -165,122 +225,103 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     lock_shared()
     {
-      int __ret = pthread_rwlock_rdlock(&_M_rwlock);
+      int __ret;
+      // We retry if we exceeded the maximum number of read locks supported by
+      // the POSIX implementation; this can result in busy-waiting, but this
+      // is okay based on the current specification of forward progress
+      // guarantees by the standard.
+      do
+       __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
+      while (__ret == EAGAIN);
       if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
-      if (__ret == EAGAIN)
-       // Maximum number of read locks has been exceeded.
-       __throw_system_error(int(errc::device_or_resource_busy));
       // Errors not handled: EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
     }
 
     bool
     try_lock_shared()
     {
-      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
+      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
       // If the maximum number of read locks has been exceeded, we just fail
       // to acquire the lock.  Unlike for lock(), we are not allowed to throw
       // an exception.
       if (__ret == EBUSY || __ret == EAGAIN) return false;
       // Errors not handled: EINVAL
-      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+      __glibcxx_assert(__ret == 0);
       return true;
     }
 
-#if _GTHREAD_USE_MUTEX_TIMEDLOCK
-    template<typename _Rep, typename _Period>
-      bool
-      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
-      {
-       return try_lock_shared_until(__clock_t::now() + __rel_time);
-      }
-
-    template<typename _Duration>
-      bool
-      try_lock_shared_until(const chrono::time_point<__clock_t,
-                           _Duration>& __atime)
-      {
-       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
-       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
-       __gthread_time_t __ts =
-         {
-           static_cast<std::time_t>(__s.time_since_epoch().count()),
-           static_cast<long>(__ns.count())
-         };
-
-       int __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
-       // If the maximum number of read locks has been exceeded, or we would
-       // deadlock, we just fail to acquire the lock.  Unlike for lock(),
-       // we are not allowed to throw an exception.
-       if (__ret == ETIMEDOUT || __ret == EAGAIN || __ret == EDEADLK)
-         return false;
-       // Errors not handled: EINVAL
-       _GLIBCXX_DEBUG_ASSERT(__ret == 0);
-       return true;
-      }
-
-    template<typename _Clock, typename _Duration>
-      bool
-      try_lock_shared_until(const chrono::time_point<_Clock,
-                           _Duration>& __abs_time)
-      {
-       // DR 887 - Sync unknown clock to known clock.
-       const typename _Clock::time_point __c_entry = _Clock::now();
-       const __clock_t::time_point __s_entry = __clock_t::now();
-       const auto __delta = __abs_time - __c_entry;
-       const auto __s_atime = __s_entry + __delta;
-       return try_lock_shared_until(__s_atime);
-      }
-#endif
-
     void
     unlock_shared()
     {
       unlock();
     }
 
-#else // defined(__GTHREADS_CXX0X)
-
-#if _GTHREAD_USE_MUTEX_TIMEDLOCK
-    struct _Mutex : mutex, __timed_mutex_impl<_Mutex>
-    {
-      template<typename _Rep, typename _Period>
-       bool
-       try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
-       { return _M_try_lock_for(__rtime); }
-
-      template<typename _Clock, typename _Duration>
-       bool
-       try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
-       { return _M_try_lock_until(__atime); }
-    };
-#else
-    typedef mutex _Mutex;
+    void* native_handle() { return &_M_rwlock; }
+  };
 #endif
 
-    // Based on Howard Hinnant's reference implementation from N2406
-
-    _Mutex             _M_mut;
+#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+  /// A shared mutex type implemented using std::condition_variable.
+  class __shared_mutex_cv
+  {
+    friend class shared_timed_mutex;
+
+    // Based on Howard Hinnant's reference implementation from N2406.
+
+    // The high bit of _M_state is the write-entered flag which is set to
+    // indicate a writer has taken the lock or is queuing to take the lock.
+    // The remaining bits are the count of reader locks.
+    //
+    // To take a reader lock, block on gate1 while the write-entered flag is
+    // set or the maximum number of reader locks is held, then increment the
+    // reader lock count.
+    // To release, decrement the count, then if the write-entered flag is set
+    // and the count is zero then signal gate2 to wake a queued writer,
+    // otherwise if the maximum number of reader locks was held signal gate1
+    // to wake a reader.
+    //
+    // To take a writer lock, block on gate1 while the write-entered flag is
+    // set, then set the write-entered flag to start queueing, then block on
+    // gate2 while the number of reader locks is non-zero.
+    // To release, unset the write-entered flag and signal gate1 to wake all
+    // blocked readers and writers.
+    //
+    // This means that when no reader locks are held readers and writers get
+    // equal priority. When one or more reader locks is held a writer gets
+    // priority and no more reader locks can be taken while the writer is
+    // queued.
+
+    // Only locked when accessing _M_state or waiting on condition variables.
+    mutex              _M_mut;
+    // Used to block while write-entered is set or reader count at maximum.
     condition_variable _M_gate1;
+    // Used to block queued writers while reader count is non-zero.
     condition_variable _M_gate2;
+    // The write-entered flag and reader count.
     unsigned           _M_state;
 
     static constexpr unsigned _S_write_entered
       = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
-    static constexpr unsigned _M_n_readers = ~_S_write_entered;
+    static constexpr unsigned _S_max_readers = ~_S_write_entered;
+
+    // Test whether the write-entered flag is set. _M_mut must be locked.
+    bool _M_write_entered() const { return _M_state & _S_write_entered; }
+
+    // The number of reader locks currently held. _M_mut must be locked.
+    unsigned _M_readers() const { return _M_state & _S_max_readers; }
 
   public:
-    shared_timed_mutex() : _M_state(0) {}
+    __shared_mutex_cv() : _M_state(0) {}
 
-    ~shared_timed_mutex()
+    ~__shared_mutex_cv()
     {
-      _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
+      __glibcxx_assert( _M_state == 0 );
     }
 
-    shared_timed_mutex(const shared_timed_mutex&) = delete;
-    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
+    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
+    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
 
     // Exclusive ownership
 
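As an aside to the algorithm description in the hunk above: the write-entered flag and the reader count share a single unsigned word. The following standalone snippet (illustrative only, not part of the patch; all names are made up) walks through that encoding.

// Illustrative only -- not part of the patch. The high bit of the state
// word is the write-entered flag; the remaining bits count reader locks.
#include <cassert>
#include <climits>

int main()
{
  constexpr unsigned write_entered = 1U << (sizeof(unsigned) * CHAR_BIT - 1);
  constexpr unsigned max_readers   = ~write_entered;

  unsigned state = 0;                    // no writer queued, no readers
  ++state;                               // first reader acquires the lock
  ++state;                               // second reader acquires the lock
  assert((state & max_readers) == 2);
  assert(!(state & write_entered));

  state |= write_entered;                // a writer queues; new readers block on gate1
  assert((state & max_readers) == 2);    // existing readers are still counted

  --state;                               // readers release one by one
  --state;
  assert((state & max_readers) == 0);    // count hits zero: gate2 wakes the writer

  state = 0;                             // writer done: clear the flag, notify gate1
  assert(state == 0);
  return 0;
}
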
@@ -288,11 +329,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     lock()
     {
       unique_lock<mutex> __lk(_M_mut);
-      while (_M_state & _S_write_entered)
-       _M_gate1.wait(__lk);
+      // Wait until we can set the write-entered flag.
+      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
       _M_state |= _S_write_entered;
-      while (_M_state & _M_n_readers)
-       _M_gate2.wait(__lk);
+      // Then wait until there are no more readers.
+      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
     }
 
     bool
@@ -307,41 +348,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       return false;
     }
 
-#if _GTHREAD_USE_MUTEX_TIMEDLOCK
-    template<typename _Rep, typename _Period>
-      bool
-      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
-      {
-       unique_lock<_Mutex> __lk(_M_mut, __rel_time);
-       if (__lk.owns_lock() && _M_state == 0)
-         {
-           _M_state = _S_write_entered;
-           return true;
-         }
-       return false;
-      }
-
-    template<typename _Clock, typename _Duration>
-      bool
-      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
-      {
-       unique_lock<_Mutex> __lk(_M_mut, __abs_time);
-       if (__lk.owns_lock() && _M_state == 0)
-         {
-           _M_state = _S_write_entered;
-           return true;
-         }
-       return false;
-      }
-#endif
-
     void
     unlock()
     {
-      {
-       lock_guard<_Mutex> __lk(_M_mut);
-       _M_state = 0;
-      }
+      lock_guard<mutex> __lk(_M_mut);
+      __glibcxx_assert( _M_write_entered() );
+      _M_state = 0;
+      // call notify_all() while mutex is held so that another thread can't
+      // lock and unlock the mutex then destroy *this before we make the call.
       _M_gate1.notify_all();
     }
 
@@ -351,94 +365,364 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     lock_shared()
     {
       unique_lock<mutex> __lk(_M_mut);
-      while ((_M_state & _S_write_entered)
-         || (_M_state & _M_n_readers) == _M_n_readers)
-       {
-         _M_gate1.wait(__lk);
-       }
-      unsigned __num_readers = (_M_state & _M_n_readers) + 1;
-      _M_state &= ~_M_n_readers;
-      _M_state |= __num_readers;
+      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
+      ++_M_state;
     }
 
     bool
     try_lock_shared()
     {
-      unique_lock<_Mutex> __lk(_M_mut, try_to_lock);
-      unsigned __num_readers = _M_state & _M_n_readers;
-      if (__lk.owns_lock() && !(_M_state & _S_write_entered)
-         && __num_readers != _M_n_readers)
+      unique_lock<mutex> __lk(_M_mut, try_to_lock);
+      if (!__lk.owns_lock())
+       return false;
+      if (_M_state < _S_max_readers)
        {
-         ++__num_readers;
-         _M_state &= ~_M_n_readers;
-         _M_state |= __num_readers;
+         ++_M_state;
          return true;
        }
       return false;
     }
 
-#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+    void
+    unlock_shared()
+    {
+      lock_guard<mutex> __lk(_M_mut);
+      __glibcxx_assert( _M_readers() > 0 );
+      auto __prev = _M_state--;
+      if (_M_write_entered())
+       {
+         // Wake the queued writer if there are no more readers.
+         if (_M_readers() == 0)
+           _M_gate2.notify_one();
+         // No need to notify gate1 because we give priority to the queued
+         // writer, and that writer will eventually notify gate1 after it
+         // clears the write-entered flag.
+       }
+      else
+       {
+         // Wake any thread that was blocked on reader overflow.
+         if (__prev == _S_max_readers)
+           _M_gate1.notify_one();
+       }
+    }
+  };
+#endif
+  /// @endcond
+
+#ifdef __cpp_lib_shared_mutex
+  /// The standard shared mutex type.
+  class shared_mutex
+  {
+  public:
+    shared_mutex() = default;
+    ~shared_mutex() = default;
+
+    shared_mutex(const shared_mutex&) = delete;
+    shared_mutex& operator=(const shared_mutex&) = delete;
+
+    // Exclusive ownership
+
+    void lock() { _M_impl.lock(); }
+    [[nodiscard]] bool try_lock() { return _M_impl.try_lock(); }
+    void unlock() { _M_impl.unlock(); }
+
+    // Shared ownership
+
+    void lock_shared() { _M_impl.lock_shared(); }
+    [[nodiscard]] bool try_lock_shared() { return _M_impl.try_lock_shared(); }
+    void unlock_shared() { _M_impl.unlock_shared(); }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+    typedef void* native_handle_type;
+    native_handle_type native_handle() { return _M_impl.native_handle(); }
+
+  private:
+    __shared_mutex_pthread _M_impl;
+#else
+  private:
+    __shared_mutex_cv _M_impl;
+#endif
+  };
+#endif // __cpp_lib_shared_mutex
+
+  /// @cond undocumented
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+  using __shared_timed_mutex_base = __shared_mutex_pthread;
+#else
+  using __shared_timed_mutex_base = __shared_mutex_cv;
+#endif
+  /// @endcond
+
+  /// The standard shared timed mutex type.
+  class shared_timed_mutex
+  : private __shared_timed_mutex_base
+  {
+    using _Base = __shared_timed_mutex_base;
+
+    // Must use the same clock as condition_variable for __shared_mutex_cv.
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+    using __clock_t = chrono::steady_clock;
+#else
+    using __clock_t = chrono::system_clock;
+#endif
+
+  public:
+    shared_timed_mutex() = default;
+    ~shared_timed_mutex() = default;
+
+    shared_timed_mutex(const shared_timed_mutex&) = delete;
+    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
+
+    // Exclusive ownership
+
+    void lock() { _Base::lock(); }
+    _GLIBCXX_NODISCARD bool try_lock() { return _Base::try_lock(); }
+    void unlock() { _Base::unlock(); }
+
     template<typename _Rep, typename _Period>
+      _GLIBCXX_NODISCARD
       bool
-      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
+      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
       {
-       unique_lock<_Mutex> __lk(_M_mut, __rel_time);
-       if (__lk.owns_lock())
+       auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
+       if (ratio_greater<__clock_t::period, _Period>())
+         ++__rt;
+       return try_lock_until(__clock_t::now() + __rt);
+      }
+
+    // Shared ownership
+
+    void lock_shared() { _Base::lock_shared(); }
+    _GLIBCXX_NODISCARD
+    bool try_lock_shared() { return _Base::try_lock_shared(); }
+    void unlock_shared() { _Base::unlock_shared(); }
+
+    template<typename _Rep, typename _Period>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
+      {
+       auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
+       if (ratio_greater<__clock_t::period, _Period>())
+         ++__rt;
+       return try_lock_shared_until(__clock_t::now() + __rt);
+      }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+
+    // Exclusive ownership
+
+    template<typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_until(const chrono::time_point<chrono::system_clock,
+                    _Duration>& __atime)
+      {
+       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+       __gthread_time_t __ts =
          {
-           unsigned __num_readers = _M_state & _M_n_readers;
-           if (!(_M_state & _S_write_entered)
-               && __num_readers != _M_n_readers)
-             {
-               ++__num_readers;
-               _M_state &= ~_M_n_readers;
-               _M_state |= __num_readers;
-               return true;
-             }
-         }
+           static_cast<std::time_t>(__s.time_since_epoch().count()),
+           static_cast<long>(__ns.count())
+         };
+
+       int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
+       // On self-deadlock, we just fail to acquire the lock.  Technically,
+       // the program violated the precondition.
+       if (__ret == ETIMEDOUT || __ret == EDEADLK)
+         return false;
+       // Errors not handled: EINVAL
+       __glibcxx_assert(__ret == 0);
+       return true;
+      }
+
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+    template<typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_until(const chrono::time_point<chrono::steady_clock,
+                  _Duration>& __atime)
+      {
+       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+       __gthread_time_t __ts =
+         {
+           static_cast<std::time_t>(__s.time_since_epoch().count()),
+           static_cast<long>(__ns.count())
+         };
+
+       int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
+                                              &__ts);
+       // On self-deadlock, we just fail to acquire the lock.  Technically,
+       // the program violated the precondition.
+       if (__ret == ETIMEDOUT || __ret == EDEADLK)
+         return false;
+       // Errors not handled: EINVAL
+       __glibcxx_assert(__ret == 0);
+       return true;
+      }
+#endif
+
+    template<typename _Clock, typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+      {
+#if __cplusplus > 201703L
+       static_assert(chrono::is_clock_v<_Clock>);
+#endif
+       // The user-supplied clock may not tick at the same rate as
+       // steady_clock, so we must loop in order to guarantee that
+       // the timeout has expired before returning false.
+       typename _Clock::time_point __now = _Clock::now();
+       do {
+           auto __rtime = __atime - __now;
+           if (try_lock_for(__rtime))
+             return true;
+           __now = _Clock::now();
+       } while (__atime > __now);
        return false;
       }
 
+    // Shared ownership
+
+    template<typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
+                           _Duration>& __atime)
+      {
+       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+       __gthread_time_t __ts =
+         {
+           static_cast<std::time_t>(__s.time_since_epoch().count()),
+           static_cast<long>(__ns.count())
+         };
+
+       int __ret;
+       // Unlike for lock(), we are not allowed to throw an exception so if
+       // the maximum number of read locks has been exceeded, or we would
+       // deadlock, we just try to acquire the lock again (and will time out
+       // eventually).
+       // In cases where we would exceed the maximum number of read locks
+       // throughout the whole time until the timeout, we will fail to
+       // acquire the lock even if it would be logically free; however, this
+       // is allowed by the standard, and we made a "strong effort"
+       // (see C++14 30.4.1.4p26).
+       // For cases where the implementation detects a deadlock we
+       // intentionally block and timeout so that an early return isn't
+       // mistaken for a spurious failure, which might help users realise
+       // there is a deadlock.
+       do
+         __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
+       while (__ret == EAGAIN || __ret == EDEADLK);
+       if (__ret == ETIMEDOUT)
+         return false;
+       // Errors not handled: EINVAL
+       __glibcxx_assert(__ret == 0);
+       return true;
+      }
+
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+    template<typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
+                           _Duration>& __atime)
+      {
+       auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+       auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+       __gthread_time_t __ts =
+         {
+           static_cast<std::time_t>(__s.time_since_epoch().count()),
+           static_cast<long>(__ns.count())
+         };
+
+       int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
+                                              &__ts);
+       // On self-deadlock, we just fail to acquire the lock.  Technically,
+       // the program violated the precondition.
+       if (__ret == ETIMEDOUT || __ret == EDEADLK)
+         return false;
+       // Errors not handled: EINVAL
+       __glibcxx_assert(__ret == 0);
+       return true;
+      }
+#endif
+
+    template<typename _Clock, typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_shared_until(const chrono::time_point<_Clock,
+                                                    _Duration>& __atime)
+      {
+#if __cplusplus > 201703L
+       static_assert(chrono::is_clock_v<_Clock>);
+#endif
+       // The user-supplied clock may not tick at the same rate as
+       // steady_clock, so we must loop in order to guarantee that
+       // the timeout has expired before returning false.
+       typename _Clock::time_point __now = _Clock::now();
+       do {
+           auto __rtime = __atime - __now;
+           if (try_lock_shared_for(__rtime))
+             return true;
+           __now = _Clock::now();
+       } while (__atime > __now);
+       return false;
+      }
+
+#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+
+    // Exclusive ownership
+
+    template<typename _Clock, typename _Duration>
+      _GLIBCXX_NODISCARD
+      bool
+      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
+      {
+       unique_lock<mutex> __lk(_M_mut);
+       if (!_M_gate1.wait_until(__lk, __abs_time,
+                                [=]{ return !_M_write_entered(); }))
+         {
+           return false;
+         }
+       _M_state |= _S_write_entered;
+       if (!_M_gate2.wait_until(__lk, __abs_time,
+                                [=]{ return _M_readers() == 0; }))
+         {
+           _M_state ^= _S_write_entered;
+           // Wake all threads blocked while the write-entered flag was set.
+           _M_gate1.notify_all();
+           return false;
+         }
+       return true;
+      }
+
+    // Shared ownership
+
     template <typename _Clock, typename _Duration>
+      _GLIBCXX_NODISCARD
       bool
       try_lock_shared_until(const chrono::time_point<_Clock,
                                                     _Duration>& __abs_time)
       {
-       unique_lock<_Mutex> __lk(_M_mut, __abs_time);
-       if (__lk.owns_lock())
+       unique_lock<mutex> __lk(_M_mut);
+       if (!_M_gate1.wait_until(__lk, __abs_time,
+                                [=]{ return _M_state < _S_max_readers; }))
          {
-           unsigned __num_readers = _M_state & _M_n_readers;
-           if (!(_M_state & _S_write_entered)
-               && __num_readers != _M_n_readers)
-             {
-               ++__num_readers;
-               _M_state &= ~_M_n_readers;
-               _M_state |= __num_readers;
-               return true;
-             }
+           return false;
          }
-       return false;
+       ++_M_state;
+       return true;
       }
-#endif
 
-    void
-    unlock_shared()
-    {
-      lock_guard<_Mutex> __lk(_M_mut);
-      unsigned __num_readers = (_M_state & _M_n_readers) - 1;
-      _M_state &= ~_M_n_readers;
-      _M_state |= __num_readers;
-      if (_M_state & _S_write_entered)
-       {
-         if (__num_readers == 0)
-           _M_gate2.notify_one();
-       }
-      else
-       {
-         if (__num_readers == _M_n_readers - 1)
-           _M_gate1.notify_one();
-       }
-    }
-#endif // !defined(__GTHREADS_CXX0X)
+#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
   };
 #endif // _GLIBCXX_HAS_GTHREADS
 
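The generic-clock overloads of try_lock_until and try_lock_shared_until above retry in a loop because the user-supplied clock may not tick at the same rate as the native clock. A standalone sketch of that pattern (illustrative only, not part of the patch; the function name is made up), written against std::timed_mutex:

// Illustrative only -- not part of the patch. Retry with a relative timeout
// until the caller's own clock says the deadline has passed, so an early
// return from the native timed lock is never mistaken for a timeout.
#include <chrono>
#include <mutex>

template<typename Clock, typename Duration>
bool
try_lock_until_generic(std::timed_mutex& m,
                       const std::chrono::time_point<Clock, Duration>& abs_time)
{
  typename Clock::time_point now = Clock::now();
  do {
      if (m.try_lock_for(abs_time - now))   // one attempt, relative timeout
        return true;
      now = Clock::now();                   // re-check on the caller's clock
  } while (abs_time > now);
  return false;
}

int main()
{
  std::timed_mutex m;
  const auto deadline = std::chrono::steady_clock::now()
                          + std::chrono::milliseconds(50);
  const bool locked = try_lock_until_generic(m, deadline);
  if (locked)
    m.unlock();
  return locked ? 0 : 1;
}
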
@@ -454,27 +738,30 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
 
       explicit
-      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
+      shared_lock(mutex_type& __m)
+      : _M_pm(std::__addressof(__m)), _M_owns(true)
       { __m.lock_shared(); }
 
       shared_lock(mutex_type& __m, defer_lock_t) noexcept
-      : _M_pm(&__m), _M_owns(false) { }
+      : _M_pm(std::__addressof(__m)), _M_owns(false) { }
 
       shared_lock(mutex_type& __m, try_to_lock_t)
-      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
+      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
 
       shared_lock(mutex_type& __m, adopt_lock_t)
-      : _M_pm(&__m), _M_owns(true) { }
+      : _M_pm(std::__addressof(__m)), _M_owns(true) { }
 
       template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
-      : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
+      : _M_pm(std::__addressof(__m)),
+       _M_owns(__m.try_lock_shared_until(__abs_time)) { }
 
       template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
-      : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
+      : _M_pm(std::__addressof(__m)),
+       _M_owns(__m.try_lock_shared_for(__rel_time)) { }
 
       ~shared_lock()
       {
@@ -503,6 +790,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
        _M_owns = true;
       }
 
+      _GLIBCXX_NODISCARD
       bool
       try_lock()
       {
@@ -511,6 +799,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
       template<typename _Rep, typename _Period>
+       _GLIBCXX_NODISCARD
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
@@ -519,6 +808,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
        }
 
       template<typename _Clock, typename _Duration>
+       _GLIBCXX_NODISCARD
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
@@ -530,7 +820,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       unlock()
       {
        if (!_M_owns)
-         __throw_system_error(int(errc::resource_deadlock_would_occur));
+         __throw_system_error(int(errc::operation_not_permitted));
        _M_pm->unlock_shared();
        _M_owns = false;
       }
@@ -548,15 +838,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       release() noexcept
       {
        _M_owns = false;
-       return std::exchange(_M_pm, nullptr);
+       return std::__exchange(_M_pm, nullptr);
       }
 
       // Getters
 
+      _GLIBCXX_NODISCARD
       bool owns_lock() const noexcept { return _M_owns; }
 
       explicit operator bool() const noexcept { return _M_owns; }
 
+      _GLIBCXX_NODISCARD
       mutex_type* mutex() const noexcept { return _M_pm; }
 
     private:
@@ -574,14 +866,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     };
 
   /// Swap specialization for shared_lock
+  /// @relates shared_mutex
   template<typename _Mutex>
     void
     swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
     { __x.swap(__y); }
 
-#endif // _GLIBCXX_USE_C99_STDINT_TR1
-
-  // @} group mutexes
+  /// @} group mutexes
 _GLIBCXX_END_NAMESPACE_VERSION
 } // namespace
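
A short usage sketch of the user-facing types this header provides (illustrative only, not part of the patch; requires C++17 for std::shared_mutex and -pthread when building with GCC on POSIX systems): readers take the lock through std::shared_lock and may run concurrently, while a writer takes it exclusively through std::unique_lock.

// Illustrative only -- not part of the patch.
#include <shared_mutex>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <iostream>

std::shared_mutex cache_mutex;
std::string cached_value = "initial";

std::string read_value()
{
  std::shared_lock<std::shared_mutex> lock(cache_mutex);  // shared ownership
  return cached_value;
}

void write_value(const std::string& v)
{
  std::unique_lock<std::shared_mutex> lock(cache_mutex);  // exclusive ownership
  cached_value = v;
}

int main()
{
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back([]{ (void) read_value(); });
  threads.emplace_back([]{ write_value("updated"); });
  for (auto& t : threads)
    t.join();
  std::cout << read_value() << '\n';
}

std::shared_timed_mutex (available since C++14) additionally provides the try_lock_for/try_lock_until and try_lock_shared_for/try_lock_shared_until members shown in the patch, and works with std::shared_lock in the same way.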