try_lock()
{
int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
- if (__ret == EBUSY) return false;
- // Errors not handled: EINVAL
+ if (__ret == 0)
+ return true;
+ if (__ret == EBUSY)
+ return false;
+ // Errors not handled: EINVAL, EDEADLK
__glibcxx_assert(__ret == 0);
- return true;
+ // try_lock() is not permitted to throw
+ return false;
}
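(Annotation, not part of the patch.) The rewritten try_lock() above follows a
general pattern for non-throwing try operations over a pthread rwlock: report
success first, map the documented "busy" result to false, and never return
true on an unexpected error code. A minimal standalone sketch of that pattern
against the raw pthread API (try_write_lock is a made-up name, not the
libstdc++ wrapper):

  #include <cassert>
  #include <cerrno>
  #include <cstdio>
  #include <pthread.h>

  // Report success first, map EBUSY to false, and never return true for an
  // unexpected error, since a try-lock operation may not throw.
  bool
  try_write_lock(pthread_rwlock_t* rw)
  {
    int ret = pthread_rwlock_trywrlock(rw);
    if (ret == 0)
      return true;   // lock acquired
    if (ret == EBUSY)
      return false;  // lock already held (possibly by this thread)
    // Anything else (e.g. EDEADLK, EINVAL) means a precondition was violated:
    // diagnose in debug builds, otherwise report failure rather than claim
    // ownership we do not have.
    assert(ret == 0);
    return false;
  }

  int main()
  {
    pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
    bool first = try_write_lock(&rw);   // uncontended: acquires the lock
    bool second = try_write_lock(&rw);  // held by this thread: EBUSY -> false
                                        // (an EDEADLK-reporting implementation
                                        // would trip the assert above instead)
    std::printf("first=%d second=%d\n", first, second);
    if (first)
      pthread_rwlock_unlock(&rw);
  }

The point mirrored from the patch is the final "return false": with assertions
disabled, an unexpected error now reports failure instead of falsely claiming
that the lock was taken.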
void
{
struct timespec __ts = chrono::__to_timeout_timespec(__atime);
int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
- // On self-deadlock, we just fail to acquire the lock. Technically,
- // the program violated the precondition.
- if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ if (__ret == 0)
+ return true;
+ if (__ret == ETIMEDOUT)
return false;
- // Errors not handled: EINVAL
+ // Errors not handled: EINVAL, EDEADLK
__glibcxx_assert(__ret == 0);
- return true;
+ return false;
}
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
struct timespec __ts = chrono::__to_timeout_timespec(__atime);
int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
&__ts);
- // On self-deadlock, we just fail to acquire the lock. Technically,
- // the program violated the precondition.
- if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ if (__ret == 0)
+ return true;
+ if (__ret == ETIMEDOUT)
return false;
- // Errors not handled: EINVAL
+ // Errors not handled: EINVAL, EDEADLK
__glibcxx_assert(__ret == 0);
- return true;
+ return false;
}
#endif
// acquire the lock even if it would be logically free; however, this
// is allowed by the standard, and we made a "strong effort"
// (see C++14 30.4.1.4p26).
- // For cases where the implementation detects a deadlock we
- // intentionally block and timeout so that an early return isn't
- // mistaken for a spurious failure, which might help users realise
- // there is a deadlock.
do
__ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
- while (__ret == EAGAIN || __ret == EDEADLK);
+ while (__ret == EAGAIN);
+ if (__ret == 0)
+ return true;
if (__ret == ETIMEDOUT)
return false;
- // Errors not handled: EINVAL
+ // Errors not handled: EINVAL, EDEADLK
__glibcxx_assert(__ret == 0);
- return true;
+ return false;
}
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
struct timespec __ts = chrono::__to_timeout_timespec(__atime);
int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
&__ts);
- // On self-deadlock, we just fail to acquire the lock. Technically,
- // the program violated the precondition.
- if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ // On self-deadlock, if _GLIBCXX_ASSERTIONS is not defined, we just
+ // fail to acquire the lock. Technically, the program violated the
+ // precondition.
+ if (__ret == 0)
+ return true;
+ if (__ret == ETIMEDOUT)
return false;
- // Errors not handled: EINVAL
+ // Errors not handled: EINVAL, EDEADLK
__glibcxx_assert(__ret == 0);
- return true;
+ return false;
}
#endif
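(Background, not part of the patch.) When _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
is available, the overloads above wait against CLOCK_MONOTONIC directly, so
steady_clock timeouts are not perturbed by wall-clock adjustments. A standalone
sketch of the underlying call, assuming glibc 2.30 or later and compiling with
-pthread:

  #include <cstdio>
  #include <pthread.h>
  #include <time.h>

  int main()
  {
    pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

    // Build an absolute CLOCK_MONOTONIC deadline 100ms from now.
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    ts.tv_nsec += 100 * 1000 * 1000;
    if (ts.tv_nsec >= 1000000000)
      {
        ++ts.tv_sec;
        ts.tv_nsec -= 1000000000;
      }

    // Waits against CLOCK_MONOTONIC, so a wall-clock jump cannot shorten or
    // lengthen the wait.  Returns 0 on success, ETIMEDOUT on timeout, or
    // another error code.
    int ret = pthread_rwlock_clockrdlock(&rw, CLOCK_MONOTONIC, &ts);
    if (ret == 0)
      pthread_rwlock_unlock(&rw);
    else
      std::printf("pthread_rwlock_clockrdlock: %d\n", ret);
  }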
#define VERIFY_IN_NEW_THREAD(X) \
(void) std::async(std::launch::async, [&] { VERIFY(X); })
+// Unfortunately POSIX says that pthread_rwlock_clockwrlock,
+// pthread_rwlock_clockrdlock, pthread_rwlock_timedwrlock and
+// pthread_rwlock_timedrdlock are allowed to deadlock rather than return
+// EDEADLK or just time out if the thread already has the associated lock. This
+// means that we can't enable the deadlock tests by default. The glibc
+// implementation does return EDEADLK, and __shared_mutex_cv times out, so we
+// can test both of them.
+#if (defined(__GLIBC__) || ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)) \
+ && ! defined(_GLIBCXX_ASSERTIONS)
+#define DEADLOCK_VERIFY(X) \
+ VERIFY(X)
+#else
+#define DEADLOCK_VERIFY(X) \
+ do {} while(0)
+#endif
+
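(Annotation, not part of the test file.) The gate above matters because where
the underlying rwlock call is permitted to genuinely deadlock, DEADLOCK_VERIFY
must not evaluate its argument at all or the test itself could hang; where the
argument is evaluated, the self-locking attempt is required to fail. Roughly:

  // glibc rwlocks (report EDEADLK) or the __shared_mutex_cv fallback (which
  // simply times out), built without _GLIBCXX_ASSERTIONS:
  DEADLOCK_VERIFY(!stm.try_lock_until(tp)); // => VERIFY(!stm.try_lock_until(tp));

  // any other rwlock implementation (may block forever), or a build with
  // _GLIBCXX_ASSERTIONS (an unexpected EDEADLK trips the assertion):
  DEADLOCK_VERIFY(!stm.try_lock_until(tp)); // => do {} while(0); X never evaluated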
template <typename Clock>
void
test_exclusive_absolute(chrono::nanoseconds offset)
std::shared_timed_mutex stm;
chrono::time_point<Clock> tp(offset);
VERIFY(stm.try_lock_until(tp));
+
+ // Trying to acquire the same lock again on the same thread should fail.
+ DEADLOCK_VERIFY(!stm.try_lock_until(tp));
+
+ // Check that false is returned on timeout by trying to lock from a
+ // different thread, even if the implementation returned EDEADLK above.
VERIFY_IN_NEW_THREAD(!stm.try_lock_until(tp));
}
stm.unlock_shared();
VERIFY(stm.try_lock_for(chrono::seconds{10}));
+ DEADLOCK_VERIFY(!stm.try_lock_shared_until(tp));
VERIFY_IN_NEW_THREAD(!stm.try_lock_shared_until(tp));
}
std::shared_timed_mutex stm;
const auto d = -Clock::now().time_since_epoch() + offset;
VERIFY(stm.try_lock_for(d));
+ DEADLOCK_VERIFY(!stm.try_lock_for(d));
VERIFY_IN_NEW_THREAD(!stm.try_lock_for(d));
}
stm.unlock_shared();
// Should complete immediately
VERIFY(stm.try_lock_for(chrono::seconds{10}));
+ DEADLOCK_VERIFY(!stm.try_lock_shared_for(d));
VERIFY_IN_NEW_THREAD(!stm.try_lock_shared_for(d));
}