//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      // Test-and-test-and-set: spin on a relaxed load first so that waiters
      // do not bounce the cache line with failed exchanges.
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
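
// A minimal usage sketch (the thread roles are illustrative): one thread
// parks itself and another unparks it; each Post grants one permit, and
// Post(count) grants count permits at once.
//   Semaphore sem;
//   // consumer thread:          // producer thread:
//   sem.Wait();  // blocks       sem.Post();  // unparks one waiter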

// Reader-writer mutex.
class MUTEX Mutex2 {
 public:
  constexpr Mutex2() {}

  void Lock() ACQUIRE() {
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    const uptr kMaxSpinIters = 1500;
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning: reload the state and retry.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
        state = atomic_load(&state_, memory_order_relaxed);
        DCHECK_NE(state & kWriterSpinWait, 0);
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
    }
  }

  void Unlock() RELEASE() {
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer =
          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) +
                    (wake_readers << kReaderLockShift);
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    bool locked;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      locked =
          (state & kReaderLockMask) == 0 &&
          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
      if (LIKELY(!locked))
        new_state = state + kReaderLockInc;
      else
        new_state = state + kWaitingReaderInc;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_acquire)));
    if (UNLIKELY(locked))
      readers_.Wait();
    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
  }

  void ReadUnlock() RELEASE_SHARED() {
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
      new_state = state - kReaderLockInc;
      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 2 flags:
  //  - writer lock,
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting,
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //
  // Writers support active spinning, readers do not.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // Writers hand off the mutex to readers: after wake-up, readers
  // already assume ownership of the mutex (they don't need to do any
  // state updates). But the mutex is not handed off to writers:
  // after wake-up, writers compete to lock the mutex again.
  // This is needed to allow repeated write locks even in the presence
  // of other blocked writers.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
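  // With kCounterWidth = 20, the resulting 64-bit layout is (a summary
  // derived from the constants above):
  //   bits  0..19  readers holding the lock
  //   bits 20..39  waiting readers
  //   bits 40..59  waiting writers
  //   bit  60      writer lock
  //   bit  61      writer spin-wait flag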

  Mutex2(const Mutex2 &) = delete;
  void operator=(const Mutex2 &) = delete;
};

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
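// (Semantics note: FutexWait blocks the calling thread only while *p still
// equals cmp, and FutexWake wakes up to count threads blocked on p. On Linux
// these map to FUTEX_WAIT/FUTEX_WAKE; other platforms use the closest native
// primitive.)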

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
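
// A typical usage sketch: the constexpr LinkerInitialized constructor lets a
// global mutex live in zero-initialized storage with no runtime constructor
// (LINKER_INITIALIZED is defined in sanitizer_internal_defs.h):
//   static BlockingMutex mu(LINKER_INITIALIZED);
//   void Foo() {
//     BlockingMutexLock l(&mu);  // see the scoped-lock typedefs below
//     // ... critical section ...
//   }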

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
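
// A minimal RAII usage sketch (mu and data are hypothetical names):
//   RWMutex mu;
//   int data;
//   void Write(int v) {
//     RWMutexLock l(&mu);  // exclusive; unlocked when l leaves scope
//     data = v;
//   }
//   int Read() {
//     RWMutexReadLock l(&mu);  // shared
//     return data;
//   }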

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H