//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
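
// Usage sketch (illustrative, not part of the interface): StaticSpinMutex has
// no constructor, so it relies on static zero-initialization (or an explicit
// Init() call) to start in the unlocked state. The names below (g_counter_mu,
// g_counter, IncCounter) are hypothetical.
//
//   static StaticSpinMutex g_counter_mu;  // zero-initialized => unlocked
//   static u64 g_counter;
//
//   void IncCounter() {
//     g_counter_mu.Lock();
//     g_counter++;
//     g_counter_mu.Unlock();
//   }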

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread to return from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
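
// Usage sketch (illustrative only): Wait() parks the caller until a matching
// Post() credit is available; Post(count) releases up to `count` waiters.
// The one-shot hand-off below and the names `done`/`Worker` are hypothetical.
//
//   Semaphore done;
//
//   void Worker() {
//     // ... produce a result ...
//     done.Post();  // wake the thread blocked in Wait()
//   }
//
//   void WaitForWorker() {
//     done.Wait();  // blocks until Worker() posts
//   }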

// Reader-writer mutex.
class MUTEX Mutex2 {
 public:
  constexpr Mutex2() {}

  void Lock() ACQUIRE() {
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    const uptr kMaxSpinIters = 1500;
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough; increment the waiting-writer count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
        state = atomic_load(&state_, memory_order_relaxed);
        DCHECK_NE(state & kWriterSpinWait, 0);
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // the next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
    }
  }

  void Unlock() RELEASE() {
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer =
          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) +
                    (wake_readers << kReaderLockShift);
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    bool locked;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      locked =
          (state & kReaderLockMask) == 0 &&
          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
      if (LIKELY(!locked))
        new_state = state + kReaderLockInc;
      else
        new_state = state + kWaitingReaderInc;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_acquire)));
    if (UNLIKELY(locked))
      readers_.Wait();
    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
  }

  void ReadUnlock() RELEASE_SHARED() {
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
      new_state = state - kReaderLockInc;
      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not check that the calling thread actually owns the
  // mutex. Such a per-thread ownership check, while more strictly correct,
  // causes problems in cases like StopTheWorld, where a parent thread owns
  // the mutex but a child checks that it is locked. Rather than maintaining
  // complex state to work around those situations, the check only verifies
  // that the mutex is held by some thread.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 2 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent a thundering herd problem
  //    (new writers are not woken if this flag is set)
  //
  // Writers support active spinning, readers do not.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // Writers hand off the mutex to readers: after wake-up, readers
  // already assume ownership of the mutex (they don't need to do any
  // state updates). But the mutex is not handed off to writers:
  // after wake-up, writers compete to lock the mutex again.
  // This is needed to allow repeated write locks even in the presence
  // of other blocked writers.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
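
  // For reference, a sketch of the resulting bit layout of state_ (derived
  // from the constants above, with kCounterWidth == 20):
  //   bits  0..19  readers currently holding the lock   (kReaderLockMask)
  //   bits 20..39  readers waiting for the lock         (kWaitingReaderMask)
  //   bits 40..59  writers waiting for the lock         (kWaitingWriterMask)
  //   bit  60      a writer holds the lock              (kWriterLock)
  //   bit  61      a writer is awake and spin-waiting   (kWriterSpinWait)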

  Mutex2(const Mutex2 &) = delete;
  void operator=(const Mutex2 &) = delete;
};
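
// Usage sketch (illustrative only): exclusive and shared locking with Mutex2.
// The names `g_registry_mu`, `AddEntry` and `DumpEntries` are hypothetical.
//
//   static Mutex2 g_registry_mu;  // constexpr-constructible
//
//   void AddEntry() {
//     g_registry_mu.Lock();      // exclusive (writer) lock
//     // ... mutate the shared registry ...
//     g_registry_mu.Unlock();
//   }
//
//   void DumpEntries() {
//     g_registry_mu.ReadLock();  // shared (reader) lock
//     // ... read the shared registry ...
//     g_registry_mu.ReadUnlock();
//   }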

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
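// (A note on the intended semantics, inferred from typical futex usage rather
// than stated here: FutexWait blocks the caller while *p still equals cmp,
// possibly with spurious wakeups, and FutexWake wakes up to `count` threads
// blocked on p. The per-OS implementations define the exact behavior.)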

class MUTEX BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock() ACQUIRE();
  void Unlock() RELEASE();

  // This function does not check that the calling thread actually owns the
  // mutex. Such a per-thread ownership check, while more strictly correct,
  // causes problems in cases like StopTheWorld, where a parent thread owns
  // the mutex but a child checks that it is locked. Rather than maintaining
  // complex state to work around those situations, the check only verifies
  // that the mutex is held by some thread, and assumes callers to be
  // generally well-behaved.
  void CheckLocked() const CHECK_LOCKED();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
class MUTEX RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() ACQUIRE() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() RELEASE() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() ACQUIRE_SHARED() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() RELEASE_SHARED() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };
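
  // Note on the encoding (derived from the constants and operations above):
  // bit 0 (kWriteLock) is set while a writer holds the lock; the remaining
  // bits count the readers holding the lock in units of kReadLock (2).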

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex &) = delete;
  void operator=(const RWMutex &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
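
// Usage sketch (illustrative only): the scoped lock types release the mutex
// automatically at scope exit, even on early return. The names `g_mu` and
// `g_rw_mu` are hypothetical.
//
//   static StaticSpinMutex g_mu;
//   static RWMutex g_rw_mu;
//
//   void UpdateState() {
//     SpinMutexLock l(&g_mu);       // Lock() now, Unlock() in the destructor
//     // ... exclusive critical section ...
//   }
//
//   void ReadState() {
//     RWMutexReadLock l(&g_rw_mu);  // ReadLock()/ReadUnlock() via RAII
//     // ... shared critical section ...
//   }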

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H