//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

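// Simple spin lock over a single atomic byte. It has no constructor, so a
// zero-initialized object in static storage is already in the unlocked state;
// otherwise call Init() before first use.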
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

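// Same spin lock, but with a constructor that calls Init(), for objects with
// dynamic storage duration. Copying is disabled.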
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

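// Mutex that blocks in the OS instead of spinning. Only declared here; the
// implementation is platform-specific.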
class BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not check that the calling thread is the thread that
  // owns the mutex. That stricter check, while more correct, causes problems
  // in cases like StopTheWorld, where a parent thread owns the mutex but a
  // child checks that it is locked. Rather than maintaining complex state to
  // work around those situations, the check only verifies that the mutex is
  // held by someone, and assumes callers to be generally well-behaved.
  void CheckLocked();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};

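// RAII wrapper: acquires the mutex in the constructor and releases it in the
// destructor.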
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

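// RAII wrapper for the read side of a reader-writer mutex: takes ReadLock()
// in the constructor and ReadUnlock() in the destructor.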
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

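// Convenience typedefs for the scoped-lock wrappers over the mutex types
// defined above.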
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
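
// Illustrative usage sketch (the names `reporting_mutex` and `ReportSomething`
// are hypothetical, not part of this header):
//
//   static StaticSpinMutex reporting_mutex;  // zero-initialized => unlocked
//
//   void ReportSomething() {
//     SpinMutexLock l(&reporting_mutex);  // Lock() in the constructor
//     // ... critical section ...
//   }                                     // Unlock() in the destructor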

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H