//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
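
// Illustrative usage sketch (not part of the original header): StaticSpinMutex
// has no constructor, so a global instance in zero-initialized static storage
// starts out unlocked; Init() merely resets the state explicitly. The names
// below (g_registry_mu, RegisterItem) are hypothetical.
//
//   static StaticSpinMutex g_registry_mu;  // zero state == unlocked
//
//   void RegisterItem() {
//     g_registry_mu.Lock();    // spins in LockSlow() if contended
//     // ... mutate shared registry state ...
//     g_registry_mu.Unlock();
//   }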

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not check that the calling thread is the thread which
  // owns the mutex. The stricter owner check, while more correct, causes
  // problems in cases like StopTheWorld, where a parent thread acquires the
  // mutex but a child thread checks that it is locked. Rather than maintaining
  // complex state to work around such situations, the check only verifies that
  // the mutex is owned by some thread, and assumes callers to be generally
  // well-behaved.
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
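
// Illustrative usage sketch (not part of the original header): BlockingMutex
// is intended for locks that may block in the OS rather than spin. The
// LinkerInitialized constructor allows a global instance without a runtime
// static initializer; LINKER_INITIALIZED is assumed here to be the enum value
// from sanitizer_internal_defs.h, and the other names are hypothetical.
//
//   static BlockingMutex g_report_mu(LINKER_INITIALIZED);
//
//   void PrintReport() {
//     BlockingMutexLock l(&g_report_mu);  // Lock() now, Unlock() at scope exit
//     // ... emit the report while holding the lock ...
//   }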

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};
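
// Illustrative usage sketch (not part of the original header): RWMutex lets
// multiple readers proceed concurrently while a writer gets exclusive access.
// The names below (g_map_mu, LookupFlag, SetFlag) are hypothetical.
//
//   static RWMutex g_map_mu;
//
//   bool LookupFlag() {
//     g_map_mu.ReadLock();    // shared; spins only while a writer holds it
//     // ... read shared data ...
//     g_map_mu.ReadUnlock();
//     return true;
//   }
//
//   void SetFlag() {
//     g_map_mu.Lock();        // exclusive; waits for readers and writers
//     // ... modify shared data ...
//     g_map_mu.Unlock();
//   }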

template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
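
// Illustrative usage sketch (not part of the original header): the scoped-lock
// typedefs provide RAII locking, so the mutex is released even on an early
// return. The names below (g_stats_mu, UpdateStats) are hypothetical.
//
//   static SpinMutex g_stats_mu;
//
//   void UpdateStats(bool skip) {
//     SpinMutexLock l(&g_stats_mu);  // Lock() in the constructor
//     if (skip)
//       return;                      // Unlock() in ~GenericScopedLock
//     // ... update shared statistics ...
//   }                                // Unlock() here otherwise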

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H