//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
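
// Usage sketch (illustrative comment, not part of the original header): a
// StaticSpinMutex is meant to live in static storage, where zero
// initialization leaves state_ unlocked (Init() does the same explicitly).
// It is usually taken through the SpinMutexLock RAII alias defined below:
//
//   static StaticSpinMutex mu;  // zero-initialized => unlocked
//   void UpdateSharedState() {
//     SpinMutexLock l(&mu);     // Lock() on construction, Unlock() on scope exit
//     // ... touch data guarded by mu ...
//   }
//
// LockSlow() spins with proc_yield(10) for the first 10 iterations, then falls
// back to internal_sched_yield(), and re-tries the exchange only after a
// relaxed load observes the mutex as free (test-and-test-and-set).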

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};
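
// Note (added commentary): SpinMutex differs from its base only in that the
// constructor calls Init(), making it suitable for objects with dynamic
// lifetime; StaticSpinMutex relies on zero-initialized static storage instead.
// The private, undefined copy operations are the pre-C++11 noncopyable idiom.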

class BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned, and assumes callers to be generally
  // well-behaved.
  void CheckLocked();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
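
// Usage sketch (illustrative comment, not part of the original header): unlike
// the spin mutexes above, BlockingMutex blocks the thread in the OS; the
// platform-specific implementation lives outside this header. Global instances
// are typically declared linker-initialized so no constructor needs to run:
//
//   static BlockingMutex mu(LINKER_INITIALIZED);
//   void Work() {
//     BlockingMutexLock l(&mu);  // Lock()/Unlock() via RAII
//     // ... longer critical section where spinning would waste CPU ...
//   }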

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };
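
  // State encoding (added commentary): the low bit (kWriteLock) marks an
  // exclusive writer and each reader contributes kReadLock (2), so the upper
  // bits hold the reader count. Lock() only succeeds when the state is
  // kUnlocked; ReadLock() optimistically adds kReadLock and, if the write bit
  // was set, ReadLockSlow() spins until the writer clears it.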

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator = (const RWMutex&);
};

template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
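
// Typical RAII usage of the aliases above (illustrative only; the names below
// are hypothetical):
//
//   RWMutex registry_mu;
//   void Register() { RWMutexLock l(&registry_mu); /* exclusive access */ }
//   void Query()    { RWMutexReadLock l(&registry_mu); /* shared access */ }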

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H