//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
12 #include "sanitizer_deadlock_detector_interface.h"
13 #include "sanitizer_common.h"
14 #include "sanitizer_allocator_internal.h"
15 #include "sanitizer_placement_new.h"
16 #include "sanitizer_mutex.h"
18 #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
20 namespace __sanitizer
{
22 const int kMaxNesting
= 64;
24 const u32 kEndId
= -2;
25 const int kMaxLink
= 8;
26 const int kL1Size
= 1024;
27 const int kL2Size
= 1024;
28 const int kMaxMutex
= kL1Size
* kL2Size
;
34 explicit Id(u32 id
= 0, u32 seq
= 0)
47 explicit Link(u32 id
= 0, u32 seq
= 0, u32 tid
= 0, u32 s0
= 0, u32 s1
= 0)
56 struct DDPhysicalThread
{
59 bool visited
[kMaxMutex
];
60 Link pending
[kMaxMutex
];
69 struct DDLogicalThread
{
71 ThreadMutex locked
[kMaxNesting
];
82 struct DD
: public DDetector
{
83 explicit DD(const DDFlags
*flags
);
85 DDPhysicalThread
* CreatePhysicalThread();
86 void DestroyPhysicalThread(DDPhysicalThread
*pt
);
88 DDLogicalThread
* CreateLogicalThread(u64 ctx
);
89 void DestroyLogicalThread(DDLogicalThread
*lt
);
91 void MutexInit(DDCallback
*cb
, DDMutex
*m
);
92 void MutexBeforeLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
);
93 void MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
,
95 void MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
);
96 void MutexDestroy(DDCallback
*cb
, DDMutex
*m
);
98 DDReport
*GetReport(DDCallback
*cb
);
100 void CycleCheck(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, DDMutex
*mtx
);
101 void Report(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, int npath
);
102 u32
allocateId(DDCallback
*cb
);
103 Mutex
*getMutex(u32 id
);
104 u32
getMutexId(Mutex
*m
);
108 Mutex
* mutex
[kL1Size
];
111 InternalMmapVector
<u32
> free_id
;
115 DDetector
*DDetector::Create(const DDFlags
*flags
) {
117 void *mem
= MmapOrDie(sizeof(DD
), "deadlock detector");
118 return new(mem
) DD(flags
);
121 DD::DD(const DDFlags
*flags
) : flags(*flags
) { free_id
.reserve(1024); }
123 DDPhysicalThread
* DD::CreatePhysicalThread() {
124 DDPhysicalThread
*pt
= (DDPhysicalThread
*)MmapOrDie(sizeof(DDPhysicalThread
),
125 "deadlock detector (physical thread)");
129 void DD::DestroyPhysicalThread(DDPhysicalThread
*pt
) {
130 pt
->~DDPhysicalThread();
131 UnmapOrDie(pt
, sizeof(DDPhysicalThread
));
134 DDLogicalThread
* DD::CreateLogicalThread(u64 ctx
) {
135 DDLogicalThread
*lt
= (DDLogicalThread
*)InternalAlloc(
136 sizeof(DDLogicalThread
));
142 void DD::DestroyLogicalThread(DDLogicalThread
*lt
) {
143 lt
->~DDLogicalThread();
147 void DD::MutexInit(DDCallback
*cb
, DDMutex
*m
) {
148 VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb
->lt
->ctx
, m
);
151 atomic_store(&m
->owner
, 0, memory_order_relaxed
);
154 Mutex
*DD::getMutex(u32 id
) {
155 return &mutex
[id
/ kL2Size
][id
% kL2Size
];
158 u32
DD::getMutexId(Mutex
*m
) {
159 for (int i
= 0; i
< kL1Size
; i
++) {
160 Mutex
*tab
= mutex
[i
];
163 if (m
>= tab
&& m
< tab
+ kL2Size
)
164 return i
* kL2Size
+ (m
- tab
);
169 u32
DD::allocateId(DDCallback
*cb
) {
171 SpinMutexLock
l(&mtx
);
172 if (free_id
.size() > 0) {
176 CHECK_LT(id_gen
, kMaxMutex
);
177 if ((id_gen
% kL2Size
) == 0) {
178 mutex
[id_gen
/ kL2Size
] = (Mutex
*)MmapOrDie(kL2Size
* sizeof(Mutex
),
179 "deadlock detector (mutex table)");
183 CHECK_LE(id
, kMaxMutex
);
184 VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb
->lt
->ctx
, id
);
188 void DD::MutexBeforeLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) {
189 VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
190 cb
->lt
->ctx
, m
, wlock
, cb
->lt
->nlocked
);
191 DDPhysicalThread
*pt
= cb
->pt
;
192 DDLogicalThread
*lt
= cb
->lt
;
194 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
195 if (owner
== (uptr
)cb
->lt
) {
196 VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
201 CHECK_LE(lt
->nlocked
, kMaxNesting
);
203 // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
205 m
->id
= allocateId(cb
);
207 ThreadMutex
*tm
= <
->locked
[lt
->nlocked
++];
209 if (flags
.second_deadlock_stack
)
210 tm
->stk
= cb
->Unwind();
211 if (lt
->nlocked
== 1) {
212 VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
218 Mutex
*mtx
= getMutex(m
->id
);
219 for (int i
= 0; i
< lt
->nlocked
- 1; i
++) {
220 u32 id1
= lt
->locked
[i
].id
;
221 u32 stk1
= lt
->locked
[i
].stk
;
222 Mutex
*mtx1
= getMutex(id1
);
223 SpinMutexLock
l(&mtx1
->mtx
);
224 if (mtx1
->nlink
== kMaxLink
) {
225 // FIXME(dvyukov): check stale links
229 for (; li
< mtx1
->nlink
; li
++) {
230 Link
*link
= &mtx1
->link
[li
];
231 if (link
->id
== m
->id
) {
232 if (link
->seq
!= mtx
->seq
) {
233 link
->seq
= mtx
->seq
;
236 link
->stk1
= cb
->Unwind();
238 VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
239 cb
->lt
->ctx
, getMutexId(mtx1
), m
->id
);
244 if (li
== mtx1
->nlink
) {
245 // FIXME(dvyukov): check stale links
246 Link
*link
= &mtx1
->link
[mtx1
->nlink
++];
248 link
->seq
= mtx
->seq
;
251 link
->stk1
= cb
->Unwind();
253 VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
254 cb
->lt
->ctx
, getMutexId(mtx1
), m
->id
);
258 if (!added
|| mtx
->nlink
== 0) {
259 VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
264 CycleCheck(pt
, lt
, m
);
267 void DD::MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
,
269 VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
270 cb
->lt
->ctx
, m
, wlock
, trylock
, cb
->lt
->nlocked
);
271 DDLogicalThread
*lt
= cb
->lt
;
273 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
274 if (owner
== (uptr
)cb
->lt
) {
275 VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb
->lt
->ctx
);
282 VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb
->lt
->ctx
);
283 CHECK_EQ(m
->recursion
, 0);
285 atomic_store(&m
->owner
, (uptr
)cb
->lt
, memory_order_relaxed
);
291 CHECK_LE(lt
->nlocked
, kMaxNesting
);
293 m
->id
= allocateId(cb
);
294 ThreadMutex
*tm
= <
->locked
[lt
->nlocked
++];
296 if (flags
.second_deadlock_stack
)
297 tm
->stk
= cb
->Unwind();
300 void DD::MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) {
301 VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
302 cb
->lt
->ctx
, m
, wlock
, cb
->lt
->nlocked
);
303 DDLogicalThread
*lt
= cb
->lt
;
305 uptr owner
= atomic_load(&m
->owner
, memory_order_relaxed
);
306 if (owner
== (uptr
)cb
->lt
) {
307 VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb
->lt
->ctx
);
308 if (--m
->recursion
> 0)
310 VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb
->lt
->ctx
);
311 atomic_store(&m
->owner
, 0, memory_order_relaxed
);
313 CHECK_NE(m
->id
, kNoId
);
314 int last
= lt
->nlocked
- 1;
315 for (int i
= last
; i
>= 0; i
--) {
316 if (cb
->lt
->locked
[i
].id
== m
->id
) {
317 lt
->locked
[i
] = lt
->locked
[last
];
324 void DD::MutexDestroy(DDCallback
*cb
, DDMutex
*m
) {
325 VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
327 DDLogicalThread
*lt
= cb
->lt
;
332 // Remove the mutex from lt->locked if there.
333 int last
= lt
->nlocked
- 1;
334 for (int i
= last
; i
>= 0; i
--) {
335 if (lt
->locked
[i
].id
== m
->id
) {
336 lt
->locked
[i
] = lt
->locked
[last
];
342 // Clear and invalidate the mutex descriptor.
344 Mutex
*mtx
= getMutex(m
->id
);
345 SpinMutexLock
l(&mtx
->mtx
);
350 // Return id to cache.
352 SpinMutexLock
l(&mtx
);
353 free_id
.push_back(m
->id
);
357 void DD::CycleCheck(DDPhysicalThread
*pt
, DDLogicalThread
*lt
,
359 internal_memset(pt
->visited
, 0, sizeof(pt
->visited
));
363 Mutex
*mtx
= getMutex(m
->id
);
364 SpinMutexLock
l(&mtx
->mtx
);
365 for (int li
= 0; li
< mtx
->nlink
; li
++)
366 pt
->pending
[npending
++] = mtx
->link
[li
];
368 while (npending
> 0) {
369 Link link
= pt
->pending
[--npending
];
370 if (link
.id
== kEndId
) {
374 if (pt
->visited
[link
.id
])
376 Mutex
*mtx1
= getMutex(link
.id
);
377 SpinMutexLock
l(&mtx1
->mtx
);
378 if (mtx1
->seq
!= link
.seq
)
380 pt
->visited
[link
.id
] = true;
381 if (mtx1
->nlink
== 0)
383 pt
->path
[npath
++] = link
;
384 pt
->pending
[npending
++] = Link(kEndId
);
385 if (link
.id
== m
->id
)
386 return Report(pt
, lt
, npath
); // Bingo!
387 for (int li
= 0; li
< mtx1
->nlink
; li
++) {
388 Link
*link1
= &mtx1
->link
[li
];
389 // Mutex *mtx2 = getMutex(link->id);
390 // FIXME(dvyukov): fast seq check
391 // FIXME(dvyukov): fast nlink != 0 check
392 // FIXME(dvyukov): fast pending check?
393 // FIXME(dvyukov): npending can be larger than kMaxMutex
394 pt
->pending
[npending
++] = *link1
;
399 void DD::Report(DDPhysicalThread
*pt
, DDLogicalThread
*lt
, int npath
) {
400 DDReport
*rep
= &pt
->rep
;
402 for (int i
= 0; i
< npath
; i
++) {
403 Link
*link
= &pt
->path
[i
];
404 Link
*link0
= &pt
->path
[i
? i
- 1 : npath
- 1];
405 rep
->loop
[i
].thr_ctx
= link
->tid
;
406 rep
->loop
[i
].mtx_ctx0
= link0
->id
;
407 rep
->loop
[i
].mtx_ctx1
= link
->id
;
408 rep
->loop
[i
].stk
[0] = flags
.second_deadlock_stack
? link
->stk0
: 0;
409 rep
->loop
[i
].stk
[1] = link
->stk1
;
411 pt
->report_pending
= true;
414 DDReport
*DD::GetReport(DDCallback
*cb
) {
415 if (!cb
->pt
->report_pending
)
417 cb
->pt
->report_pending
= false;
421 } // namespace __sanitizer
422 #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2