//===-- tsan_clock.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"

// It is possible to optimize clock operations for some important cases
// so that they are O(1). The cases include singletons, once-flags and
// local mutexes.
// First, SyncClock must be re-implemented to allow indexing by tid.
// It does not necessarily need to be a full vector clock, though; for
// example, it may be a multi-level table.
// Then, each slot in SyncClock must contain a dirty bit (it is packed
// together with the clock value, so there is no space increase).
// The acquire algorithm looks as follows:
// void acquire(thr, tid, thr_clock, sync_clock) {
//   if (!sync_clock[tid].dirty)
//     return;  // No new info to acquire.
//              // This handles constant reads of singleton pointers and
//              // stop-flags.
//   acquire_impl(thr_clock, sync_clock);  // As usual, O(N).
//   sync_clock[tid].dirty = false;
//   sync_clock.dirty_count--;
// }
// The release operation looks as follows:
// void release(thr, tid, thr_clock, sync_clock) {
//   // thr->sync_cache is a simple fixed-size hash-based cache that holds
//   // several previous sync_clock's.
//   if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
//     // The thread has done no acquire operations since the last release
//     // on this clock, so update only the thread's slot (other slots
//     // cannot possibly change).
//     sync_clock[tid].clock = thr->epoch;
//     if (sync_clock.dirty_count == sync_clock.cnt
//         || (sync_clock.dirty_count == sync_clock.cnt - 1
//             && sync_clock[tid].dirty == false))
//       // All dirty flags are already set, bail out.
//       return;
//     set all dirty bits, but preserve the thread's bit.  // O(N)
//     update sync_clock.dirty_count;
//     return;
//   }
//   release_impl(thr_clock, sync_clock);  // As usual, O(N).
//   set all dirty bits, but preserve the thread's bit.
//   // The previous step is combined with release_impl(), so that
//   // we scan the arrays only once.
//   update sync_clock.dirty_count;
// }

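// A minimal, self-contained sketch of the acquire fast path described in the
// note above. Everything in this sketch is hypothetical: the names FastSlot,
// FastSyncClock and fast_acquire do not exist in TSan, the table size is a
// toy constant, and only plain C++ types are used so the snippet stands on
// its own. It is not the implementation below, which is the plain O(N)
// vector-clock merge.
namespace clock_fastpath_sketch {

// One slot of the (hypothetical) tid-indexed sync clock. The dirty flag is
// packed into the same 64-bit word as the clock value, so the slot does not
// grow.
struct FastSlot {
  unsigned long long clock : 63;
  unsigned long long dirty : 1;
};

struct FastSyncClock {
  FastSlot slots[8];      // Toy fixed size; the real table would be per-tid.
  unsigned dirty_count;   // Number of slots with the dirty bit set.
};

// Returns true if the caller still needs the usual O(N) acquire_impl() merge.
// If the calling thread's slot is clean, this clock contains nothing the
// thread has not already observed, so it returns in O(1). This covers
// constant reads of singleton pointers and stop-flags.
inline bool fast_acquire(FastSyncClock *sc, unsigned tid) {
  if (!sc->slots[tid].dirty)
    return false;           // Fast path: nothing new to acquire.
  sc->slots[tid].dirty = 0;
  sc->dirty_count--;
  return true;              // Slow path: caller performs the full merge.
}

// The release fast path follows the same pattern: if the thread has not
// acquired anything since its last release into this clock, only its own
// slot needs updating before the other slots are re-marked dirty.

}  // namespace clock_fastpath_sketch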
namespace __tsan {

// Zero-initializes the thread's vector clock.
ThreadClock::ThreadClock() {
  nclk_ = 0;
  for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
    clk_[i] = 0;
}

// Acquire: merge the sync object's clock into the thread's clock by taking
// the element-wise maximum.
void ThreadClock::acquire(const SyncClock *src) {
  DCHECK(nclk_ <= kMaxTid);
  DCHECK(src->clk_.Size() <= kMaxTid);

  const uptr nclk = src->clk_.Size();
  if (nclk == 0)
    return;
  nclk_ = max(nclk_, nclk);
  for (uptr i = 0; i < nclk; i++) {
    if (clk_[i] < src->clk_[i])
      clk_[i] = src->clk_[i];
  }
}

// Release: merge the thread's clock into the sync object's clock, growing
// the destination if it is smaller than the thread's clock.
void ThreadClock::release(SyncClock *dst) const {
  DCHECK(nclk_ <= kMaxTid);
  DCHECK(dst->clk_.Size() <= kMaxTid);

  if (dst->clk_.Size() < nclk_)
    dst->clk_.Resize(nclk_);
  for (uptr i = 0; i < nclk_; i++) {
    if (dst->clk_[i] < clk_[i])
      dst->clk_[i] = clk_[i];
  }
}

// Release-store: overwrite the sync object's clock with the thread's clock
// (instead of merging) and zero any remaining slots.
void ThreadClock::ReleaseStore(SyncClock *dst) const {
  DCHECK(nclk_ <= kMaxTid);
  DCHECK(dst->clk_.Size() <= kMaxTid);

  if (dst->clk_.Size() < nclk_)
    dst->clk_.Resize(nclk_);
  for (uptr i = 0; i < nclk_; i++)
    dst->clk_[i] = clk_[i];
  for (uptr i = nclk_; i < dst->clk_.Size(); i++)
    dst->clk_[i] = 0;
}

// Combined acquire followed by release on the same sync object.
void ThreadClock::acq_rel(SyncClock *dst) {
  acquire(dst);
  release(dst);
}

// The sync object's clock storage is allocated with the MBlockClock tag.
SyncClock::SyncClock()
    : clk_(MBlockClock) {
}
}  // namespace __tsan