// Mirrored from thirdparty/gcc.git (git.ipfire.org):
//   libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
a9586c9c 1//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
2//
3// This file is distributed under the University of Illinois Open Source
4// License. See LICENSE.TXT for details.
5//
6//===----------------------------------------------------------------------===//
7//
8// Implementation of a mapping from arbitrary values to unique 32-bit
9// identifiers.
10//===----------------------------------------------------------------------===//
5645a48f 11
a9586c9c 12#ifndef SANITIZER_STACKDEPOTBASE_H
13#define SANITIZER_STACKDEPOTBASE_H
14
15#include "sanitizer_internal_defs.h"
16#include "sanitizer_mutex.h"
17#include "sanitizer_atomic.h"
18#include "sanitizer_persistent_allocator.h"
19
20namespace __sanitizer {
21
// Base class implementing a mapping from arbitrary values (described by
// Node::args_type) to unique 32-bit identifiers.  Node supplies the hashing,
// storage, and comparison of entries; the top kReservedBits of an id are left
// free for the client; the hash table has 1 << kTabSizeLog buckets.
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  // Maps stack trace to an unique id.  If 'inserted' is non-null, it is set
  // to true when this call created a new entry, false otherwise.
  handle_type Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by the id.
  args_type Get(u32 id);

  // Returns the counters updated by Put() (unique ids, bytes allocated).
  StackDepotStats *GetStats() { return &stats; }

  // Locks/unlocks every hash-table bucket; presumably used to make the depot
  // consistent across fork() — confirm against callers.
  void LockAll();
  void UnlockAll();

 private:
  // Scans the chain starting at 's' for a node matching (hash, args).
  static Node *find(Node *s, args_type args, u32 hash);
  // Spin-locks the bucket by setting the lsb of *p; returns the chain head.
  static Node *lock(atomic_uintptr_t *p);
  // Stores head 's' into *p with the lock bit clear, releasing the lock.
  static void unlock(atomic_uintptr_t *p, Node *s);

  static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
  static const int kPartBits = 8;
  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
  static const int kPartCount =
      1 << kPartBits; // Number of subparts in the table.
  static const int kPartSize = kTabSize / kPartCount;
  static const int kMaxId = 1 << kPartShift;  // Per-part id limit (exclusive).

  atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
  atomic_uint32_t seq[kPartCount]; // Unique id generators.

  StackDepotStats stats;

  friend class StackDepotReverseMap;
};
57
58template <class Node, int kReservedBits, int kTabSizeLog>
59Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
60 args_type args,
61 u32 hash) {
62 // Searches linked list s for the stack, returns its id.
63 for (; s; s = s->link) {
64 if (s->eq(hash, args)) {
65 return s;
66 }
67 }
5645a48f 68 return nullptr;
a9586c9c 69}
70
template <class Node, int kReservedBits, int kTabSizeLog>
Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
    atomic_uintptr_t *p) {
  // Uses the pointer lsb as mutex.
  for (int i = 0;; i++) {
    uptr cmp = atomic_load(p, memory_order_relaxed);
    // Attempt to set the lock bit only if it is currently clear; the acquire
    // ordering pairs with the release store in unlock().
    if ((cmp & 1) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
      return (Node *)cmp;  // Chain head as observed at lock acquisition.
    // Back off: spin briefly first, then yield the CPU to other threads.
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}
86
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uintptr_t *p, Node *s) {
  // Publishes the (possibly updated) chain head and clears the lock bit in a
  // single release store; 's' must have a clear lsb, which doubles as the
  // lock bit in this scheme.
  DCHECK_EQ((uptr)s & 1, 0);
  atomic_store(p, (uptr)s, memory_order_release);
}
93
// Inserts 'args' if not already present and returns a handle to the (new or
// existing) node.  Lock-free fast path: scan the bucket chain without the
// lock; slow path: take the bucket lock, re-scan, then prepend a new node.
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                      bool *inserted) {
  if (inserted) *inserted = false;
  if (!Node::is_valid(args)) return handle_type();
  uptr h = Node::hash(args);
  atomic_uintptr_t *p = &tab[h % kTabSize];
  uptr v = atomic_load(p, memory_order_consume);
  Node *s = (Node *)(v & ~1);  // Strip the lock bit to get the chain head.
  // First, try to find the existing stack.
  Node *node = find(s, args, h);
  if (node) return node->get_handle();
  // If failed, lock, retry and insert new.
  Node *s2 = lock(p);
  if (s2 != s) {
    // The chain changed between the optimistic scan and taking the lock;
    // re-scan under the lock to avoid inserting a duplicate.
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node->get_handle();
    }
  }
  // Build a new id: low bits come from the per-part sequence counter, high
  // kPartBits encode the part so Get() can narrow its bucket search.
  uptr part = (h % kTabSize) / kPartSize;
  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
  // NOTE(review): plain increment; racy across different buckets —
  // presumably a best-effort statistic.
  stats.n_uniq_ids++;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  CHECK_NE(id, 0);  // Id 0 is reserved to mean "no entry" (see Get()).
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  uptr memsz = Node::storage_size(args);
  s = (Node *)PersistentAlloc(memsz);
  stats.allocated += memsz;
  s->id = id;
  s->store(args, h);
  s->link = s2;  // Prepend to the chain head observed under the lock.
  unlock(p, s);  // Publishes the new node and releases the bucket lock.
  if (inserted) *inserted = true;
  return s->get_handle();
}
133
134template <class Node, int kReservedBits, int kTabSizeLog>
135typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
136StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
137 if (id == 0) {
138 return args_type();
139 }
140 CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
141 // High kPartBits contain part id, so we need to scan at most kPartSize lists.
142 uptr part = id >> kPartShift;
143 for (int i = 0; i != kPartSize; i++) {
144 uptr idx = part * kPartSize + i;
145 CHECK_LT(idx, kTabSize);
146 atomic_uintptr_t *p = &tab[idx];
147 uptr v = atomic_load(p, memory_order_consume);
148 Node *s = (Node *)(v & ~1);
149 for (; s; s = s->link) {
150 if (s->id == id) {
151 return s->load();
152 }
153 }
154 }
155 return args_type();
156}
157
158template <class Node, int kReservedBits, int kTabSizeLog>
159void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
160 for (int i = 0; i < kTabSize; ++i) {
161 lock(&tab[i]);
162 }
163}
164
165template <class Node, int kReservedBits, int kTabSizeLog>
166void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
167 for (int i = 0; i < kTabSize; ++i) {
168 atomic_uintptr_t *p = &tab[i];
169 uptr s = atomic_load(p, memory_order_relaxed);
170 unlock(p, (Node *)(s & ~1UL));
171 }
172}
173
5645a48f 174} // namespace __sanitizer
175
176#endif // SANITIZER_STACKDEPOTBASE_H