]>
Commit | Line | Data |
---|---|---|
03b00917 RG |
1 | /* |
2 | * This file is part of PowerDNS or dnsdist. | |
3 | * Copyright -- PowerDNS.COM B.V. and its contributors | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of version 2 of the GNU General Public License as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * In addition, for the avoidance of any doubt, permission is granted to | |
10 | * link this program with OpenSSL and to (re)distribute the binaries | |
11 | * produced as the result of such linking. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
21 | */ | |
22 | #pragma once | |
23 | ||
#include <atomic>
#include <memory>
#include <mutex>
#include <time.h>
#include <unordered_map>
#include <vector>

#include <boost/circular_buffer.hpp>
#include <boost/variant.hpp>

#include "dnsname.hh"
#include "iputils.hh"
33 | ||
34 | ||
35 | struct Rings { | |
36 | struct Query | |
37 | { | |
38 | struct timespec when; | |
39 | ComboAddress requestor; | |
40 | DNSName name; | |
41 | uint16_t size; | |
42 | uint16_t qtype; | |
43 | struct dnsheader dh; | |
44 | }; | |
45 | struct Response | |
46 | { | |
47 | struct timespec when; | |
48 | ComboAddress requestor; | |
49 | DNSName name; | |
50 | uint16_t qtype; | |
51 | unsigned int usec; | |
52 | unsigned int size; | |
53 | struct dnsheader dh; | |
54 | ComboAddress ds; // who handled it | |
55 | }; | |
56 | ||
57 | struct Shard | |
58 | { | |
59 | boost::circular_buffer<Query> queryRing; | |
60 | boost::circular_buffer<Response> respRing; | |
61 | std::mutex queryLock; | |
62 | std::mutex respLock; | |
63 | }; | |
64 | ||
cfe4b655 | 65 | Rings(size_t capacity=10000, size_t numberOfShards=1, size_t nbLockTries=5, bool keepLockingStats=false): d_blockingQueryInserts(0), d_blockingResponseInserts(0), d_deferredQueryInserts(0), d_deferredResponseInserts(0), d_nbQueryEntries(0), d_nbResponseEntries(0), d_currentShardId(0), d_numberOfShards(numberOfShards), d_nbLockTries(nbLockTries), d_keepLockingStats(keepLockingStats) |
03b00917 RG |
66 | { |
67 | setCapacity(capacity, numberOfShards); | |
68 | if (numberOfShards <= 1) { | |
69 | d_nbLockTries = 0; | |
70 | } | |
71 | } | |
72 | std::unordered_map<int, vector<boost::variant<string,double> > > getTopBandwidth(unsigned int numentries); | |
73 | size_t numDistinctRequestors(); | |
be67763a | 74 | /* This function should only be called at configuration time before any query or response has been inserted */ |
03b00917 RG |
75 | void setCapacity(size_t newCapacity, size_t numberOfShards) |
76 | { | |
77 | if (numberOfShards < d_numberOfShards) { | |
78 | throw std::runtime_error("Decreasing the number of shards in the query and response rings is not supported"); | |
79 | } | |
80 | ||
81 | d_shards.resize(numberOfShards); | |
82 | d_numberOfShards = numberOfShards; | |
83 | ||
84 | /* resize all the rings */ | |
cdcb5fbd RG |
85 | for (auto& shard : d_shards) { |
86 | shard = std::unique_ptr<Shard>(new Shard()); | |
03b00917 | 87 | { |
cdcb5fbd RG |
88 | std::lock_guard<std::mutex> wl(shard->queryLock); |
89 | shard->queryRing.set_capacity(newCapacity / numberOfShards); | |
03b00917 RG |
90 | } |
91 | { | |
cdcb5fbd RG |
92 | std::lock_guard<std::mutex> wl(shard->respLock); |
93 | shard->respRing.set_capacity(newCapacity / numberOfShards); | |
03b00917 RG |
94 | } |
95 | } | |
be67763a RG |
96 | |
97 | /* we just recreated the shards so they are now empty */ | |
98 | d_nbQueryEntries = 0; | |
99 | d_nbResponseEntries = 0; | |
03b00917 RG |
100 | } |
101 | ||
102 | void setNumberOfLockRetries(size_t retries) | |
103 | { | |
104 | if (d_numberOfShards <= 1) { | |
105 | d_nbLockTries = 0; | |
106 | } else { | |
107 | d_nbLockTries = retries; | |
108 | } | |
109 | } | |
110 | ||
111 | size_t getNumberOfShards() const | |
112 | { | |
113 | return d_numberOfShards; | |
114 | } | |
115 | ||
cfe4b655 RG |
116 | size_t getNumberOfQueryEntries() const |
117 | { | |
118 | return d_nbQueryEntries; | |
119 | } | |
120 | ||
121 | size_t getNumberOfResponseEntries() const | |
122 | { | |
123 | return d_nbResponseEntries; | |
124 | } | |
125 | ||
03b00917 RG |
126 | void insertQuery(const struct timespec& when, const ComboAddress& requestor, const DNSName& name, uint16_t qtype, uint16_t size, const struct dnsheader& dh) |
127 | { | |
128 | for (size_t idx = 0; idx < d_nbLockTries; idx++) { | |
cfe4b655 RG |
129 | auto& shard = getOneShard(); |
130 | std::unique_lock<std::mutex> wl(shard->queryLock, std::try_to_lock); | |
03b00917 | 131 | if (wl.owns_lock()) { |
cfe4b655 | 132 | insertQueryLocked(shard, when, requestor, name, qtype, size, dh); |
03b00917 RG |
133 | return; |
134 | } | |
cfe4b655 RG |
135 | if (d_keepLockingStats) { |
136 | d_deferredQueryInserts++; | |
137 | } | |
03b00917 RG |
138 | } |
139 | ||
140 | /* out of luck, let's just wait */ | |
cfe4b655 RG |
141 | if (d_keepLockingStats) { |
142 | d_blockingResponseInserts++; | |
143 | } | |
144 | auto& shard = getOneShard(); | |
145 | std::lock_guard<std::mutex> wl(shard->queryLock); | |
146 | insertQueryLocked(shard, when, requestor, name, qtype, size, dh); | |
03b00917 RG |
147 | } |
148 | ||
149 | void insertResponse(const struct timespec& when, const ComboAddress& requestor, const DNSName& name, uint16_t qtype, unsigned int usec, unsigned int size, const struct dnsheader& dh, const ComboAddress& backend) | |
150 | { | |
151 | for (size_t idx = 0; idx < d_nbLockTries; idx++) { | |
cfe4b655 RG |
152 | auto& shard = getOneShard(); |
153 | std::unique_lock<std::mutex> wl(shard->respLock, std::try_to_lock); | |
03b00917 | 154 | if (wl.owns_lock()) { |
cfe4b655 | 155 | insertResponseLocked(shard, when, requestor, name, qtype, usec, size, dh, backend); |
03b00917 RG |
156 | return; |
157 | } | |
cfe4b655 RG |
158 | if (d_keepLockingStats) { |
159 | d_deferredResponseInserts++; | |
160 | } | |
03b00917 RG |
161 | } |
162 | ||
163 | /* out of luck, let's just wait */ | |
cfe4b655 RG |
164 | if (d_keepLockingStats) { |
165 | d_blockingResponseInserts++; | |
166 | } | |
167 | auto& shard = getOneShard(); | |
168 | std::lock_guard<std::mutex> wl(shard->respLock); | |
169 | insertResponseLocked(shard, when, requestor, name, qtype, usec, size, dh, backend); | |
03b00917 RG |
170 | } |
171 | ||
1d3ba133 RG |
172 | void clear() |
173 | { | |
174 | for (auto& shard : d_shards) { | |
175 | { | |
176 | std::lock_guard<std::mutex> wl(shard->queryLock); | |
177 | shard->queryRing.clear(); | |
178 | } | |
179 | { | |
180 | std::lock_guard<std::mutex> wl(shard->respLock); | |
181 | shard->respRing.clear(); | |
182 | } | |
183 | } | |
184 | ||
185 | d_nbQueryEntries.store(0); | |
186 | d_nbResponseEntries.store(0); | |
187 | d_currentShardId.store(0); | |
188 | d_blockingQueryInserts.store(0); | |
189 | d_blockingResponseInserts.store(0); | |
190 | d_deferredQueryInserts.store(0); | |
191 | d_deferredResponseInserts.store(0); | |
192 | } | |
193 | ||
03b00917 | 194 | std::vector<std::unique_ptr<Shard> > d_shards; |
cfe4b655 RG |
195 | std::atomic<uint64_t> d_blockingQueryInserts; |
196 | std::atomic<uint64_t> d_blockingResponseInserts; | |
197 | std::atomic<uint64_t> d_deferredQueryInserts; | |
198 | std::atomic<uint64_t> d_deferredResponseInserts; | |
03b00917 RG |
199 | |
200 | private: | |
201 | size_t getShardId() | |
202 | { | |
203 | return (d_currentShardId++ % d_numberOfShards); | |
204 | } | |
205 | ||
cfe4b655 RG |
206 | std::unique_ptr<Shard>& getOneShard() |
207 | { | |
208 | return d_shards[getShardId()]; | |
209 | } | |
210 | ||
211 | void insertQueryLocked(std::unique_ptr<Shard>& shard, const struct timespec& when, const ComboAddress& requestor, const DNSName& name, uint16_t qtype, uint16_t size, const struct dnsheader& dh) | |
212 | { | |
213 | if (!shard->queryRing.full()) { | |
214 | d_nbQueryEntries++; | |
215 | } | |
216 | shard->queryRing.push_back({when, requestor, name, size, qtype, dh}); | |
217 | } | |
218 | ||
219 | void insertResponseLocked(std::unique_ptr<Shard>& shard, const struct timespec& when, const ComboAddress& requestor, const DNSName& name, uint16_t qtype, unsigned int usec, unsigned int size, const struct dnsheader& dh, const ComboAddress& backend) | |
220 | { | |
221 | if (!shard->respRing.full()) { | |
222 | d_nbResponseEntries++; | |
223 | } | |
224 | shard->respRing.push_back({when, requestor, name, qtype, usec, size, dh, backend}); | |
225 | } | |
226 | ||
227 | std::atomic<size_t> d_nbQueryEntries; | |
228 | std::atomic<size_t> d_nbResponseEntries; | |
03b00917 RG |
229 | std::atomic<size_t> d_currentShardId; |
230 | ||
231 | size_t d_numberOfShards; | |
232 | size_t d_nbLockTries = 5; | |
cfe4b655 | 233 | bool d_keepLockingStats{false}; |
03b00917 RG |
234 | }; |
235 | ||
236 | extern Rings g_rings; |