// pdns/recursordist/recpacketcache.cc
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <iostream>
#include <cinttypes>

#include "recpacketcache.hh"
#include "cachecleaner.hh"
#include "dns.hh"
#include "namespaces.hh"
#include "rec-taskqueue.hh"

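// If non-zero: once the remaining TTL of a cache hit drops to or below this percentage
// of the original TTL, an "almost expired" refresh task is scheduled for the entry.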
unsigned int RecursorPacketCache::s_refresh_ttlperc{0};

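// Set the maximum number of entries each shard may hold.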
void RecursorPacketCache::setShardSizes(size_t shardSize)
{
  for (auto& shard : d_maps) {
    auto lock = shard.lock();
    lock->d_shardSize = shardSize;
  }
}

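// Total number of cached entries, summed over all shards.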
uint64_t RecursorPacketCache::size() const
{
  uint64_t count = 0;
  for (const auto& map : d_maps) {
    count += map.getEntriesCount();
  }
  return count;
}

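// Approximate memory used by the packet cache, in bytes.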
uint64_t RecursorPacketCache::bytes()
{
  uint64_t sum = 0;
  for (auto& shard : d_maps) {
    auto lock = shard.lock();
    for (const auto& entry : lock->d_map) {
      sum += sizeof(entry) + entry.d_packet.length() + 4;
    }
  }
  return sum;
}

uint64_t RecursorPacketCache::getHits()
{
  uint64_t sum = 0;
  for (auto& shard : d_maps) {
    auto lock = shard.lock();
    sum += lock->d_hits;
  }
  return sum;
}

uint64_t RecursorPacketCache::getMisses()
{
  uint64_t sum = 0;
  for (auto& shard : d_maps) {
    auto lock = shard.lock();
    sum += lock->d_misses;
  }
  return sum;
}

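// Lock statistics: returns the (contended, acquired) counts summed over all shards.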
pair<uint64_t, uint64_t> RecursorPacketCache::stats()
{
  uint64_t contended = 0;
  uint64_t acquired = 0;
  for (auto& shard : d_maps) {
    auto content = shard.lock();
    contended += content->d_contended_count;
    acquired += content->d_acquired_count;
  }
  return {contended, acquired};
}

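// Remove entries for the given name (or the whole subtree below it) and qtype
// (0xffff matches any type); returns the number of entries wiped.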
uint64_t RecursorPacketCache::doWipePacketCache(const DNSName& name, uint16_t qtype, bool subtree)
{
  uint64_t count = 0;
  for (auto& map : d_maps) {
    auto shard = map.lock();
    auto& idx = shard->d_map.get<NameTag>();
    for (auto iter = idx.lower_bound(name); iter != idx.end();) {
      if (subtree) {
        if (!iter->d_name.isPartOf(name)) { // this is case insensitive
          break;
        }
      }
      else {
        if (iter->d_name != name) {
          break;
        }
      }
      if (qtype == 0xffff || iter->d_type == qtype) {
        iter = idx.erase(iter);
        map.decEntriesCount();
        count++;
      }
      else {
        ++iter;
      }
    }
  }
  return count;
}

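// Check whether a cached entry really matches the incoming query: same name, type and
// class, and the stored query packet matches the incoming one apart from skipped options.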
bool RecursorPacketCache::qrMatch(const packetCache_t::index<HashTag>::type::iterator& iter, const std::string& queryPacket, const DNSName& qname, uint16_t qtype, uint16_t qclass)
{
  // Note: this ignores the EDNS Cookie and Client Subnet options when comparing queries
  if (qname != iter->d_name || iter->d_type != qtype || iter->d_class != qclass) {
    return false;
  }

  static const std::unordered_set<uint16_t> optionsToSkip{EDNSOptionCode::COOKIE, EDNSOptionCode::ECS};
  return queryMatches(iter->d_query, queryPacket, qname, optionsToSkip);
}

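// Walk the entries sharing this hash and return the first fresh match, copying the
// query ID and owner name casing into the cached response and, if enabled, scheduling
// an almost-expired refresh task.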
bool RecursorPacketCache::checkResponseMatches(MapCombo::LockedContent& shard, std::pair<packetCache_t::index<HashTag>::type::iterator, packetCache_t::index<HashTag>::type::iterator> range, const std::string& queryPacket, const DNSName& qname, uint16_t qtype, uint16_t qclass, time_t now, std::string* responsePacket, uint32_t* age, vState* valState, OptPBData* pbdata)
{
  for (auto iter = range.first; iter != range.second; ++iter) {
    // the possibility is VERY real that we get hits that are not right - birthday paradox
    if (!qrMatch(iter, queryPacket, qname, qtype, qclass)) {
      continue;
    }

    if (now < iter->d_ttd) { // it is right, it is fresh!
      *age = static_cast<uint32_t>(now - iter->d_creation);
      // we know ttl is > 0
      auto ttl = static_cast<uint32_t>(iter->d_ttd - now);
      if (s_refresh_ttlperc > 0 && !iter->d_submitted && taskQTypeIsSupported(qtype)) {
        const dnsheader_aligned header(iter->d_packet.data());
        const auto* headerPtr = header.get();
        if (headerPtr->rcode == RCode::NoError) {
          const uint32_t deadline = iter->getOrigTTL() * s_refresh_ttlperc / 100;
          const bool almostExpired = ttl <= deadline;
          if (almostExpired) {
            iter->d_submitted = true;
            pushAlmostExpiredTask(qname, qtype, iter->d_ttd, Netmask());
          }
        }
      }
      *responsePacket = iter->d_packet;
      responsePacket->replace(0, 2, queryPacket.c_str(), 2);
      *valState = iter->d_vstate;

      const size_t wirelength = qname.wirelength();
      if (responsePacket->size() > (sizeof(dnsheader) + wirelength)) {
        responsePacket->replace(sizeof(dnsheader), wirelength, queryPacket, sizeof(dnsheader), wirelength);
      }

      shard.d_hits++;
      moveCacheItemToBack<SequencedTag>(shard.d_map, iter);

      if (pbdata != nullptr) {
        if (iter->d_pbdata) {
          *pbdata = iter->d_pbdata;
        }
        else {
          *pbdata = boost::none;
        }
      }

      return true;
    }
    // We used to move the item to the front of "the to be deleted" sequence,
    // but we very likely will update the entry very soon, so leave it
    shard.d_misses++;
    break;
  }

  return false;
}

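// EDNS options that vary per client and are therefore excluded from the packet hash.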
static const std::unordered_set<uint16_t> s_skipOptions = {EDNSOptionCode::ECS, EDNSOptionCode::COOKIE};

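// Look up a cached response for a query whose qname/qtype/qclass are already known;
// on a hit, fills in the response packet, its age, validation state and optional protobuf data.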
bool RecursorPacketCache::getResponsePacket(unsigned int tag, const std::string& queryPacket, const DNSName& qname, uint16_t qtype, uint16_t qclass, time_t now,
                                            std::string* responsePacket, uint32_t* age, vState* valState, uint32_t* qhash, OptPBData* pbdata, bool tcp)
{
  *qhash = canHashPacket(queryPacket, s_skipOptions);
  auto& map = getMap(tag, *qhash, tcp);
  auto shard = map.lock();
  const auto& idx = shard->d_map.get<HashTag>();
  auto range = idx.equal_range(std::tie(tag, *qhash, tcp));

  if (range.first == range.second) {
    shard->d_misses++;
    return false;
  }

  return checkResponseMatches(*shard, range, queryPacket, qname, qtype, qclass, now, responsePacket, age, valState, pbdata);
}

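// As above, but for callers that have not parsed the query yet: the qname, qtype and
// qclass are extracted from the query packet once a hash match is found.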
bool RecursorPacketCache::getResponsePacket(unsigned int tag, const std::string& queryPacket, DNSName& qname, uint16_t* qtype, uint16_t* qclass, time_t now,
                                            std::string* responsePacket, uint32_t* age, vState* valState, uint32_t* qhash, OptPBData* pbdata, bool tcp)
{
  *qhash = canHashPacket(queryPacket, s_skipOptions);
  auto& map = getMap(tag, *qhash, tcp);
  auto shard = map.lock();
  const auto& idx = shard->d_map.get<HashTag>();
  auto range = idx.equal_range(std::tie(tag, *qhash, tcp));

  if (range.first == range.second) {
    shard->d_misses++;
    return false;
  }

  qname = DNSName(queryPacket.c_str(), static_cast<int>(queryPacket.length()), sizeof(dnsheader), false, qtype, qclass);

  return checkResponseMatches(*shard, range, queryPacket, qname, *qtype, *qclass, now, responsePacket, age, valState, pbdata);
}

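// Store (or refresh) a cached response. If an entry for the same name, type and class
// already exists under this hash it is updated in place; otherwise a new entry is
// inserted and, if the shard exceeds its size limit, the oldest entry is evicted.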
void RecursorPacketCache::insertResponsePacket(unsigned int tag, uint32_t qhash, std::string&& query, const DNSName& qname, uint16_t qtype, uint16_t qclass, std::string&& responsePacket, time_t now, uint32_t ttl, const vState& valState, OptPBData&& pbdata, bool tcp)
{
  auto& map = getMap(tag, qhash, tcp);
  auto shard = map.lock();
  auto& idx = shard->d_map.get<HashTag>();
  auto range = idx.equal_range(std::tie(tag, qhash, tcp));
  auto iter = range.first;

  for (; iter != range.second; ++iter) {
    if (iter->d_type != qtype || iter->d_class != qclass || iter->d_name != qname) {
      continue;
    }

    moveCacheItemToBack<SequencedTag>(shard->d_map, iter);
    iter->d_packet = std::move(responsePacket);
    iter->d_query = std::move(query);
    iter->d_ttd = now + ttl;
    iter->d_creation = now;
    iter->d_vstate = valState;
    iter->d_submitted = false;
    if (pbdata) {
      iter->d_pbdata = std::move(*pbdata);
    }

    return;
  }

  struct Entry entry(DNSName(qname), qtype, qclass, std::move(responsePacket), std::move(query), tcp, qhash, now + ttl, now, tag, valState);
  if (pbdata) {
    entry.d_pbdata = std::move(*pbdata);
  }

  shard->d_map.insert(entry);
  map.incEntriesCount();

  if (shard->d_map.size() > shard->d_shardSize) {
    auto& seq_idx = shard->d_map.get<SequencedTag>();
    seq_idx.erase(seq_idx.begin());
    map.decEntriesCount();
  }
  assert(map.getEntriesCount() == shard->d_map.size()); // NOLINT(cppcoreguidelines-pro-bounds-array-to-pointer-decay): clib implementation
}

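// Prune the packet cache down to maxSize entries.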
void RecursorPacketCache::doPruneTo(size_t maxSize)
{
  size_t cacheSize = size();
  pruneMutexCollectionsVector<SequencedTag>(*this, d_maps, maxSize, cacheSize);
}

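// Dump the packet cache contents to the given file descriptor, one line per entry,
// plus per-shard and global size summaries; returns the number of entries dumped.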
uint64_t RecursorPacketCache::doDump(int file)
{
  int fdupped = dup(file);
  if (fdupped == -1) {
    return 0;
  }
  auto filePtr = std::unique_ptr<FILE, decltype(&fclose)>(fdopen(fdupped, "w"), fclose);
  if (!filePtr) {
    close(fdupped);
    return 0;
  }

  uint64_t count = 0;
  time_t now = time(nullptr);

  size_t shardNum = 0;
  size_t min = std::numeric_limits<size_t>::max();
  size_t max = 0;
  uint64_t maxSize = 0;

  for (auto& shard : d_maps) {
    auto lock = shard.lock();
    const auto& sidx = lock->d_map.get<SequencedTag>();
    const auto shardSize = lock->d_map.size();
    fprintf(filePtr.get(), "; packetcache shard %zu; size %zu/%zu\n", shardNum, shardSize, lock->d_shardSize);
    min = std::min(min, shardSize);
    max = std::max(max, shardSize);
    maxSize += lock->d_shardSize;
    shardNum++;
    for (const auto& entry : sidx) {
      count++;
      try {
        fprintf(filePtr.get(), "%s %" PRId64 " %s ; tag %d %s\n", entry.d_name.toString().c_str(), static_cast<int64_t>(entry.d_ttd - now), DNSRecordContent::NumberToType(entry.d_type).c_str(), entry.d_tag, entry.d_tcp ? "tcp" : "udp");
      }
      catch (...) {
        fprintf(filePtr.get(), "; error printing '%s'\n", entry.d_name.empty() ? "EMPTY" : entry.d_name.toString().c_str());
      }
    }
  }
  fprintf(filePtr.get(), "; packetcache size: %" PRIu64 "/%" PRIu64 " shards: %zu min/max shard size: %zu/%zu\n", size(), maxSize, d_maps.size(), min, max);
  return count;
}