7 #include "recpacketcache.hh"
8 #include "cachecleaner.hh"
10 #include "namespaces.hh"
11 #include "rec-taskqueue.hh"
// Percentage of an entry's original TTL below which a background refresh task
// is scheduled for it (see checkResponseMatches()); 0 disables refreshing.
unsigned int RecursorPacketCache::s_refresh_ttlperc
{0};
15 void RecursorPacketCache::setShardSizes(size_t shardSize
)
17 for (auto& shard
: d_maps
) {
18 auto lock
= shard
.lock();
19 lock
->d_shardSize
= shardSize
;
23 uint64_t RecursorPacketCache::size() const
26 for (const auto& map
: d_maps
) {
27 count
+= map
.getEntriesCount();
32 uint64_t RecursorPacketCache::bytes()
35 for (auto& shard
: d_maps
) {
36 auto lock
= shard
.lock();
37 for (const auto& entry
: lock
->d_map
) {
38 sum
+= sizeof(entry
) + entry
.d_packet
.length() + 4;
44 uint64_t RecursorPacketCache::getHits()
47 for (auto& shard
: d_maps
) {
48 auto lock
= shard
.lock();
54 uint64_t RecursorPacketCache::getMisses()
57 for (auto& shard
: d_maps
) {
58 auto lock
= shard
.lock();
59 sum
+= lock
->d_misses
;
64 pair
<uint64_t, uint64_t> RecursorPacketCache::stats()
66 uint64_t contended
= 0;
67 uint64_t acquired
= 0;
68 for (auto& shard
: d_maps
) {
69 auto content
= shard
.lock();
70 contended
+= content
->d_contended_count
;
71 acquired
+= content
->d_acquired_count
;
73 return {contended
, acquired
};
76 uint64_t RecursorPacketCache::doWipePacketCache(const DNSName
& name
, uint16_t qtype
, bool subtree
)
79 for (auto& map
: d_maps
) {
80 auto shard
= map
.lock();
81 auto& idx
= shard
->d_map
.get
<NameTag
>();
82 for (auto iter
= idx
.lower_bound(name
); iter
!= idx
.end();) {
84 if (!iter
->d_name
.isPartOf(name
)) { // this is case insensitive
89 if (iter
->d_name
!= name
) {
93 if (qtype
== 0xffff || iter
->d_type
== qtype
) {
94 iter
= idx
.erase(iter
);
95 map
.decEntriesCount();
106 bool RecursorPacketCache::qrMatch(const packetCache_t::index
<HashTag
>::type::iterator
& iter
, const std::string
& queryPacket
, const DNSName
& qname
, uint16_t qtype
, uint16_t qclass
)
108 // this ignores checking on the EDNS subnet flags!
109 if (qname
!= iter
->d_name
|| iter
->d_type
!= qtype
|| iter
->d_class
!= qclass
) {
113 static const std::unordered_set
<uint16_t> optionsToSkip
{EDNSOptionCode::COOKIE
, EDNSOptionCode::ECS
};
114 return queryMatches(iter
->d_query
, queryPacket
, qname
, optionsToSkip
);
117 bool RecursorPacketCache::checkResponseMatches(MapCombo::LockedContent
& shard
, std::pair
<packetCache_t::index
<HashTag
>::type::iterator
, packetCache_t::index
<HashTag
>::type::iterator
> range
, const std::string
& queryPacket
, const DNSName
& qname
, uint16_t qtype
, uint16_t qclass
, time_t now
, std::string
* responsePacket
, uint32_t* age
, vState
* valState
, OptPBData
* pbdata
)
119 for (auto iter
= range
.first
; iter
!= range
.second
; ++iter
) {
120 // the possibility is VERY real that we get hits that are not right - birthday paradox
121 if (!qrMatch(iter
, queryPacket
, qname
, qtype
, qclass
)) {
125 if (now
< iter
->d_ttd
) { // it is right, it is fresh!
126 *age
= static_cast<uint32_t>(now
- iter
->d_creation
);
127 // we know ttl is > 0
128 auto ttl
= static_cast<uint32_t>(iter
->d_ttd
- now
);
129 if (s_refresh_ttlperc
> 0 && !iter
->d_submitted
&& taskQTypeIsSupported(qtype
)) {
130 const dnsheader_aligned
header(iter
->d_packet
.data());
131 const auto* headerPtr
= header
.get();
132 if (headerPtr
->rcode
== RCode::NoError
) {
133 const uint32_t deadline
= iter
->getOrigTTL() * s_refresh_ttlperc
/ 100;
134 const bool almostExpired
= ttl
<= deadline
;
136 iter
->d_submitted
= true;
137 pushAlmostExpiredTask(qname
, qtype
, iter
->d_ttd
, Netmask());
141 *responsePacket
= iter
->d_packet
;
142 responsePacket
->replace(0, 2, queryPacket
.c_str(), 2);
143 *valState
= iter
->d_vstate
;
145 const size_t wirelength
= qname
.wirelength();
146 if (responsePacket
->size() > (sizeof(dnsheader
) + wirelength
)) {
147 responsePacket
->replace(sizeof(dnsheader
), wirelength
, queryPacket
, sizeof(dnsheader
), wirelength
);
151 moveCacheItemToBack
<SequencedTag
>(shard
.d_map
, iter
);
153 if (pbdata
!= nullptr) {
154 if (iter
->d_pbdata
) {
155 *pbdata
= iter
->d_pbdata
;
158 *pbdata
= boost::none
;
164 // We used to move the item to the front of "the to be deleted" sequence,
165 // but we very likely will update the entry very soon, so leave it
// EDNS options excluded from the query hash, so that queries differing only
// in ECS or COOKIE data map to the same cache slot.
static const std::unordered_set
<uint16_t> s_skipOptions
= {EDNSOptionCode::ECS
, EDNSOptionCode::COOKIE
};
175 bool RecursorPacketCache::getResponsePacket(unsigned int tag
, const std::string
& queryPacket
, const DNSName
& qname
, uint16_t qtype
, uint16_t qclass
, time_t now
,
176 std::string
* responsePacket
, uint32_t* age
, vState
* valState
, uint32_t* qhash
, OptPBData
* pbdata
, bool tcp
)
178 *qhash
= canHashPacket(queryPacket
, s_skipOptions
);
179 auto& map
= getMap(tag
, *qhash
, tcp
);
180 auto shard
= map
.lock();
181 const auto& idx
= shard
->d_map
.get
<HashTag
>();
182 auto range
= idx
.equal_range(std::tie(tag
, *qhash
, tcp
));
184 if (range
.first
== range
.second
) {
189 return checkResponseMatches(*shard
, range
, queryPacket
, qname
, qtype
, qclass
, now
, responsePacket
, age
, valState
, pbdata
);
192 bool RecursorPacketCache::getResponsePacket(unsigned int tag
, const std::string
& queryPacket
, DNSName
& qname
, uint16_t* qtype
, uint16_t* qclass
, time_t now
,
193 std::string
* responsePacket
, uint32_t* age
, vState
* valState
, uint32_t* qhash
, OptPBData
* pbdata
, bool tcp
)
195 *qhash
= canHashPacket(queryPacket
, s_skipOptions
);
196 auto& map
= getMap(tag
, *qhash
, tcp
);
197 auto shard
= map
.lock();
198 const auto& idx
= shard
->d_map
.get
<HashTag
>();
199 auto range
= idx
.equal_range(std::tie(tag
, *qhash
, tcp
));
201 if (range
.first
== range
.second
) {
206 qname
= DNSName(queryPacket
.c_str(), static_cast<int>(queryPacket
.length()), sizeof(dnsheader
), false, qtype
, qclass
);
208 return checkResponseMatches(*shard
, range
, queryPacket
, qname
, *qtype
, *qclass
, now
, responsePacket
, age
, valState
, pbdata
);
211 void RecursorPacketCache::insertResponsePacket(unsigned int tag
, uint32_t qhash
, std::string
&& query
, const DNSName
& qname
, uint16_t qtype
, uint16_t qclass
, std::string
&& responsePacket
, time_t now
, uint32_t ttl
, const vState
& valState
, OptPBData
&& pbdata
, bool tcp
)
213 auto& map
= getMap(tag
, qhash
, tcp
);
214 auto shard
= map
.lock();
215 auto& idx
= shard
->d_map
.get
<HashTag
>();
216 auto range
= idx
.equal_range(std::tie(tag
, qhash
, tcp
));
217 auto iter
= range
.first
;
219 for (; iter
!= range
.second
; ++iter
) {
220 if (iter
->d_type
!= qtype
|| iter
->d_class
!= qclass
|| iter
->d_name
!= qname
) {
224 moveCacheItemToBack
<SequencedTag
>(shard
->d_map
, iter
);
225 iter
->d_packet
= std::move(responsePacket
);
226 iter
->d_query
= std::move(query
);
227 iter
->d_ttd
= now
+ ttl
;
228 iter
->d_creation
= now
;
229 iter
->d_vstate
= valState
;
230 iter
->d_submitted
= false;
232 iter
->d_pbdata
= std::move(*pbdata
);
238 struct Entry
entry(DNSName(qname
), qtype
, qclass
, std::move(responsePacket
), std::move(query
), tcp
, qhash
, now
+ ttl
, now
, tag
, valState
);
240 entry
.d_pbdata
= std::move(*pbdata
);
243 shard
->d_map
.insert(entry
);
244 map
.incEntriesCount();
246 if (shard
->d_map
.size() > shard
->d_shardSize
) {
247 auto& seq_idx
= shard
->d_map
.get
<SequencedTag
>();
248 seq_idx
.erase(seq_idx
.begin());
249 map
.decEntriesCount();
251 assert(map
.getEntriesCount() == shard
->d_map
.size()); // NOLINT(cppcoreguidelines-pro-bounds-array-to-pointer-decay): clib implementation
254 void RecursorPacketCache::doPruneTo(time_t now
, size_t maxSize
)
256 size_t cacheSize
= size();
257 pruneMutexCollectionsVector
<SequencedTag
>(now
, d_maps
, maxSize
, cacheSize
);
260 uint64_t RecursorPacketCache::doDump(int file
)
262 int fdupped
= dup(file
);
266 auto filePtr
= std::unique_ptr
<FILE, decltype(&fclose
)>(fdopen(fdupped
, "w"), fclose
);
273 time_t now
= time(nullptr);
276 size_t min
= std::numeric_limits
<size_t>::max();
278 uint64_t maxSize
= 0;
280 for (auto& shard
: d_maps
) {
281 auto lock
= shard
.lock();
282 const auto& sidx
= lock
->d_map
.get
<SequencedTag
>();
283 const auto shardSize
= lock
->d_map
.size();
284 fprintf(filePtr
.get(), "; packetcache shard %zu; size %zu/%zu\n", shardNum
, shardSize
, lock
->d_shardSize
);
285 min
= std::min(min
, shardSize
);
286 max
= std::max(max
, shardSize
);
287 maxSize
+= lock
->d_shardSize
;
289 for (const auto& entry
: sidx
) {
292 fprintf(filePtr
.get(), "%s %" PRId64
" %s ; tag %d %s\n", entry
.d_name
.toString().c_str(), static_cast<int64_t>(entry
.d_ttd
- now
), DNSRecordContent::NumberToType(entry
.d_type
).c_str(), entry
.d_tag
, entry
.d_tcp
? "tcp" : "udp");
295 fprintf(filePtr
.get(), "; error printing '%s'\n", entry
.d_name
.empty() ? "EMPTY" : entry
.d_name
.toString().c_str());
299 fprintf(filePtr
.get(), "; packetcache size: %" PRIu64
"/%" PRIu64
" shards: %zu min/max shard size: %zu/%zu\n", size(), maxSize
, d_maps
.size(), min
, max
);