{
uint64_t maxNumberOfEntries = d_maxEntries;
std::vector<DNSName> emptyEntries;
-
uint64_t erased = 0;
- uint64_t lookedAt = 0;
- uint64_t toLook = std::max(d_entriesCount / 5U, static_cast<uint64_t>(1U));
- if (d_entriesCount > maxNumberOfEntries) {
- uint64_t toErase = d_entriesCount - maxNumberOfEntries;
- toLook = toErase * 5;
- // we are full, scan at max 5 * toErase entries and stop once we have nuked enough
+ auto zones = d_zones.write_lock();
+ // To start, just look through 10% of each zone and nuke everything that is expired
+ zones->visit([now, &erased, &emptyEntries](const SuffixMatchTree<std::shared_ptr<LockGuarded<ZoneEntry>>>& node) {
+ if (!node.d_value) {
+ return;
+ }
- auto zones = d_zones.write_lock();
- zones->visit([now, &erased, toErase, toLook, &lookedAt, &emptyEntries](const SuffixMatchTree<std::shared_ptr<LockGuarded<ZoneEntry>>>& node) {
- if (!node.d_value || erased > toErase || lookedAt > toLook) {
- return;
+ auto zoneEntry = node.d_value->lock();
+ auto& sidx = boost::multi_index::get<ZoneEntry::SequencedTag>(zoneEntry->d_entries);
+ const auto toLookAtForThisZone = (zoneEntry->d_entries.size() + 9) / 10;
+ uint64_t lookedAt = 0;
+ for (auto it = sidx.begin(); it != sidx.end() && lookedAt < toLookAtForThisZone; ++lookedAt) {
+ if (it->d_ttd < now) {
+ it = sidx.erase(it);
+ ++erased;
+ }
+ else {
+ ++it;
}
+ }
- auto zoneEntry = node.d_value->lock();
- auto& sidx = boost::multi_index::get<ZoneEntry::SequencedTag>(zoneEntry->d_entries);
- for (auto it = sidx.begin(); it != sidx.end(); ++lookedAt) {
- if (erased >= toErase || lookedAt >= toLook) {
- break;
- }
+ if (zoneEntry->d_entries.empty()) {
+ emptyEntries.push_back(zoneEntry->d_zone);
+ }
+ });
- if (it->d_ttd < now) {
- it = sidx.erase(it);
- ++erased;
- }
- else {
- ++it;
- }
- }
+ d_entriesCount -= erased;
- if (zoneEntry->d_entries.size() == 0) {
- emptyEntries.push_back(zoneEntry->d_zone);
- }
- });
- }
- else {
- // we are not full, just look through 10% of the cache and nuke everything that is expired
- auto zones = d_zones.write_lock();
- zones->visit([now, &erased, toLook, &lookedAt, &emptyEntries](const SuffixMatchTree<std::shared_ptr<LockGuarded<ZoneEntry>>>& node) {
- if (!node.d_value) {
+ // If we are still above the limit, try harder by nuking entries from each zone in LRU order
+ auto entriesCount = d_entriesCount.load();
+ if (entriesCount > maxNumberOfEntries) {
+ erased = 0;
+ uint64_t toErase = entriesCount - maxNumberOfEntries;
+ zones->visit([&erased, &toErase, &entriesCount, &emptyEntries](const SuffixMatchTree<std::shared_ptr<LockGuarded<ZoneEntry>>>& node) {
+ if (!node.d_value || entriesCount == 0) {
return;
}
-
auto zoneEntry = node.d_value->lock();
+ const auto zoneSize = zoneEntry->d_entries.size();
auto& sidx = boost::multi_index::get<ZoneEntry::SequencedTag>(zoneEntry->d_entries);
- for (auto it = sidx.begin(); it != sidx.end(); ++lookedAt) {
- if (lookedAt >= toLook) {
+ const auto toTrimForThisZone = static_cast<uint64_t>(std::round(static_cast<double>(toErase) * static_cast<double>(zoneSize) / static_cast<double>(entriesCount)));
+ if (entriesCount < zoneSize) {
+ throw std::runtime_error("Inconsistent aggressive cache " + std::to_string(entriesCount) + " " + std::to_string(zoneSize));
+ }
+ // This is comparable to what cachecleaner.hh::pruneMutexCollectionsVector() is doing, look there for an explanation
+ entriesCount -= zoneSize;
+ uint64_t trimmedFromThisZone = 0;
+ for (auto it = sidx.begin(); it != sidx.end() && trimmedFromThisZone < toTrimForThisZone; ) {
+ it = sidx.erase(it);
+ ++erased;
+ ++trimmedFromThisZone;
+ if (--toErase == 0) {
break;
}
- if (it->d_ttd < now || lookedAt > toLook) {
- it = sidx.erase(it);
- ++erased;
- }
- else {
- ++it;
- }
}
-
- if (zoneEntry->d_entries.size() == 0) {
+ if (zoneEntry->d_entries.empty()) {
emptyEntries.push_back(zoneEntry->d_zone);
}
});
- }
- d_entriesCount -= erased;
+ d_entriesCount -= erased;
+ }
if (!emptyEntries.empty()) {
- auto zones = d_zones.write_lock();
for (const auto& entry : emptyEntries) {
zones->remove(entry);
}
return false;
}
+ auto firstIndexIterator = zoneEntry->d_entries.project<ZoneEntry::OrderedTag>(it);
if (it->d_ttd <= now) {
- moveCacheItemToFront<ZoneEntry::SequencedTag>(zoneEntry->d_entries, it);
+ moveCacheItemToFront<ZoneEntry::SequencedTag>(zoneEntry->d_entries, firstIndexIterator);
return false;
}
entry = *it;
- moveCacheItemToBack<ZoneEntry::SequencedTag>(zoneEntry->d_entries, it);
+ moveCacheItemToBack<ZoneEntry::SequencedTag>(zoneEntry->d_entries, firstIndexIterator);
return true;
}
BOOST_CHECK_EQUAL(queriesCount, 5U);
}
+BOOST_AUTO_TEST_CASE(test_aggressive_nsec_replace)
+{
+ const size_t testSize = 10000;
+ auto cache = make_unique<AggressiveNSECCache>(testSize);
+
+ struct timeval now{};
+ Utility::gettimeofday(&now, nullptr);
+
+ vector<DNSName> names;
+ names.reserve(testSize);
+ for (size_t i = 0; i < testSize; i++) {
+ names.emplace_back(std::to_string(i) + "powerdns.com");
+ }
+
+ DTime time;
+ time.set();
+
+ for (const auto& name : names) {
+ DNSRecord rec;
+ rec.d_name = name;
+ rec.d_type = QType::NSEC3;
+ rec.d_ttl = now.tv_sec + 10;
+ rec.setContent(getRecordContent(QType::NSEC3, "1 0 500 ab HASG==== A RRSIG NSEC3"));
+ auto rrsig = std::make_shared<RRSIGRecordContent>("NSEC3 5 3 10 20370101000000 20370101000000 24567 dummy. data");
+ cache->insertNSEC(DNSName("powerdns.com"), rec.d_name, rec, {rrsig}, true);
+ }
+ auto diff1 = time.udiff(true);
+
+ BOOST_CHECK_EQUAL(cache->getEntriesCount(), testSize);
+ for (const auto& name : names) {
+ DNSRecord rec;
+ rec.d_name = name;
+ rec.d_type = QType::NSEC3;
+ rec.d_ttl = now.tv_sec + 10;
+ rec.setContent(getRecordContent(QType::NSEC3, "1 0 500 ab HASG==== A RRSIG NSEC3"));
+ auto rrsig = std::make_shared<RRSIGRecordContent>("NSEC 5 3 10 20370101000000 20370101000000 24567 dummy. data");
+ cache->insertNSEC(DNSName("powerdns.com"), rec.d_name, rec, {rrsig}, true);
+ }
+
+ BOOST_CHECK_EQUAL(cache->getEntriesCount(), testSize);
+
+ auto diff2 = time.udiff(true);
+ // Check that replace is about equally fast as insert
+ BOOST_ASSERT(diff1 < diff2 * 1.2 && diff2 < diff1 * 1.2);
+}
+
BOOST_AUTO_TEST_CASE(test_aggressive_nsec_wiping)
{
auto cache = make_unique<AggressiveNSECCache>(10000);
rec.d_name = DNSName("www.powerdns.org");
rec.d_type = QType::NSEC3;
- rec.d_ttl = now.tv_sec + 10;
+ rec.d_ttl = now.tv_sec + 20;
rec.setContent(getRecordContent(QType::NSEC3, "1 0 500 ab HASG==== A RRSIG NSEC3"));
rrsig = std::make_shared<RRSIGRecordContent>("NSEC3 5 3 10 20370101000000 20370101000000 24567 dummy. data");
cache->insertNSEC(DNSName("powerdns.org"), rec.d_name, rec, {rrsig}, true);
BOOST_CHECK_EQUAL(cache->getEntriesCount(), 3U);
/* we have set a upper bound to 2 entries, so we are above,
- and all entries are actually expired, so we will prune one entry
+ and one entry is actually expired, so we will prune one entry
to get below the limit */
- cache->prune(now.tv_sec + 600);
+ cache->prune(now.tv_sec + 15);
BOOST_CHECK_EQUAL(cache->getEntriesCount(), 2U);
- /* now we are at the limit, so we will scan 1/5th of the entries,
- and prune the expired ones, which mean we will also remove only one */
- cache->prune(now.tv_sec + 600);
- BOOST_CHECK_EQUAL(cache->getEntriesCount(), 1U);
-
- /* now we are below the limit, so we will scan 1/5th of the entries again,
- and prune the expired ones, which mean we will remove the last one */
+ /* now we are at the limit, so we will scan 10% of each zone's entries, rounded up,
+ and prune the expired ones, which means we will also remove the remaining two */
cache->prune(now.tv_sec + 600);
BOOST_CHECK_EQUAL(cache->getEntriesCount(), 0U);
}