From 7ce9aad613fcf08bbec672b73d83f665cd442da6 Mon Sep 17 00:00:00 2001
From: Otto Moerbeek
Date: Fri, 13 Dec 2019 12:56:02 +0100
Subject: [PATCH] Contention stats plus variable # of shards

---
 pdns/cachecleaner.hh   |  4 ++--
 pdns/pdns_recursor.cc  | 10 ++++++++--
 pdns/recursor_cache.cc | 33 ++++++++++++++++++++++-----------
 pdns/recursor_cache.hh | 28 +++++++++++++++++++++-------
 4 files changed, 53 insertions(+), 22 deletions(-)

diff --git a/pdns/cachecleaner.hh b/pdns/cachecleaner.hh
index c66e430bb4..9b748e8e60 100644
--- a/pdns/cachecleaner.hh
+++ b/pdns/cachecleaner.hh
@@ -173,7 +173,7 @@ template <typename S, typename C, typename T> uint64_t pruneMutexCollectionsVect
     return 0;

   for (auto& mc : maps) {
-    const std::lock_guard<std::mutex> lock(mc.mutex);
+    const typename C::lock l(mc);
     mc.d_cachecachevalid = false;
     auto& sidx = boost::multi_index::get<S>(mc.d_map);
     uint64_t erased = 0, lookedAt = 0;
@@ -207,7 +207,7 @@
   while (toTrim > 0) {
     size_t pershard = toTrim / maps_size + 1;
     for (auto& mc : maps) {
-      const std::lock_guard<std::mutex> lock(mc.mutex);
+      const typename C::lock l(mc);
       mc.d_cachecachevalid = false;
       auto& sidx = boost::multi_index::get<S>(mc.d_map);
       size_t removed = 0;
diff --git a/pdns/pdns_recursor.cc b/pdns/pdns_recursor.cc
index b54a91fbe4..a6e5c52525 100644
--- a/pdns/pdns_recursor.cc
+++ b/pdns/pdns_recursor.cc
@@ -127,7 +127,7 @@ static thread_local uint64_t t_frameStreamServersGeneration;
 #endif /* HAVE_FSTRM */

 thread_local std::unique_ptr<MT_t> MT; // the big MTasker
-std::unique_ptr<MemRecursorCache> s_RC = std::unique_ptr<MemRecursorCache>(new MemRecursorCache());
+std::unique_ptr<MemRecursorCache> s_RC;
 thread_local std::unique_ptr<RecursorPacketCache> t_packetCache;
@@ -2847,12 +2847,15 @@ static void doStats(void)
   uint64_t cacheHits = s_RC->cacheHits;
   uint64_t cacheMisses = s_RC->cacheMisses;
   uint64_t cacheSize = s_RC->size();
-
+  auto rc_stats = s_RC->stats();
+  double r = rc_stats.second == 0 ? 0.0 : (100.0 * rc_stats.first / rc_stats.second);
+
   if(g_stats.qcounter && (cacheHits + cacheMisses) && SyncRes::s_queries && SyncRes::s_outqueries) {
     g_log<<Logger::Notice<<"stats: "<<g_stats.qcounter<<" questions, "<<cacheSize<<" cache entries, "<<broadcastAccFunction<uint64_t>(pleaseGetNegCacheSize)<<" negative entries, "<< (int)((cacheHits*100.0)/(cacheHits+cacheMisses))<<"% cache hits"<<endl;
+    g_log<<Logger::Notice<<"stats: record cache contended/acquired "<<rc_stats.first<<'/'<<rc_stats.second<<" = "<<r<<'%'<<endl;
     g_log<<Logger::Notice<<"stats: throttle map: "<<broadcastAccFunction<uint64_t>(pleaseGetThrottleSize) <<", ns speeds: "
@@ -4789,6 +4792,7 @@ int main(int argc, char **argv)
   ::arg().setSwitch("qname-minimization", "Use Query Name Minimization")="yes";
   ::arg().setSwitch("nothing-below-nxdomain", "When an NXDOMAIN exists in cache for a name with fewer labels than the qname, send NXDOMAIN without doing a lookup (see RFC 8020)")="dnssec";
   ::arg().set("max-generate-steps", "Maximum number of $GENERATE steps when loading a zone from a file")="0";
+  ::arg().set("record-cache-shards", "Number of shards in the record cache")="1024";

 #ifdef NOD_ENABLED
   ::arg().set("new-domain-tracking", "Track newly observed domains (i.e. never seen before).")="no";
never seen before).")="no"; @@ -4887,6 +4891,8 @@ int main(int argc, char **argv) exit(0); } + s_RC = std::unique_ptr(new MemRecursorCache(::arg().asNum("record-cache-shards"))); + Logger::Urgency logUrgency = (Logger::Urgency)::arg().asNum("loglevel"); if (logUrgency < Logger::Error) diff --git a/pdns/recursor_cache.cc b/pdns/recursor_cache.cc index 4ceb1a1f7e..e9aadd423a 100644 --- a/pdns/recursor_cache.cc +++ b/pdns/recursor_cache.cc @@ -21,10 +21,10 @@ MemRecursorCache::MemRecursorCache(size_t mapsCount) : d_maps(mapsCount) MemRecursorCache::~MemRecursorCache() { try { - typedef std::unique_ptr> lock_t; + typedef std::unique_ptr lock_t; vector locks; for (auto& map : d_maps) { - locks.push_back(lock_t(new std::lock_guard(map.mutex))); + locks.push_back(lock_t(new lock(map))); } } catch(...) { @@ -40,12 +40,23 @@ size_t MemRecursorCache::size() return count; } +pair MemRecursorCache::stats() +{ + uint64_t c = 0, a = 0; + for (auto& map : d_maps) { + const lock l(map); + c += map.d_contended_count; + a += map.d_acuired_count; + } + return pair(c, a); +} + size_t MemRecursorCache::ecsIndexSize() { // XXX! size_t count = 0; for (auto& map : d_maps) { - const std::lock_guard lock(map.mutex); + const lock l(map); count += map.d_ecsIndex.size(); } return count; @@ -56,7 +67,7 @@ size_t MemRecursorCache::bytes() { size_t ret = 0; for (auto& map : d_maps) { - const std::lock_guard lock(map.mutex); + const lock l(map); for (const auto& i : map.d_map) { ret += sizeof(struct CacheEntry); ret += i.d_qname.toString().length(); @@ -210,7 +221,7 @@ int32_t MemRecursorCache::get(time_t now, const DNSName &qname, const QType& qt, const uint16_t qtype = qt.getCode(); auto& map = getMap(qname); - const std::lock_guard lock(map.mutex); + const lock l(map); /* If we don't have any netmask-specific entries at all, let's just skip this to be able to use the nice d_cachecache hack. 
@@ -272,7 +283,7 @@ int32_t MemRecursorCache::get(time_t now, const DNSName &qname, const QType& qt,
 void MemRecursorCache::replace(time_t now, const DNSName &qname, const QType& qt, const vector<DNSRecord>& content, const vector<shared_ptr<RRSIGRecordContent>>& signatures, const std::vector<std::shared_ptr<DNSRecord>>& authorityRecs, bool auth, boost::optional<Netmask> ednsmask, vState state)
 {
   auto& map = getMap(qname);
-  const std::lock_guard<std::mutex> lock(map.mutex);
+  const lock l(map);
   map.d_cachecachevalid = false;
   //  cerr<<"Replacing "<<qname<<" for "<< (ednsmask ? ednsmask->toString() : "everyone") << endl;
@@ -362,7 +373,7 @@ size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, uint16_t qty
   if (!sub) {
     auto& map = getMap(name);
-    const std::lock_guard<std::mutex> lock(map.mutex);
+    const lock l(map);
     map.d_cachecachevalid = false;
     auto& idx = map.d_map.get<NameOnlyHashedTag>();
     size_t n = idx.erase(name);
@@ -381,7 +392,7 @@ size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, uint16_t qty
   }
   else {
     for (auto& map : d_maps) {
-      const std::lock_guard<std::mutex> lock(map.mutex);
+      const lock l(map);
       map.d_cachecachevalid = false;
       auto& idx = map.d_map.get<OrderedTag>();
       for (auto i = idx.lower_bound(name); i != idx.end(); ) {
@@ -414,7 +425,7 @@ size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, uint16_t qty
 bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, uint16_t qtype, uint32_t newTTL)
 {
   auto& map = getMap(name);
-  const std::lock_guard<std::mutex> lock(map.mutex);
+  const lock l(map);
   cache_t::iterator iter = map.d_map.find(tie(name, qtype));
   if (iter == map.d_map.end()) {
     return false;
   }
@@ -442,7 +453,7 @@ bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, uint16_t qtyp
 bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName &qname, const QType& qt, const ComboAddress& who, bool requireAuth, vState newState, boost::optional<time_t> capTTD)
 {
   auto& map = getMap(qname);
-  const std::lock_guard<std::mutex> lock(map.mutex);
+  const lock l(map);
   bool updated = false;
   uint16_t qtype = qt.getCode();
@@ -496,7 +507,7 @@ uint64_t MemRecursorCache::doDump(int fd)
   uint64_t count = 0;
   for (auto& map : d_maps) {
-    const std::lock_guard<std::mutex> lock(map.mutex);
+    const lock l(map);
     const auto& sidx = map.d_map.get<SequencedTag>();
     time_t now = time(0);
diff --git a/pdns/recursor_cache.hh b/pdns/recursor_cache.hh
index db5bd4766e..19535e2eda 100644
--- a/pdns/recursor_cache.hh
+++ b/pdns/recursor_cache.hh
@@ -52,6 +52,7 @@ public:
   size_t size();
   size_t bytes();
+  pair<uint64_t, uint64_t> stats();
   size_t ecsIndexSize();

   int32_t get(time_t, const DNSName &qname, const QType& qt, bool requireAuth, vector<DNSRecord>* res, const ComboAddress& who, vector<std::shared_ptr<RRSIGRecordContent>>* signatures=nullptr, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs=nullptr, bool* variable=nullptr, vState* state=nullptr, bool* wasAuth=nullptr);
@@ -190,12 +191,7 @@ private:
   struct MapCombo
   {
-    MapCombo()
-    {
-    }
-    ~MapCombo()
-    {
-    }
+    MapCombo() {}
     MapCombo(const MapCombo &) = delete;
     MapCombo & operator=(const MapCombo &) = delete;

     cache_t d_map;
@@ -205,8 +201,10 @@ private:
     std::mutex mutex;
     bool d_cachecachevalid{false};
     std::atomic<uint64_t> d_entriesCount{0};
+    uint64_t d_contended_count{0};
+    uint64_t d_acquired_count{0};
   };
-
+
   vector<MapCombo> d_maps;
   MapCombo& getMap(const DNSName &qname)
   {
@@ -219,6 +217,22 @@ private:
   int32_t handleHit(MapCombo& map, OrderedTagIterator_t& entry, const DNSName& qname, const ComboAddress& who, vector<DNSRecord>* res, vector<std::shared_ptr<RRSIGRecordContent>>* signatures, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs, bool* variable, vState* state, bool* wasAuth);

 public:
+  struct lock {
+    lock(MapCombo& map) : m(map.mutex)
+    {
+      if (!m.try_lock()) {
+        m.lock();
+        map.d_contended_count++;
+      }
+      map.d_acquired_count++;
+    }
+    ~lock() {
+      m.unlock();
+    }
+  private:
+    std::mutex &m;
+  };
+
   void preRemoval(const CacheEntry& entry)
   {
     if (entry.d_netmask.empty()) {
-- 
2.47.2
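
Note on the locking idiom the patch introduces (this note is not part of the patch): MemRecursorCache::lock calls try_lock() first and only falls back to a blocking lock() when that fails, counting that acquisition as contended; every acquisition bumps the acquired counter, stats() sums the per-shard (contended, acquired) pairs, and doStats() reports contended/acquired as a percentage. The standalone sketch below shows the same mechanism in isolation; Shard and ContentionLock are illustrative names, not PowerDNS code.

// Minimal sketch of the contention-counting lock, assuming only the
// standard library. Compile with: g++ -std=c++11 -pthread sketch.cc
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>

// Stand-in for one cache shard: a mutex plus the two counters the patch
// adds to MapCombo (names shortened for the example).
struct Shard
{
  std::mutex mutex;
  uint64_t contended{0}; // acquisitions that had to wait
  uint64_t acquired{0};  // all acquisitions
};

// Same idiom as MemRecursorCache::lock: try_lock() first, and only when
// that fails block in lock() and count the acquisition as contended.
// Both counters are updated while the mutex is held.
class ContentionLock
{
public:
  explicit ContentionLock(Shard& shard) : d_mutex(shard.mutex)
  {
    if (!d_mutex.try_lock()) {
      d_mutex.lock();
      shard.contended++;
    }
    shard.acquired++;
  }
  ~ContentionLock()
  {
    d_mutex.unlock();
  }
  ContentionLock(const ContentionLock&) = delete;
  ContentionLock& operator=(const ContentionLock&) = delete;
private:
  std::mutex& d_mutex;
};

int main()
{
  Shard shard;
  uint64_t work = 0;

  // Two threads hammer the same shard, so a fraction of acquisitions contend.
  auto worker = [&shard, &work]() {
    for (int i = 0; i < 100000; i++) {
      const ContentionLock l(shard);
      work++;
    }
  };
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();

  // Same ratio doStats() logs: contended / acquired, as a percentage.
  double r = shard.acquired == 0 ? 0.0 : (100.0 * shard.contended / shard.acquired);
  std::cout << "record cache contended/acquired " << shard.contended << "/"
            << shard.acquired << " = " << r << "%" << std::endl;
  return 0;
}

The configurable record-cache-shards setting ties into the same measurement: more shards spread lookups over more MapCombo mutexes, which is what drives the contended/acquired ratio down under concurrent load.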