return 0;
for (auto& mc : maps) {
- const std::lock_guard<std::mutex> lock(mc.mutex);
+ const typename C::lock l(mc);
mc.d_cachecachevalid = false;
auto& sidx = boost::multi_index::get<S>(mc.d_map);
uint64_t erased = 0, lookedAt = 0;
while (toTrim > 0) {
size_t pershard = toTrim / maps_size + 1;
for (auto& mc : maps) {
- const std::lock_guard<std::mutex> lock(mc.mutex);
+ const typename C::lock l(mc);
mc.d_cachecachevalid = false;
auto& sidx = boost::multi_index::get<S>(mc.d_map);
size_t removed = 0;
#endif /* HAVE_FSTRM */
thread_local std::unique_ptr<MT_t> MT; // the big MTasker
-std::unique_ptr<MemRecursorCache> s_RC = std::unique_ptr<MemRecursorCache>(new MemRecursorCache());
+std::unique_ptr<MemRecursorCache> s_RC;
thread_local std::unique_ptr<RecursorPacketCache> t_packetCache;
uint64_t cacheHits = s_RC->cacheHits;
uint64_t cacheMisses = s_RC->cacheMisses;
uint64_t cacheSize = s_RC->size();
-
+ auto rc_stats = s_RC->stats();
+ double r = rc_stats.second == 0 ? 0.0 : (100.0 * rc_stats.first / rc_stats.second);
+
if(g_stats.qcounter && (cacheHits + cacheMisses) && SyncRes::s_queries && SyncRes::s_outqueries) {
g_log<<Logger::Notice<<"stats: "<<g_stats.qcounter<<" questions, "<<
cacheSize << " cache entries, "<<
broadcastAccFunction<uint64_t>(pleaseGetNegCacheSize)<<" negative entries, "<<
(int)((cacheHits*100.0)/(cacheHits+cacheMisses))<<"% cache hits"<<endl;
+ g_log << Logger::Notice<< "stats: cache contended/acquired " << rc_stats.first << '/' << rc_stats.second << " = " << r << '%' << endl;
g_log<<Logger::Notice<<"stats: throttle map: "
<< broadcastAccFunction<uint64_t>(pleaseGetThrottleSize) <<", ns speeds: "
::arg().setSwitch("qname-minimization", "Use Query Name Minimization")="yes";
::arg().setSwitch("nothing-below-nxdomain", "When an NXDOMAIN exists in cache for a name with fewer labels than the qname, send NXDOMAIN without doing a lookup (see RFC 8020)")="dnssec";
::arg().set("max-generate-steps", "Maximum number of $GENERATE steps when loading a zone from a file")="0";
+ ::arg().set("record-cache-shards", "Number of shards in the record cache")="1024";
#ifdef NOD_ENABLED
::arg().set("new-domain-tracking", "Track newly observed domains (i.e. never seen before).")="no";
exit(0);
}
+ s_RC = std::unique_ptr<MemRecursorCache>(new MemRecursorCache(::arg().asNum("record-cache-shards")));
+
Logger::Urgency logUrgency = (Logger::Urgency)::arg().asNum("loglevel");
if (logUrgency < Logger::Error)
MemRecursorCache::~MemRecursorCache()
{
try {
- typedef std::unique_ptr<std::lock_guard<std::mutex>> lock_t;
+ typedef std::unique_ptr<lock> lock_t;
vector<lock_t> locks;
for (auto& map : d_maps) {
- locks.push_back(lock_t(new std::lock_guard<std::mutex>(map.mutex)));
+ locks.push_back(lock_t(new lock(map)));
}
}
catch(...) {
return count;
}
+// Returns the record-cache lock statistics summed over all shards as
+// (contended, acquired). A lock acquisition counts as "contended" when the
+// uncontended try_lock fast path failed and the caller had to block.
+// NOTE(review): reading the counters takes each shard's lock, so calling
+// stats() itself increments the acquired count (and possibly the contended
+// count) — the figures include this sampling overhead.
+pair<uint64_t,uint64_t> MemRecursorCache::stats()
+{
+  uint64_t c = 0, a = 0;
+  for (auto& map : d_maps) {
+    // lock guard updates the very counters we read; see NOTE above
+    const lock l(map);
+    c += map.d_contended_count;
+    a += map.d_acuired_count;
+  }
+  return pair<uint64_t,uint64_t>(c, a);
+}
+
size_t MemRecursorCache::ecsIndexSize()
{
// XXX!
size_t count = 0;
for (auto& map : d_maps) {
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
count += map.d_ecsIndex.size();
}
return count;
{
size_t ret = 0;
for (auto& map : d_maps) {
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
for (const auto& i : map.d_map) {
ret += sizeof(struct CacheEntry);
ret += i.d_qname.toString().length();
const uint16_t qtype = qt.getCode();
auto& map = getMap(qname);
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
/* If we don't have any netmask-specific entries at all, let's just skip this
to be able to use the nice d_cachecache hack. */
void MemRecursorCache::replace(time_t now, const DNSName &qname, const QType& qt, const vector<DNSRecord>& content, const vector<shared_ptr<RRSIGRecordContent>>& signatures, const std::vector<std::shared_ptr<DNSRecord>>& authorityRecs, bool auth, boost::optional<Netmask> ednsmask, vState state)
{
auto& map = getMap(qname);
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
map.d_cachecachevalid = false;
// cerr<<"Replacing "<<qname<<" for "<< (ednsmask ? ednsmask->toString() : "everyone") << endl;
if (!sub) {
auto& map = getMap(name);
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
map.d_cachecachevalid = false;
auto& idx = map.d_map.get<NameOnlyHashedTag>();
size_t n = idx.erase(name);
}
else {
for (auto& map : d_maps) {
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
map.d_cachecachevalid = false;
auto& idx = map.d_map.get<OrderedTag>();
for (auto i = idx.lower_bound(name); i != idx.end(); ) {
bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, uint16_t qtype, uint32_t newTTL)
{
auto& map = getMap(name);
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
cache_t::iterator iter = map.d_map.find(tie(name, qtype));
if (iter == map.d_map.end()) {
return false;
bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName &qname, const QType& qt, const ComboAddress& who, bool requireAuth, vState newState, boost::optional<time_t> capTTD)
{
auto& map = getMap(qname);
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
bool updated = false;
uint16_t qtype = qt.getCode();
uint64_t count = 0;
for (auto& map : d_maps) {
- const std::lock_guard<std::mutex> lock(map.mutex);
+ const lock l(map);
const auto& sidx = map.d_map.get<SequencedTag>();
time_t now = time(0);
size_t size();
size_t bytes();
+ pair<uint64_t,uint64_t> stats();
size_t ecsIndexSize();
int32_t get(time_t, const DNSName &qname, const QType& qt, bool requireAuth, vector<DNSRecord>* res, const ComboAddress& who, vector<std::shared_ptr<RRSIGRecordContent>>* signatures=nullptr, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs=nullptr, bool* variable=nullptr, vState* state=nullptr, bool* wasAuth=nullptr);
+// One shard of the record cache: its own map, its own mutex, plus
+// per-shard bookkeeping. Sharding spreads lock contention across shards.
struct MapCombo
{
- MapCombo()
- {
- }
- ~MapCombo()
- {
- }
+ MapCombo() {}
MapCombo(const MapCombo &) = delete;
MapCombo & operator=(const MapCombo &) = delete;
cache_t d_map;
std::mutex mutex;
bool d_cachecachevalid{false};
std::atomic<uint64_t> d_entriesCount{0};
+ // Lock-contention statistics, only ever updated while `mutex` is held
+ // (see the `lock` guard), so plain uint64_t suffices — no atomics needed.
+ // NOTE(review): "d_acuired_count" looks like a typo for "d_acquired_count";
+ // kept as-is because the name is referenced from other hunks of this patch.
+ uint64_t d_contended_count{0};
+ uint64_t d_acuired_count{0};
};
-
+
vector<MapCombo> d_maps;
MapCombo& getMap(const DNSName &qname)
{
int32_t handleHit(MapCombo& map, OrderedTagIterator_t& entry, const DNSName& qname, const ComboAddress& who, vector<DNSRecord>* res, vector<std::shared_ptr<RRSIGRecordContent>>* signatures, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs, bool* variable, vState* state, bool* wasAuth);
public:
+ struct lock {
+   // RAII guard for a MapCombo's mutex that also maintains the shard's
+   // contention statistics. The counters are only touched while the mutex
+   // is held, so they do not need to be atomic themselves.
+   //
+   // `explicit` prevents an accidental implicit MapCombo -> lock
+   // conversion; copying is deleted because a copied guard would unlock
+   // the same mutex twice (undefined behaviour).
+   explicit lock(MapCombo& map) : m(map.mutex)
+   {
+     if (!m.try_lock()) {
+       // fast path lost: block until the mutex is ours, and record
+       // that this acquisition was contended
+       m.lock();
+       map.d_contended_count++;
+     }
+     map.d_acuired_count++;
+   }
+   ~lock() {
+     m.unlock();
+   }
+   lock(const lock&) = delete;
+   lock& operator=(const lock&) = delete;
+ private:
+   std::mutex &m;
+ };
+
void preRemoval(const CacheEntry& entry)
{
if (entry.d_netmask.empty()) {