From: Otto Moerbeek Date: Mon, 11 Jul 2022 13:25:48 +0000 (+0200) Subject: Add a comment block describing how serve-stale works X-Git-Tag: rec-4.8.0-alpha1~24^2~1 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8c08b4ecab3bb8cc1d6e5ab199b2f1974f0bd6c6;p=thirdparty%2Fpdns.git Add a comment block describing how serve-stale works Plus docs and a few tweaks --- diff --git a/pdns/recursor_cache.cc b/pdns/recursor_cache.cc index 820760a305..6a060893a4 100644 --- a/pdns/recursor_cache.cc +++ b/pdns/recursor_cache.cc @@ -15,6 +15,43 @@ #include "cachecleaner.hh" #include "rec-taskqueue.hh" +/* + * SERVE-STALE: the general approach + * + * The general switch to enable serve-stale is s_maxServedStaleExtensions. If this value is zero, no + * serve-stale is done. If it is positive, it determines how many times the serve-stale status of a + * record can be extended. + * + * Each record in the cache has a field d_servedStale. If this value is zero, no special handling is + * done. If it is positive, the record is being served stale. The value determines how many times + * the serve-stale status was extended. Each time an extension happens, the value is incremented and + * a task to see if the record resolves will be pushed. When the served-stale status is extended, + * the TTD of a record is also changed so the record will be considered not-expired by the get() + * function. The TTD will be s_serveStaleExtensionPeriod in the future, unless the original TTL was + * smaller than that. If d_servedStale reaches s_maxServedStaleExtensions the serve-stale status + * will no longer be extended and the record will be considered really expired. + * + * With s_serveStaleExtensionPeriod of 30 seconds, setting s_maxServedStaleExtensions to 1440 will + * cause a record to be served stale a maximum of 30s * 1440 = 12 hours. If the original TTL is + * smaller than 30, this period will be shorter. 
If there was a long time between serve-stale + * extensions, the value of d_servedStale will be incremented by more than one to account for the + * longer period. + * + * If serve-stale is enabled, the resolving process first will try to resolve a record in the + * ordinary way, with the difference that a timeout will not lead to an ImmediateServFailException + * being passed to the caller, but the resolving will be tried again with a flag to allow marking + * records as served-stale. If the second time around a timeout happens, an + * ImmediateServFailException *will* be passed to the caller. + * + * When serving stale, records are only wiped from the cache if they are older than + * s_maxServedStaleExtensions * s_serveStaleExtensionPeriod. See isStale(). This is to have a good + * chance of records being available for marking stale if a name server has an issue. + * + * The tasks to see if nameservers are reachable again do a resolve in refresh mode, considering + * served-stale records as expired. When a record resolves again, the d_servedStale field will be + * reset. 
+ */ + uint16_t MemRecursorCache::s_maxServedStaleExtensions; MemRecursorCache::MemRecursorCache(size_t mapsCount) : @@ -169,15 +206,25 @@ void MemRecursorCache::updateStaleEntry(time_t now, MemRecursorCache::OrderedTag { // We need to take care a infrequently access stale item cannot be extended past // s_maxServedStaleExtension * s_serveStaleExtensionPeriod - // We we look how old the entry is, and increase d_servedStale accordingly, taking care not to overflow + // We look how old the entry is, and increase d_servedStale accordingly, taking care not to overflow const time_t howlong = std::max(static_cast(1), now - entry->d_ttd); const uint32_t extension = std::max(1U, std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod)); entry->d_servedStale = std::min(entry->d_servedStale + 1 + howlong / extension, static_cast(s_maxServedStaleExtensions)); - entry->d_ttd = now + std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod); + entry->d_ttd = now + extension; pushRefreshTask(entry->d_qname, entry->d_qtype, entry->d_ttd, entry->d_netmask); } +// If we are serving this record stale (or *should*) and the ttd has +// passed increase ttd to the future and remember that we did. Also +// push a refresh task. 
+void MemRecursorCache::handleServeStaleBookkeeping(time_t now, bool serveStale, MemRecursorCache::OrderedTagIterator_t& entry) +{ + if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) { + updateStaleEntry(now, entry); + } +} + MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSIndex(MapCombo::LockedContent& map, time_t now, const DNSName& qname, const QType qtype, bool requireAuth, const ComboAddress& who, bool serveStale) { // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock) @@ -202,12 +249,8 @@ MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSInde } continue; } - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. - if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, entry); - } + handleServeStaleBookkeeping(now, serveStale, entry); + if (entry->d_ttd > now) { if (!requireAuth || entry->d_auth) { return entry; @@ -232,12 +275,7 @@ MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSInde auto key = std::make_tuple(qname, qtype, boost::none, Netmask()); auto entry = map.d_map.find(key); if (entry != map.d_map.end()) { - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. 
- if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, entry); - } + handleServeStaleBookkeeping(now, serveStale, entry); if (entry->d_ttd > now) { if (!requireAuth || entry->d_auth) { return entry; @@ -378,7 +416,7 @@ time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qt, F firstIndexIterator = map->d_map.project(i); // When serving stale, we consider expired records - if (i->d_ttd <= now && !serveStale && i->d_servedStale == 0) { + if (i->isEntryUsable(now, serveStale)) { moveCacheItemToFront(map->d_map, firstIndexIterator); continue; } @@ -387,12 +425,9 @@ time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qt, F continue; } found = true; - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. - if ((serveStale || i->d_servedStale > 0) && i->d_ttd <= now && i->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, firstIndexIterator); - } + + handleServeStaleBookkeeping(now, serveStale, firstIndexIterator); + ttd = handleHit(*map, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); if (qt != QType::ANY && qt != QType::ADDR) { // normally if we have a hit, we are done @@ -422,7 +457,7 @@ time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qt, F firstIndexIterator = map->d_map.project(i); // When serving stale, we consider expired records - if (i->d_ttd <= now && !serveStale && i->d_servedStale == 0) { + if (i->isEntryUsable(now, serveStale)) { moveCacheItemToFront(map->d_map, firstIndexIterator); continue; } @@ -431,12 +466,9 @@ time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qt, F continue; } found = true; - // If we are serving this record stale (or *should*) and the - // ttd 
has passed increase ttd to the future and remember that - // we did. Also push a refresh task. - if ((serveStale || i->d_servedStale > 0) && i->d_ttd <= now && i->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, firstIndexIterator); - } + + handleServeStaleBookkeeping(now, serveStale, firstIndexIterator); + ttd = handleHit(*map, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); if (qt != QType::ANY && qt != QType::ADDR) { // normally if we have a hit, we are done @@ -519,7 +551,6 @@ void MemRecursorCache::replace(time_t now, const DNSName& qname, const QType qt, // the parent // BUT make sure that we CAN refresh the root if (ce.d_auth && auth && qt == QType::NS && !isNew && !qname.isRoot()) { - // cerr<<"\tLimiting TTL of auth->auth NS set replace to "< 0) { - return d_ttd + s_maxServedStaleExtensions * std::min(s_serveStaleExtensionPeriod, d_orig_ttl) < now; + return d_ttd + static_cast(s_maxServedStaleExtensions) * std::min(s_serveStaleExtensionPeriod, d_orig_ttl) < now; } else { return d_ttd < now; } } + bool isEntryUsable(time_t now, bool serveStale) const + { + // When serving stale, we consider expired records + return d_ttd <= now && !serveStale && d_servedStale == 0; + } + records_t d_records; std::vector> d_signatures; std::vector> d_authorityRecs; @@ -272,6 +278,7 @@ private: time_t handleHit(MapCombo::LockedContent& content, OrderedTagIterator_t& entry, const DNSName& qname, uint32_t& origTTL, vector* res, vector>* signatures, std::vector>* authorityRecs, bool* variable, boost::optional& state, bool* wasAuth, DNSName* authZone, ComboAddress* fromAuthIP); void updateStaleEntry(time_t now, OrderedTagIterator_t& entry); + void handleServeStaleBookkeeping(time_t, bool, OrderedTagIterator_t&); public: void preRemoval(MapCombo::LockedContent& map, const CacheEntry& entry) diff --git a/pdns/recursordist/docs/appendices/internals.rst 
b/pdns/recursordist/docs/appendices/internals.rst index fcbafeaad6..fb73a3fd5e 100644 --- a/pdns/recursordist/docs/appendices/internals.rst +++ b/pdns/recursordist/docs/appendices/internals.rst @@ -453,6 +453,30 @@ As experience shows, this configuration error is encountered in the wild often enough to warrant this workaround. See :ref:`setting-save-parent-ns-set`. +.. _serve-stale: + +Serve Stale +----------- + +Starting with version 4.8.0, the Recursor implements ``Serve Stale`` (:rfc:`8767`). +This is a mechanism that allows records in the record cache that are expired +but that cannot be refreshed (due to network or authoritative server issues) to be served anyway. + +The :ref:`setting-serve-stale-extensions` determines how many times the record's lifetime can be extended. +Each extension of the lifetime of a record lasts 30s. +A value of 1440 means the maximum extra life time is 30 * 1440 seconds which is 12 hours. +If the original TTL of a record was less than 30s, the original TTL will be used as extension period. + +On each extension an asynchronous task to resolve the name will be created. +If that task succeeds, the record will not be served stale anymore, as an up-to-date value is now available. + + +If :ref:`setting-serve-stale-extensions` is not zero, expired records will be kept in the record cache until the number of records becomes too large. +Cache eviction will then be done on a least-recently-used basis. + +When dumping the cache using ``rec_control dump-cache`` the ``ss`` value shows the serve stale extension count. +A value of 0 means the record is not being served stale, while +a positive value shows the number of times the serve stale period has been extended.  
Some small things ------------------ diff --git a/pdns/recursordist/docs/settings.rst b/pdns/recursordist/docs/settings.rst index 713b960c7d..47b20b6f48 100644 --- a/pdns/recursordist/docs/settings.rst +++ b/pdns/recursordist/docs/settings.rst @@ -1789,6 +1789,21 @@ Setting this to an empty string disables secpoll. This makes the server authoritatively aware of: ``10.in-addr.arpa``, ``168.192.in-addr.arpa``, ``16-31.172.in-addr.arpa``, which saves load on the AS112 servers. Individual parts of these zones can still be loaded or forwarded. +.. _setting-serve-stale-extensions: + +``serve-stale-extensions`` +-------------------------- +.. versionadded:: 4.8.0 + +- Integer +- Default: 0 + +Maximum number of times an expired record's TTL is extended by 30s when serving stale. +Extension only occurs if a record cannot be refreshed. +A value of 0 means the ``Serve Stale`` mechanism is not used. +To allow records becoming stale to be served for an hour, use a value of 200. +See :ref:`serve-stale` for a description of the Serve Stale mechanism. + .. _setting-server-down-max-fails: ``server-down-max-fails`` diff --git a/pdns/recursordist/negcache.cc b/pdns/recursordist/negcache.cc index 0a1db6ae38..4da1d92021 100644 --- a/pdns/recursordist/negcache.cc +++ b/pdns/recursordist/negcache.cc @@ -27,6 +27,7 @@ #include "utility.hh" #include "rec-taskqueue.hh" +// For a description on how ServeStale works, see recursor_cache.cc, the general structure is the same. uint16_t NegCache::s_maxServedStaleExtensions; NegCache::NegCache(size_t mapsCount) : diff --git a/pdns/recursordist/negcache.hh b/pdns/recursordist/negcache.hh index 4cef2a3de7..4afe166414 100644 --- a/pdns/recursordist/negcache.hh +++ b/pdns/recursordist/negcache.hh @@ -56,6 +56,7 @@ class NegCache : public boost::noncopyable public: NegCache(size_t mapsCount = 1024); + // For a description on how ServeStale works, see recursor_cache.cc, the general structure is the same. 
// The number of times a stale cache entry is extended static uint16_t s_maxServedStaleExtensions; // The time a stale cache entry is extended @@ -77,7 +78,7 @@ public: { // We like to keep things in cache when we (potentially) should serve stale if (s_maxServedStaleExtensions > 0) { - return d_ttd + s_maxServedStaleExtensions * std::min(s_serveStaleExtensionPeriod, d_orig_ttl) < now; + return d_ttd + static_cast(s_maxServedStaleExtensions) * std::min(s_serveStaleExtensionPeriod, d_orig_ttl) < now; } else { return d_ttd < now; diff --git a/pdns/recursordist/rec-main.cc b/pdns/recursordist/rec-main.cc index 22db5cdb5e..637d180304 100644 --- a/pdns/recursordist/rec-main.cc +++ b/pdns/recursordist/rec-main.cc @@ -1511,8 +1511,16 @@ static int serviceMain(int argc, char* argv[], Logr::log_t log) SyncRes::s_event_trace_enabled = ::arg().asNum("event-trace-enabled"); SyncRes::s_save_parent_ns_set = ::arg().mustDo("save-parent-ns-set"); SyncRes::s_max_busy_dot_probes = ::arg().asNum("max-busy-dot-probes"); - MemRecursorCache::s_maxServedStaleExtensions = ::arg().asNum("serve-stale-extensions"); - NegCache::s_maxServedStaleExtensions = ::arg().asNum("serve-stale-extensions"); + { + uint64_t sse = ::arg().asNum("serve-stale-extensions"); + if (sse > std::numeric_limits::max()) { + SLOG(g_log << Logger::Error << "Illegal serve-stale-extensions value: " << sse << "; range = 0..65536" << endl, + log->info(Logr::Error, "Illegal serve-stale-extensions value; range = 0..65536", "value", Logging::Loggable(sse))); + exit(1); + } + MemRecursorCache::s_maxServedStaleExtensions = sse; + NegCache::s_maxServedStaleExtensions = sse; + } if (SyncRes::s_tcp_fast_open_connect) { checkFastOpenSysctl(true, log); diff --git a/pdns/recursordist/recursor_cache.cc b/pdns/recursordist/recursor_cache.cc deleted file mode 100644 index d1c3522cc9..0000000000 --- a/pdns/recursordist/recursor_cache.cc +++ /dev/null @@ -1,758 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include - 
-#include "recursor_cache.hh" -#include "misc.hh" -#include -#include "dnsrecords.hh" -#include "arguments.hh" -#include "syncres.hh" -#include "recursor_cache.hh" -#include "namespaces.hh" -#include "cachecleaner.hh" -#include "rec-taskqueue.hh" - -uint16_t MemRecursorCache::s_maxServedStaleExtensions; - -MemRecursorCache::MemRecursorCache(size_t mapsCount) : - d_maps(mapsCount) -{ -} - -size_t MemRecursorCache::size() const -{ - size_t count = 0; - for (const auto& map : d_maps) { - count += map.d_entriesCount; - } - return count; -} - -pair MemRecursorCache::stats() -{ - uint64_t c = 0, a = 0; - for (auto& mc : d_maps) { - auto content = mc.lock(); - c += content->d_contended_count; - a += content->d_acquired_count; - } - return pair(c, a); -} - -size_t MemRecursorCache::ecsIndexSize() -{ - // XXX! - size_t count = 0; - for (auto& mc : d_maps) { - auto content = mc.lock(); - count += content->d_ecsIndex.size(); - } - return count; -} - -// this function is too slow to poll! -size_t MemRecursorCache::bytes() -{ - size_t ret = 0; - for (auto& mc : d_maps) { - auto m = mc.lock(); - for (const auto& i : m->d_map) { - ret += sizeof(struct CacheEntry); - ret += i.d_qname.toString().length(); - for (const auto& record : i.d_records) { - ret += sizeof(record); // XXX WRONG we don't know the stored size! 
- } - } - } - return ret; -} - -static void updateDNSSECValidationStateFromCache(boost::optional& state, const vState stateUpdate) -{ - // if there was no state it's easy */ - if (state == boost::none) { - state = stateUpdate; - return; - } - - if (stateUpdate == vState::TA) { - state = vState::Secure; - } - else if (stateUpdate == vState::NTA) { - state = vState::Insecure; - } - else if (vStateIsBogus(stateUpdate)) { - state = stateUpdate; - } - else if (stateUpdate == vState::Indeterminate) { - state = stateUpdate; - } - else if (stateUpdate == vState::Insecure) { - if (!vStateIsBogus(*state) && *state != vState::Indeterminate) { - state = stateUpdate; - } - } - else if (stateUpdate == vState::Secure) { - if (!vStateIsBogus(*state) && *state != vState::Indeterminate) { - state = stateUpdate; - } - } -} - -time_t MemRecursorCache::handleHit(MapCombo::LockedContent& content, MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, uint32_t& origTTL, vector* res, vector>* signatures, std::vector>* authorityRecs, bool* variable, boost::optional& state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP) -{ - // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock) - time_t ttd = entry->d_ttd; - origTTL = entry->d_orig_ttl; - - if (variable && (!entry->d_netmask.empty() || entry->d_rtag)) { - *variable = true; - } - - if (res) { - res->reserve(res->size() + entry->d_records.size()); - - for (const auto& k : entry->d_records) { - DNSRecord dr; - dr.d_name = qname; - dr.d_type = entry->d_qtype; - dr.d_class = QClass::IN; - dr.d_content = k; - dr.d_ttl = static_cast(entry->d_ttd); // XXX truncation - dr.d_place = DNSResourceRecord::ANSWER; - res->push_back(std::move(dr)); - } - } - - if (signatures) { - signatures->insert(signatures->end(), entry->d_signatures.begin(), entry->d_signatures.end()); - } - - if (authorityRecs) { - authorityRecs->insert(authorityRecs->end(), entry->d_authorityRecs.begin(), 
entry->d_authorityRecs.end()); - } - - updateDNSSECValidationStateFromCache(state, entry->d_state); - - if (wasAuth) { - *wasAuth = *wasAuth && entry->d_auth; - } - - if (fromAuthZone) { - *fromAuthZone = entry->d_authZone; - } - - if (fromAuthIP) { - *fromAuthIP = entry->d_from; - } - - moveCacheItemToBack(content.d_map, entry); - - return ttd; -} - -static void pushRefreshTask(const DNSName& qname, QType qtype, time_t deadline, const Netmask& netmask) -{ - if (qtype == QType::ADDR) { - pushAlmostExpiredTask(qname, QType::A, deadline, netmask); - pushAlmostExpiredTask(qname, QType::AAAA, deadline, netmask); - } - else { - pushAlmostExpiredTask(qname, qtype, deadline, netmask); - } -} - -void MemRecursorCache::updateStaleEntry(time_t now, MemRecursorCache::OrderedTagIterator_t& entry) -{ - // We need to take care a infrequently access stale item cannot be extended past - // s_maxServedStaleExtension * s_serveStaleExtensionPeriod - // We we look how old the entry is, and increase d_servedStale accordingly, taking care not to overflow - const time_t howlong = std::max(static_cast(1), now - entry->d_ttd); - const uint32_t extension = std::max(1U, std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod)); - entry->d_servedStale = std::min(entry->d_servedStale + 1 + howlong / extension, static_cast(s_maxServedStaleExtensions)); - entry->d_ttd = now + std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod); - - pushRefreshTask(entry->d_qname, entry->d_qtype, entry->d_ttd, entry->d_netmask); -} - -MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSIndex(MapCombo::LockedContent& map, time_t now, const DNSName& qname, const QType qtype, bool requireAuth, const ComboAddress& who, bool serveStale) -{ - // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock) - auto ecsIndexKey = std::tie(qname, qtype); - auto ecsIndex = map.d_ecsIndex.find(ecsIndexKey); - if (ecsIndex != map.d_ecsIndex.end() && 
!ecsIndex->isEmpty()) { - /* we have netmask-specific entries, let's see if we match one */ - while (true) { - const Netmask best = ecsIndex->lookupBestMatch(who); - if (best.empty()) { - /* we have nothing more specific for you */ - break; - } - auto key = std::make_tuple(qname, qtype, boost::none, best); - auto entry = map.d_map.find(key); - if (entry == map.d_map.end()) { - /* ecsIndex is not up-to-date */ - ecsIndex->removeNetmask(best); - if (ecsIndex->isEmpty()) { - map.d_ecsIndex.erase(ecsIndex); - break; - } - continue; - } - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. - if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, entry); - } - if (entry->d_ttd > now) { - if (!requireAuth || entry->d_auth) { - return entry; - } - /* we need auth data and the best match is not authoritative */ - return map.d_map.end(); - } - else { - /* this netmask-specific entry has expired */ - moveCacheItemToFront(map.d_map, entry); - // XXX when serving stale, it should be kept, but we don't want a match wth lookupBestMatch()... - ecsIndex->removeNetmask(best); - if (ecsIndex->isEmpty()) { - map.d_ecsIndex.erase(ecsIndex); - break; - } - } - } - } - - /* we have nothing specific, let's see if we have a generic one */ - auto key = std::make_tuple(qname, qtype, boost::none, Netmask()); - auto entry = map.d_map.find(key); - if (entry != map.d_map.end()) { - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. 
- if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, entry); - } - if (entry->d_ttd > now) { - if (!requireAuth || entry->d_auth) { - return entry; - } - } - else { - moveCacheItemToFront(map.d_map, entry); - } - } - - /* nothing for you, sorry */ - return map.d_map.end(); -} - -MemRecursorCache::Entries MemRecursorCache::getEntries(MapCombo::LockedContent& map, const DNSName& qname, const QType qt, const OptTag& rtag) -{ - // MUTEX SHOULD BE ACQUIRED - if (!map.d_cachecachevalid || map.d_cachedqname != qname || map.d_cachedrtag != rtag) { - map.d_cachedqname = qname; - map.d_cachedrtag = rtag; - const auto& idx = map.d_map.get(); - map.d_cachecache = idx.equal_range(std::tie(qname, rtag)); - map.d_cachecachevalid = true; - } - return map.d_cachecache; -} - -bool MemRecursorCache::entryMatches(MemRecursorCache::OrderedTagIterator_t& entry, const QType qt, bool requireAuth, const ComboAddress& who) -{ - // This code assumes that if a routing tag is present, it matches - // MUTEX SHOULD BE ACQUIRED - if (requireAuth && !entry->d_auth) - return false; - - bool match = (entry->d_qtype == qt || qt == QType::ANY || (qt == QType::ADDR && (entry->d_qtype == QType::A || entry->d_qtype == QType::AAAA))) - && (entry->d_netmask.empty() || entry->d_netmask.match(who)); - return match; -} - -// Fake a cache miss if more than refreshTTLPerc of the original TTL has passed -time_t MemRecursorCache::fakeTTD(MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, QType qtype, time_t ret, time_t now, uint32_t origTTL, bool refresh) -{ - time_t ttl = ret - now; - // If we are checking an entry being served stale in refresh mode, - // we always consider it stale so a real refresh attempt will be - // kicked by SyncRes - if (refresh && entry->d_servedStale > 0) { - return -1; - } - if (ttl > 0 && SyncRes::s_refresh_ttlperc > 0) { - const uint32_t deadline = origTTL * 
SyncRes::s_refresh_ttlperc / 100; - const bool almostExpired = static_cast(ttl) <= deadline; - if (almostExpired && qname != g_rootdnsname) { - if (refresh) { - return -1; - } - else { - if (!entry->d_submitted) { - pushRefreshTask(qname, qtype, entry->d_ttd, entry->d_netmask); - entry->d_submitted = true; - } - } - } - } - return ttl; -} -// returns -1 for no hits -time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qt, Flags flags, vector* res, const ComboAddress& who, const OptTag& routingTag, vector>* signatures, std::vector>* authorityRecs, bool* variable, vState* state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP) -{ - bool requireAuth = flags & RequireAuth; - bool refresh = flags & Refresh; - bool serveStale = flags & ServeStale; - - boost::optional cachedState{boost::none}; - uint32_t origTTL; - - if (res) { - res->clear(); - } - const uint16_t qtype = qt.getCode(); - if (wasAuth) { - // we might retrieve more than one entry, we need to set that to true - // so it will be set to false if at least one entry is not auth - *wasAuth = true; - } - - auto& mc = getMap(qname); - auto map = mc.lock(); - - /* If we don't have any netmask-specific entries at all, let's just skip this - to be able to use the nice d_cachecache hack. 
*/ - if (qtype != QType::ANY && !map->d_ecsIndex.empty() && !routingTag) { - if (qtype == QType::ADDR) { - time_t ret = -1; - - auto entryA = getEntryUsingECSIndex(*map, now, qname, QType::A, requireAuth, who, serveStale); - if (entryA != map->d_map.end()) { - ret = handleHit(*map, entryA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); - } - auto entryAAAA = getEntryUsingECSIndex(*map, now, qname, QType::AAAA, requireAuth, who, serveStale); - if (entryAAAA != map->d_map.end()) { - time_t ttdAAAA = handleHit(*map, entryAAAA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); - if (ret > 0) { - ret = std::min(ret, ttdAAAA); - } - else { - ret = ttdAAAA; - } - } - - if (state && cachedState) { - *state = *cachedState; - } - - return ret > 0 ? (ret - now) : ret; - } - else { - auto entry = getEntryUsingECSIndex(*map, now, qname, qtype, requireAuth, who, serveStale); - if (entry != map->d_map.end()) { - time_t ret = handleHit(*map, entry, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); - if (state && cachedState) { - *state = *cachedState; - } - return fakeTTD(entry, qname, qtype, ret, now, origTTL, refresh); - } - return -1; - } - } - - if (routingTag) { - auto entries = getEntries(*map, qname, qt, routingTag); - bool found = false; - time_t ttd; - - if (entries.first != entries.second) { - OrderedTagIterator_t firstIndexIterator; - for (auto i = entries.first; i != entries.second; ++i) { - firstIndexIterator = map->d_map.project(i); - - // When serving stale, we consider expired records - if (i->d_ttd <= now && !serveStale && i->d_servedStale == 0) { - moveCacheItemToFront(map->d_map, firstIndexIterator); - continue; - } - - if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) { - continue; - } - found = true; - // If we are serving this record stale (or *should*) and the - // ttd has 
passed increase ttd to the future and remember that - // we did. Also push a refresh task. - if ((serveStale || i->d_servedStale > 0) && i->d_ttd <= now && i->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, firstIndexIterator); - } - ttd = handleHit(*map, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); - - if (qt != QType::ANY && qt != QType::ADDR) { // normally if we have a hit, we are done - break; - } - } - if (found) { - if (state && cachedState) { - *state = *cachedState; - } - return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh); - } - else { - return -1; - } - } - } - // Try (again) without tag - auto entries = getEntries(*map, qname, qt, boost::none); - - if (entries.first != entries.second) { - OrderedTagIterator_t firstIndexIterator; - bool found = false; - time_t ttd; - - for (auto i = entries.first; i != entries.second; ++i) { - firstIndexIterator = map->d_map.project(i); - - // When serving stale, we consider expired records - if (i->d_ttd <= now && !serveStale && i->d_servedStale == 0) { - moveCacheItemToFront(map->d_map, firstIndexIterator); - continue; - } - - if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) { - continue; - } - found = true; - // If we are serving this record stale (or *should*) and the - // ttd has passed increase ttd to the future and remember that - // we did. Also push a refresh task. 
- if ((serveStale || i->d_servedStale > 0) && i->d_ttd <= now && i->d_servedStale < s_maxServedStaleExtensions) { - updateStaleEntry(now, firstIndexIterator); - } - ttd = handleHit(*map, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP); - - if (qt != QType::ANY && qt != QType::ADDR) { // normally if we have a hit, we are done - break; - } - } - if (found) { - if (state && cachedState) { - *state = *cachedState; - } - return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh); - } - } - return -1; -} - -void MemRecursorCache::replace(time_t now, const DNSName& qname, const QType qt, const vector& content, const vector>& signatures, const std::vector>& authorityRecs, bool auth, const DNSName& authZone, boost::optional ednsmask, const OptTag& routingTag, vState state, boost::optional from) -{ - auto& mc = getMap(qname); - auto map = mc.lock(); - - map->d_cachecachevalid = false; - if (ednsmask) { - ednsmask = ednsmask->getNormalized(); - } - - // We only store with a tag if we have an ednsmask and the tag is available - // We only store an ednsmask if we do not have a tag and we do have a mask. - auto key = std::make_tuple(qname, qt.getCode(), ednsmask ? routingTag : boost::none, (ednsmask && !routingTag) ? *ednsmask : Netmask()); - bool isNew = false; - cache_t::iterator stored = map->d_map.find(key); - if (stored == map->d_map.end()) { - stored = map->d_map.insert(CacheEntry(key, auth)).first; - ++mc.d_entriesCount; - isNew = true; - } - - /* if we are inserting a new entry or updating an expired one (in which case the - ECS index might have been removed but the entry still exists because it has not - been garbage collected yet) we might need to update the ECS index. - Otherwise it should already be indexed and we don't need to update it. 
- */ - if (isNew || stored->d_ttd <= now) { - /* don't bother building an ecsIndex if we don't have any netmask-specific entries */ - if (!routingTag && ednsmask && !ednsmask->empty()) { - auto ecsIndexKey = std::make_tuple(qname, qt.getCode()); - auto ecsIndex = map->d_ecsIndex.find(ecsIndexKey); - if (ecsIndex == map->d_ecsIndex.end()) { - ecsIndex = map->d_ecsIndex.insert(ECSIndexEntry(qname, qt.getCode())).first; - } - ecsIndex->addMask(*ednsmask); - } - } - - time_t maxTTD = std::numeric_limits::max(); - CacheEntry ce = *stored; // this is a COPY - ce.d_qtype = qt.getCode(); - - if (!auth && ce.d_auth) { // unauth data came in, we have some auth data, but is it fresh? - if (ce.d_ttd > now) { // we still have valid data, ignore unauth data - return; - } - else { - ce.d_auth = false; // new data won't be auth - } - } - - if (auth) { - /* we don't want to keep a non-auth entry while we have an auth one */ - if (vStateIsBogus(state) && (!vStateIsBogus(ce.d_state) && ce.d_state != vState::Indeterminate) && ce.d_ttd > now) { - /* the new entry is Bogus, the existing one is not and is still valid, let's keep the existing one */ - return; - } - } - - ce.d_state = state; - - // refuse any attempt to *raise* the TTL of auth NS records, as it would make it possible - // for an auth to keep a "ghost" zone alive forever, even after the delegation is gone from - // the parent - // BUT make sure that we CAN refresh the root - if (ce.d_auth && auth && qt == QType::NS && !isNew && !qname.isRoot()) { - // cerr<<"\tLimiting TTL of auth->auth NS set replace to "<(i.d_ttl)); // XXX this does weird things if TTLs differ in the set - ce.d_orig_ttl = ce.d_ttd - now; - ce.d_records.push_back(i.d_content); - } - - if (!isNew) { - moveCacheItemToBack(map->d_map, stored); - } - ce.d_submitted = false; - ce.d_servedStale = 0; - map->d_map.replace(stored, ce); -} - -size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, const QType qtype) -{ - size_t count = 0; - - if (!sub) { 
- auto& mc = getMap(name); - auto map = mc.lock(); - map->d_cachecachevalid = false; - auto& idx = map->d_map.get(); - auto range = idx.equal_range(name); - auto i = range.first; - while (i != range.second) { - if (i->d_qtype == qtype || qtype == 0xffff) { - i = idx.erase(i); - count++; - --mc.d_entriesCount; - } - else { - ++i; - } - } - - if (qtype == 0xffff) { - auto& ecsIdx = map->d_ecsIndex.get(); - auto ecsIndexRange = ecsIdx.equal_range(name); - ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second); - } - else { - auto& ecsIdx = map->d_ecsIndex.get(); - auto ecsIndexRange = ecsIdx.equal_range(std::tie(name, qtype)); - ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second); - } - } - else { - for (auto& mc : d_maps) { - auto map = mc.lock(); - map->d_cachecachevalid = false; - auto& idx = map->d_map.get(); - for (auto i = idx.lower_bound(name); i != idx.end();) { - if (!i->d_qname.isPartOf(name)) - break; - if (i->d_qtype == qtype || qtype == 0xffff) { - count++; - i = idx.erase(i); - --mc.d_entriesCount; - } - else { - ++i; - } - } - auto& ecsIdx = map->d_ecsIndex.get(); - for (auto i = ecsIdx.lower_bound(name); i != ecsIdx.end();) { - if (!i->d_qname.isPartOf(name)) - break; - if (i->d_qtype == qtype || qtype == 0xffff) { - i = ecsIdx.erase(i); - } - else { - ++i; - } - } - } - } - return count; -} - -// Name should be doLimitTime or so -bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, const QType qtype, uint32_t newTTL) -{ - auto& mc = getMap(name); - auto map = mc.lock(); - cache_t::iterator iter = map->d_map.find(std::tie(name, qtype)); - if (iter == map->d_map.end()) { - return false; - } - - CacheEntry ce = *iter; - if (ce.d_ttd < now) - return false; // would be dead anyhow - - uint32_t maxTTL = static_cast(ce.d_ttd - now); - if (maxTTL > newTTL) { - map->d_cachecachevalid = false; - - time_t newTTD = now + newTTL; - - if (ce.d_ttd > newTTD) { - ce.d_ttd = newTTD; - map->d_map.replace(iter, ce); - } - return true; - } - return 
false; -} - -bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName& qname, const QType qt, const ComboAddress& who, const OptTag& routingTag, bool requireAuth, vState newState, boost::optional capTTD) -{ - uint16_t qtype = qt.getCode(); - if (qtype == QType::ANY) { - throw std::runtime_error("Trying to update the DNSSEC validation status of all (via ANY) records for " + qname.toLogString()); - } - if (qtype == QType::ADDR) { - throw std::runtime_error("Trying to update the DNSSEC validation status of several (via ADDR) records for " + qname.toLogString()); - } - - auto& mc = getMap(qname); - auto map = mc.lock(); - - bool updated = false; - if (!map->d_ecsIndex.empty() && !routingTag) { - auto entry = getEntryUsingECSIndex(*map, now, qname, qtype, requireAuth, who, false); // XXX serveStale? - if (entry == map->d_map.end()) { - return false; - } - - entry->d_state = newState; - if (capTTD) { - entry->d_ttd = std::min(entry->d_ttd, *capTTD); - } - return true; - } - - auto entries = getEntries(*map, qname, qt, routingTag); - - for (auto i = entries.first; i != entries.second; ++i) { - auto firstIndexIterator = map->d_map.project(i); - - if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) { - continue; - } - - i->d_state = newState; - if (capTTD) { - i->d_ttd = std::min(i->d_ttd, *capTTD); - } - updated = true; - - break; - } - - return updated; -} - -uint64_t MemRecursorCache::doDump(int fd) -{ - int newfd = dup(fd); - if (newfd == -1) { - return 0; - } - auto fp = std::unique_ptr(fdopen(newfd, "w"), fclose); - if (!fp) { // dup probably failed - close(newfd); - return 0; - } - - fprintf(fp.get(), "; main record cache dump follows\n;\n"); - uint64_t count = 0; - - for (auto& mc : d_maps) { - auto map = mc.lock(); - const auto& sidx = map->d_map.get(); - - time_t now = time(nullptr); - for (const auto& i : sidx) { - for (const auto& j : i.d_records) { - count++; - try { - fprintf(fp.get(), "%s %" PRIu32 " %" PRId64 " IN %s %s ; (%s) 
auth=%i zone=%s from=%s nm=%s rtag=%s ss=%hd\n", i.d_qname.toString().c_str(), i.d_orig_ttl, static_cast(i.d_ttd - now), i.d_qtype.toString().c_str(), j->getZoneRepresentation().c_str(), vStateToString(i.d_state).c_str(), i.d_auth, i.d_authZone.toLogString().c_str(), i.d_from.toString().c_str(), i.d_netmask.empty() ? "" : i.d_netmask.toString().c_str(), !i.d_rtag ? "" : i.d_rtag.get().c_str(), i.d_servedStale); - } - catch (...) { - fprintf(fp.get(), "; error printing '%s'\n", i.d_qname.empty() ? "EMPTY" : i.d_qname.toString().c_str()); - } - } - for (const auto& sig : i.d_signatures) { - count++; - try { - fprintf(fp.get(), "%s %" PRIu32 " %" PRId64 " IN RRSIG %s ; %s\n", i.d_qname.toString().c_str(), i.d_orig_ttl, static_cast(i.d_ttd - now), sig->getZoneRepresentation().c_str(), i.d_netmask.empty() ? "" : i.d_netmask.toString().c_str()); - } - catch (...) { - fprintf(fp.get(), "; error printing '%s'\n", i.d_qname.empty() ? "EMPTY" : i.d_qname.toString().c_str()); - } - } - } - } - return count; -} - -void MemRecursorCache::doPrune(size_t keep) -{ - // size_t maxCached = d_maxEntries; - size_t cacheSize = size(); - pruneMutexCollectionsVector(*this, d_maps, keep, cacheSize); -} - -namespace boost -{ -size_t hash_value(const MemRecursorCache::OptTag& o) -{ - return o ? 
hash_value(o.get()) : 0xcafebaaf; -} -} diff --git a/pdns/recursordist/recursor_cache.cc b/pdns/recursordist/recursor_cache.cc new file mode 120000 index 0000000000..00b6d25b91 --- /dev/null +++ b/pdns/recursordist/recursor_cache.cc @@ -0,0 +1 @@ +../recursor_cache.cc \ No newline at end of file diff --git a/pdns/recursordist/test-syncres_cc10.cc b/pdns/recursordist/test-syncres_cc10.cc index 4bd864d42d..3d747d9977 100644 --- a/pdns/recursordist/test-syncres_cc10.cc +++ b/pdns/recursordist/test-syncres_cc10.cc @@ -1075,7 +1075,9 @@ BOOST_AUTO_TEST_CASE(test_servestale) size_t downCount = 0; size_t lookupCount = 0; - sr->setAsyncCallback([&downServers,&downCount,&lookupCount, target](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional& srcmask, boost::optional context, LWResult* res, bool* chained) { + const int theTTL = 5; + + sr->setAsyncCallback([&downServers, &downCount, &lookupCount, target](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional& srcmask, boost::optional context, LWResult* res, bool* chained) { /* this will cause issue with qname minimization if we ever implement it */ if (downServers.find(ip) != downServers.end()) { downCount++; @@ -1091,12 +1093,12 @@ BOOST_AUTO_TEST_CASE(test_servestale) } else if (ip == ComboAddress("192.0.2.1:53") || ip == ComboAddress("[2001:DB8::1]:53")) { setLWResult(res, 0, false, false, true); - addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns1.powerdns.com.", DNSResourceRecord::AUTHORITY, 5); - addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns2.powerdns.com.", DNSResourceRecord::AUTHORITY, 5); + addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns1.powerdns.com.", DNSResourceRecord::AUTHORITY, theTTL); + addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns2.powerdns.com.", 
DNSResourceRecord::AUTHORITY, theTTL); addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::A, "192.0.2.2", DNSResourceRecord::ADDITIONAL, 5); - addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::AAAA, "2001:DB8::2", DNSResourceRecord::ADDITIONAL, 5); + addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::AAAA, "2001:DB8::2", DNSResourceRecord::ADDITIONAL, theTTL); addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::A, "192.0.2.3", DNSResourceRecord::ADDITIONAL, 5); - addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::AAAA, "2001:DB8::3", DNSResourceRecord::ADDITIONAL, 5); + addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::AAAA, "2001:DB8::3", DNSResourceRecord::ADDITIONAL, theTTL); return LWResult::Result::Success; } else if (ip == ComboAddress("192.0.2.2:53") || ip == ComboAddress("192.0.2.3:53") || ip == ComboAddress("[2001:DB8::2]:53") || ip == ComboAddress("[2001:DB8::3]:53")) { @@ -1126,8 +1128,10 @@ BOOST_AUTO_TEST_CASE(test_servestale) downServers.insert(ComboAddress("[2001:DB8::2]:53")); downServers.insert(ComboAddress("[2001:DB8::3]:53")); - sr->setNow(timeval{now + 6, 0}); - + sr->setNow(timeval{now + theTTL + 1, 0}); + + BOOST_REQUIRE_EQUAL(getTaskSize(), 0U); + // record is expired, so serve stale should kick in ret.clear(); res = sr->beginResolve(target, QType(QType::A), QClass::IN, ret); @@ -1138,6 +1142,11 @@ BOOST_AUTO_TEST_CASE(test_servestale) BOOST_CHECK_EQUAL(downCount, 4U); BOOST_CHECK_EQUAL(lookupCount, 1U); + BOOST_REQUIRE_EQUAL(getTaskSize(), 1U); + auto task = taskQueuePop(); + BOOST_CHECK(task.d_qname == target); + BOOST_CHECK_EQUAL(task.d_qtype, QType::A); + // Again, no lookup as the record is marked stale ret.clear(); res = sr->beginResolve(target, QType(QType::A), QClass::IN, ret); @@ -1149,7 +1158,7 @@ BOOST_AUTO_TEST_CASE(test_servestale) BOOST_CHECK_EQUAL(lookupCount, 1U); // Again, no lookup as the record is marked stale but as the TTL has passed a task should have been pushed - 
sr->setNow(timeval{now + 12, 0}); + sr->setNow(timeval{now + 2 * (theTTL + 1), 0}); ret.clear(); res = sr->beginResolve(target, QType(QType::A), QClass::IN, ret); BOOST_CHECK_EQUAL(res, RCode::NoError); @@ -1160,12 +1169,12 @@ BOOST_AUTO_TEST_CASE(test_servestale) BOOST_CHECK_EQUAL(lookupCount, 1U); BOOST_REQUIRE_EQUAL(getTaskSize(), 1U); - auto task = taskQueuePop(); + task = taskQueuePop(); BOOST_CHECK(task.d_qname == target); BOOST_CHECK_EQUAL(task.d_qtype, QType::A); // Now simulate a succeeding task execution - sr->setNow(timeval{now + 18, 0}); + sr->setNow(timeval{now + 3 * (theTTL + 1), 0}); downServers.clear(); sr->setRefreshAlmostExpired(true); ret.clear(); @@ -1187,7 +1196,6 @@ BOOST_AUTO_TEST_CASE(test_servestale) BOOST_CHECK_EQUAL(ret[0].d_name, target); BOOST_CHECK_EQUAL(downCount, 4U); BOOST_CHECK_EQUAL(lookupCount, 2U); - } BOOST_AUTO_TEST_CASE(test_servestale_neg) @@ -1205,7 +1213,9 @@ BOOST_AUTO_TEST_CASE(test_servestale_neg) size_t downCount = 0; size_t lookupCount = 0; - sr->setAsyncCallback([&downServers,&downCount,&lookupCount, target](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional& srcmask, boost::optional context, LWResult* res, bool* chained) { + const int theTTL = 5; + + sr->setAsyncCallback([&downServers, &downCount, &lookupCount, target](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional& srcmask, boost::optional context, LWResult* res, bool* chained) { /* this will cause issue with qname minimization if we ever implement it */ if (downServers.find(ip) != downServers.end()) { downCount++; @@ -1221,12 +1231,12 @@ BOOST_AUTO_TEST_CASE(test_servestale_neg) } else if (ip == ComboAddress("192.0.2.1:53") || ip == ComboAddress("[2001:DB8::1]:53")) { setLWResult(res, 0, false, false, true); - addRecordToLW(res, "powerdns.com.", QType::NS, 
"pdns-public-ns1.powerdns.com.", DNSResourceRecord::AUTHORITY, 5); - addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns2.powerdns.com.", DNSResourceRecord::AUTHORITY, 5); - addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::A, "192.0.2.2", DNSResourceRecord::ADDITIONAL, 5); - addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::AAAA, "2001:DB8::2", DNSResourceRecord::ADDITIONAL, 5); - addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::A, "192.0.2.3", DNSResourceRecord::ADDITIONAL, 5); - addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::AAAA, "2001:DB8::3", DNSResourceRecord::ADDITIONAL, 5); + addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns1.powerdns.com.", DNSResourceRecord::AUTHORITY, theTTL); + addRecordToLW(res, "powerdns.com.", QType::NS, "pdns-public-ns2.powerdns.com.", DNSResourceRecord::AUTHORITY, theTTL); + addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::A, "192.0.2.2", DNSResourceRecord::ADDITIONAL, theTTL); + addRecordToLW(res, "pdns-public-ns1.powerdns.com.", QType::AAAA, "2001:DB8::2", DNSResourceRecord::ADDITIONAL, theTTL); + addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::A, "192.0.2.3", DNSResourceRecord::ADDITIONAL, theTTL); + addRecordToLW(res, "pdns-public-ns2.powerdns.com.", QType::AAAA, "2001:DB8::3", DNSResourceRecord::ADDITIONAL, theTTL); return LWResult::Result::Success; } else if (ip == ComboAddress("192.0.2.2:53") || ip == ComboAddress("192.0.2.3:53") || ip == ComboAddress("[2001:DB8::2]:53") || ip == ComboAddress("[2001:DB8::3]:53")) { @@ -1256,7 +1266,9 @@ BOOST_AUTO_TEST_CASE(test_servestale_neg) downServers.insert(ComboAddress("[2001:DB8::2]:53")); downServers.insert(ComboAddress("[2001:DB8::3]:53")); - sr->setNow(timeval{now + 61, 0}); + const int negTTL = 60; + + sr->setNow(timeval{now + negTTL + 1, 0}); // record is expired, so serve stale should kick in ret.clear(); @@ -1279,7 +1291,7 @@ BOOST_AUTO_TEST_CASE(test_servestale_neg) 
BOOST_CHECK_EQUAL(lookupCount, 1U); // Again, no lookup as the record is marked stale but as the TTL has passed a task should have been pushed - sr->setNow(timeval{now + 122, 0}); + sr->setNow(timeval{now + 2 * (negTTL + 1), 0}); ret.clear(); res = sr->beginResolve(target, QType(QType::A), QClass::IN, ret); BOOST_CHECK_EQUAL(res, RCode::NoError); @@ -1295,7 +1307,7 @@ BOOST_AUTO_TEST_CASE(test_servestale_neg) BOOST_CHECK_EQUAL(task.d_qtype, QType::A); // Now simulate a succeeding task execution - sr->setNow(timeval{now + 183, 0}); + sr->setNow(timeval{now + 3 * (negTTL + 1), 0}); downServers.clear(); sr->setRefreshAlmostExpired(true); ret.clear();