auth: lua-records, support cleaning old hashed entries 13752/head
author Charles-Henri Bruyand <charles-henri.bruyand@open-xchange.com>
Thu, 8 Feb 2024 13:20:45 +0000 (14:20 +0100)
committer Charles-Henri Bruyand <charles-henri.bruyand@open-xchange.com>
Fri, 9 Feb 2024 13:11:40 +0000 (14:11 +0100)
docs/lua-records/functions.rst
docs/settings.rst
pdns/auth-main.cc
pdns/auth-main.hh
pdns/lua-record.cc

index e63e9c7a112cca62515b33c6cc4cd72c341a48d5..0c578c72d3834c1945aef1f1b9864b8572161153 100644 (file)
@@ -220,6 +220,10 @@ Record creation functions
   - updating the weight of an entry will only affect a part of the distribution
   - because of the previous properties, the CPU and memory cost is a bit higher than :func:`pickwhashed`
 
+  Hashes will be pre-computed the first time such a record is hit, and refreshed when needed. If the list of entries is updated often,
+  the cache may grow. A cleanup routine runs every :ref:`setting-lua-consistent-hashes-cleanup-interval` seconds (default 1h)
+  and removes cached entries for records that haven't been used for :ref:`setting-lua-consistent-hashes-expire-delay` seconds (default 24h).
+
   An example::
 
     mydomain.example.com    IN    LUA    A ("pickchashed({                             "
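The hashes being cached here are the points each weighted entry occupies on a consistent-hashing ring. As a rough, self-contained illustration of the technique (this is not the PowerDNS implementation; the real hashing and selection logic lives in pickConsistentWeightedHashed() in pdns/lua-record.cc below, and the point derivation used here is purely hypothetical)::

  #include <cstdint>
  #include <functional>
  #include <map>
  #include <string>

  // Weighted consistent hashing, schematically: each entry gets `weight`
  // points on a ring; a query hash is mapped to the first point at or
  // after it, wrapping around. Assumes at least one entry has been added.
  struct Ring
  {
    std::map<uint32_t, std::string> points; // ring position -> entry

    void add(const std::string& entry, unsigned int weight)
    {
      for (unsigned int i = 0; i < weight; ++i) {
        // Hypothetical point derivation, for illustration only.
        auto pos = static_cast<uint32_t>(std::hash<std::string>{}(entry + "#" + std::to_string(i)));
        points[pos] = entry;
      }
    }

    std::string pick(uint32_t queryHash) const
    {
      auto candidate = points.lower_bound(queryHash); // first point >= query hash
      if (candidate == points.end()) {
        candidate = points.begin(); // wrap around the ring
      }
      return candidate->second;
    }
  };

Because each entry keeps its own ring points, adding or removing one entry only remaps the queries that used to land on that entry's points, which is what makes the distribution "consistent".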
index 4d191681e67c4e31dc472c051152cc1e9d8e285f..17d61bebdf2a8d43d0a3820943691c9ea3945ca5 100644 (file)
@@ -1010,6 +1010,30 @@ When enabled, log messages are formatted like structured logs, including their l
 
 Script to be used to edit incoming AXFRs, see :ref:`modes-of-operation-axfrfilter`
 
+.. _setting-lua-consistent-hashes-cleanup-interval:
+
+``lua-consistent-hashes-cleanup-interval``
+------------------------------------------
+
+-  Integer
+-  Default: 3600
+
+.. versionadded:: 4.9.0
+
+Amount of time (in seconds) between two consecutive runs of the cleanup routine for pre-computed hashes related to :func:`pickchashed()`.
+
+.. _setting-lua-consistent-hashes-expire-delay:
+
+``lua-consistent-hashes-expire-delay``
+--------------------------------------
+
+-  Integer
+-  Default: 86400
+
+.. versionadded:: 4.9.0
+
+Amount of time (in seconds) after which an unused pre-computed hash entry is considered expired and eligible for cleanup. See :func:`pickchashed()`.
+
 .. _setting-lua-health-checks-expire-delay:
 
 ``lua-health-checks-expire-delay``
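Taken together, the two new settings gate a periodic sweep: at most once per ``lua-consistent-hashes-cleanup-interval`` seconds, cached entries whose last use is older than ``lua-consistent-hashes-expire-delay`` seconds are dropped. A minimal sketch of that timing logic, simplified from the cleanZoneHashes() routine added to pdns/lua-record.cc further down (the variable names here are stand-ins, not the real globals)::

  #include <ctime>

  static time_t s_cleanupInterval = 3600; // lua-consistent-hashes-cleanup-interval
  static time_t s_expireDelay = 86400;    // lua-consistent-hashes-expire-delay
  static time_t s_lastCleanup = 0;

  // Decide whether a sweep should run now; if so, compute the cutoff:
  // entries whose lastUsed timestamp is older than `cutoff` are expired.
  static bool shouldSweep(time_t now, time_t& cutoff)
  {
    if (s_lastCleanup > now - s_cleanupInterval) {
      return false; // a sweep already ran within the interval
    }
    s_lastCleanup = now;
    cutoff = now - s_expireDelay;
    return true;
  }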
index 0c9dc4b80ee4fa863fef6a9db74b68e2c480c506..4155b743de5650089cf7d7b7a3c074c29544fae3 100644 (file)
@@ -109,6 +109,8 @@ bool g_doLuaRecord;
 int g_luaRecordExecLimit;
 time_t g_luaHealthChecksInterval{5};
 time_t g_luaHealthChecksExpireDelay{3600};
+time_t g_luaConsistentHashesExpireDelay{86400};
+time_t g_luaConsistentHashesCleanupInterval{3600};
 #endif
 #ifdef ENABLE_GSS_TSIG
 bool g_doGssTSIG;
@@ -308,6 +310,8 @@ static void declareArguments()
   ::arg().set("lua-records-exec-limit", "LUA records scripts execution limit (instructions count). Values <= 0 mean no limit") = "1000";
   ::arg().set("lua-health-checks-expire-delay", "Stops doing health checks after the record hasn't been used for that delay (in seconds)") = "3600";
   ::arg().set("lua-health-checks-interval", "LUA records health checks monitoring interval in seconds") = "5";
+  ::arg().set("lua-consistent-hashes-cleanup-interval", "Pre-computed hashes cleanup interval (in seconds)") = "3600";
+  ::arg().set("lua-consistent-hashes-expire-delay", "Cleanup pre-computed hashes that haven't been used for the given delay (in seconds). See pickchashed() LUA function") = "86400";
 #endif
   ::arg().setSwitch("axfr-lower-serial", "Also AXFR a zone from a primary with a lower serial") = "no";
 
@@ -699,6 +703,8 @@ static void mainthread()
   g_LuaRecordSharedState = (::arg()["enable-lua-records"] == "shared");
   g_luaRecordExecLimit = ::arg().asNum("lua-records-exec-limit");
   g_luaHealthChecksInterval = ::arg().asNum("lua-health-checks-interval");
+  g_luaConsistentHashesExpireDelay = ::arg().asNum("lua-consistent-hashes-expire-delay");
+  g_luaConsistentHashesCleanupInterval = ::arg().asNum("lua-consistent-hashes-cleanup-interval");
   g_luaHealthChecksExpireDelay = ::arg().asNum("lua-health-checks-expire-delay");
 #endif
 #ifdef ENABLE_GSS_TSIG
index 764a643a7299cea6144089eeb32a93b0948ec725..b96a61c681870a6060b20544d8c64d9d927d2636 100644 (file)
@@ -52,4 +52,6 @@ extern bool g_doLuaRecord;
 extern bool g_LuaRecordSharedState;
 extern time_t g_luaHealthChecksInterval;
 extern time_t g_luaHealthChecksExpireDelay;
+extern time_t g_luaConsistentHashesExpireDelay;
+extern time_t g_luaConsistentHashesCleanupInterval;
 #endif // HAVE_LUA_RECORDS
index d4a78a23ee76f0ce246790b4f70bf801062fbf65..5f3e7cb6156b717e76d58d4d5a1a81afffc42f1c 100644 (file)
@@ -14,7 +14,6 @@
 #include "sstuff.hh"
 #include "minicurl.hh"
 #include "ueberbackend.hh"
-#include "dnsrecords.hh"
 #include "dns_random.hh"
 #include "auth-main.hh"
 #include "../modules/geoipbackend/geoipinterface.hh" // only for the enum
@@ -656,12 +655,14 @@ static thread_local unique_ptr<lua_record_ctx_t> s_lua_record_ctx;
 /*
  *  Holds computed hashes for a given entry
  */
-struct EntryHashesHolder {
+struct EntryHashesHolder
+{
   std::atomic<size_t> weight;
   std::string entry;
   SharedLockGuarded<std::vector<unsigned int>> hashes;
+  std::atomic<time_t> lastUsed;
 
-  EntryHashesHolder(size_t weight_, std::string entry_): weight(weight_), entry(std::move(entry_)) {
+  EntryHashesHolder(size_t weight_, std::string entry_, time_t lastUsed_ = time(nullptr)): weight(weight_), entry(std::move(entry_)), lastUsed(lastUsed_) {
   }
 
   bool hashesComputed() {
@@ -683,24 +684,72 @@ struct EntryHashesHolder {
   }
 };
 
-static std::map<
-  std::tuple<int, std::string, std::string>, // zoneid qname entry
+using zone_hashes_key_t = std::tuple<int, std::string, std::string>;
+
+static SharedLockGuarded<std::map<
+  zone_hashes_key_t, // zoneid qname entry
   std::shared_ptr<EntryHashesHolder> // entry w/ corresponding hashes
-  >
+  >>
 s_zone_hashes;
 
+static std::atomic<time_t> s_lastConsistentHashesCleanup = 0;
+
+/**
+ * Every ~g_luaConsistentHashesCleanupInterval seconds, remove entries that have not been used within the last g_luaConsistentHashesExpireDelay seconds.
+ */
+static void cleanZoneHashes()
+{
+  auto now = time(nullptr);
+  if (s_lastConsistentHashesCleanup > (now - g_luaConsistentHashesCleanupInterval)) {
+    return;
+  }
+  s_lastConsistentHashesCleanup = now;
+  std::vector<zone_hashes_key_t> toDelete{};
+  {
+    auto locked = s_zone_hashes.read_lock();
+    auto someTimeAgo = now - g_luaConsistentHashesExpireDelay;
+
+    for (const auto& [key, entry]: *locked) {
+      if (entry->lastUsed < someTimeAgo) { // not used since the expiry cutoff
+        toDelete.push_back(key);
+      }
+    }
+  }
+  if (!toDelete.empty()) {
+    auto wlocked = s_zone_hashes.write_lock();
+    for (const auto& key : toDelete) {
+      wlocked->erase(key);
+    }
+  }
+}
+
 static std::vector<std::shared_ptr<EntryHashesHolder>> getCHashedEntries(const int zoneId, const std::string& queryName, const std::vector<std::pair<int, std::string>>& items)
 {
   std::vector<std::shared_ptr<EntryHashesHolder>> result{};
+  std::map<zone_hashes_key_t, std::shared_ptr<EntryHashesHolder>> newEntries{};
+
+  {
+    time_t now = time(nullptr);
+    auto locked = s_zone_hashes.read_lock();
 
-  for (const auto& [weight, entry]: items) {
-    auto key = std::make_tuple(zoneId, queryName, entry);
-    if (s_zone_hashes.count(key) == 0) {
-      s_zone_hashes[key] = std::make_shared<EntryHashesHolder>(weight, entry);
-    } else {
-      s_zone_hashes.at(key)->weight = weight;
+    for (const auto& [weight, entry]: items) {
+      auto key = std::make_tuple(zoneId, queryName, entry);
+      if (locked->count(key) == 0) {
+        newEntries[key] = std::make_shared<EntryHashesHolder>(weight, entry, now);
+      } else {
+        locked->at(key)->weight = weight;
+        locked->at(key)->lastUsed = now;
+        result.push_back(locked->at(key));
+      }
+    }
+  }
+  if (!newEntries.empty()) {
+    auto wlocked = s_zone_hashes.write_lock();
+
+    for (auto& [key, entry]: newEntries) {
+      result.push_back(entry);
+      (*wlocked)[key] = std::move(entry);
     }
-    result.push_back(s_zone_hashes.at(key));
   }
 
   return result;
@@ -716,6 +765,8 @@ static std::string pickConsistentWeightedHashed(const ComboAddress& bestwho, con
   boost::optional<std::string> ret;
   boost::optional<std::string> first;
 
+  cleanZoneHashes();
+
   auto entries = getCHashedEntries(zoneId, queryName, items);
 
   ComboAddress::addressOnlyHash addrOnlyHash;
@@ -1153,12 +1204,12 @@ static void setupLuaRecords(LuaContext& lua) // NOLINT(readability-function-cogn
    * supplied, as weighted by the various `weight` parameters and distributed consistently
    * @example pickchashed({ {15, '1.2.3.4'}, {50, '5.4.3.2'} })
    */
-  lua.writeFunction("pickchashed", [](std::unordered_map<int, wiplist_t > ips) {
-    vector< pair<int, string> > items;
+  lua.writeFunction("pickchashed", [](const std::unordered_map<int, wiplist_t>& ips) {
+    std::vector<std::pair<int, std::string>> items;
 
     items.reserve(ips.size());
-    for (auto& entry : ips) {
-      items.emplace_back(atoi(entry.second[1].c_str()), entry.second[2]);
+    for (const auto& entry : ips) {
+      items.emplace_back(atoi(entry.second.at(1).c_str()), entry.second.at(2));
     }
 
     return pickConsistentWeightedHashed(s_lua_record_ctx->bestwho, items);
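A note on the locking in getCHashedEntries() above: existing entries are collected under a shared read lock, and only genuinely new entries are inserted afterwards in a short exclusive write section, so steady-state queries never take the write lock. A self-contained sketch of the same pattern, using std::shared_mutex instead of PowerDNS' SharedLockGuarded wrapper (the names and types here are illustrative only)::

  #include <map>
  #include <memory>
  #include <mutex>
  #include <shared_mutex>
  #include <string>
  #include <vector>

  struct Holder
  {
    std::string entry;
  };

  static std::shared_mutex s_cacheMutex;
  static std::map<std::string, std::shared_ptr<Holder>> s_cache;

  // Return a holder for every requested key: existing holders are collected
  // under a shared (read) lock; missing ones are created locally and then
  // inserted in one short exclusive (write) section.
  static std::vector<std::shared_ptr<Holder>> lookupOrCreate(const std::vector<std::string>& keys)
  {
    std::vector<std::shared_ptr<Holder>> result;
    std::map<std::string, std::shared_ptr<Holder>> missing;
    {
      std::shared_lock lock(s_cacheMutex); // readers do not block each other
      for (const auto& key : keys) {
        auto found = s_cache.find(key);
        if (found != s_cache.end()) {
          result.push_back(found->second);
        }
        else {
          missing[key] = std::make_shared<Holder>(Holder{key});
        }
      }
    }
    if (!missing.empty()) {
      std::unique_lock lock(s_cacheMutex); // exclusive only while inserting
      for (auto& [key, holder] : missing) {
        result.push_back(holder);
        s_cache[key] = std::move(holder);
      }
    }
    return result;
  }

If two threads race on the same missing key, the later insert simply overwrites the earlier holder, just as the ``(*wlocked)[key] = std::move(entry)`` assignment does in the diff; the worst case is that the hashes for that entry are computed more than once.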