From 965bab926f673ba0970834497b3bebfacd0201e5 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Vladim=C3=ADr=20=C4=8Cun=C3=A1t?=
Date: Thu, 27 Jul 2017 11:25:59 +0200
Subject: [PATCH] predict: refresh expiring records immediately

It seemed a bit strange to have the 'stats' module collect expiring
records in an LRU, then once every few minutes convert that via JSON
to a Lua table and put those records into the prefetching queue.
Apart from the complexity, it wouldn't work well with the short TTLs
typical of today's CDNs, e.g. 30 or 60 seconds.
---
 modules/predict/README.rst  |  1 +
 modules/predict/predict.lua | 34 ++++++++++++++++++++++------------
 2 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/modules/predict/README.rst b/modules/predict/README.rst
index e1c9ab78b..ac10565bb 100644
--- a/modules/predict/README.rst
+++ b/modules/predict/README.rst
@@ -29,6 +29,7 @@ Example configuration
 Defaults are 15 minutes window, 6 hours period.
 
 .. tip:: Use period 0 to turn off prediction and just do prefetching of expiring records.
+   That works even without the 'stats' module.
 
 Exported metrics
 ^^^^^^^^^^^^^^^^
diff --git a/modules/predict/predict.lua b/modules/predict/predict.lua
index 4bc1acf3f..da0a7a837 100644
--- a/modules/predict/predict.lua
+++ b/modules/predict/predict.lua
@@ -76,13 +76,6 @@ local function enqueue_from_log(current)
 	return queued
 end
 
--- Prefetch soon-to-expire records
-function predict.prefetch()
-	local queries = stats.expiring()
-	stats.clear_expiring()
-	return enqueue(queries)
-end
-
 -- Sample current epoch, return number of sampled queries
 function predict.sample(epoch_now)
 	if not epoch_now then return 0, 0 end
@@ -119,7 +112,9 @@ local function generate(epoch_now)
 end
 
 function predict.process(ev)
-	if not stats then error("'stats' module required") end
+	if (predict.period or 0) ~= 0 and not stats then
+		error("'stats' module required")
+	end
 	-- Start a new epoch, or continue sampling
 	predict.ev_sample = nil
 	local epoch_now = current_epoch()
@@ -140,8 +135,6 @@ function predict.process(ev)
 
 	-- Sample current epoch
 	local nr_learned = predict.sample(epoch_now)
-	-- Prefetch expiring records
-	nr_queued = nr_queued + predict.prefetch()
 	-- Dispatch predicted queries
 	if nr_queued > 0 then
 		predict.queue_len = predict.queue_len + nr_queued
@@ -151,8 +144,10 @@ function predict.process(ev)
 		end
 	end
 	predict.ev_sample = event.after(next_event(), predict.process)
-	stats['predict.queue'] = predict.queue_len
-	stats['predict.learned'] = nr_learned
+	if stats then
+		stats['predict.queue'] = predict.queue_len
+		stats['predict.learned'] = nr_learned
+	end
 	collectgarbage()
 end
 
@@ -184,4 +179,19 @@ function predict.config(config)
 	predict.init()
 end
 
+predict.layer = {
+	-- Prefetch all expiring (sub-)queries immediately after the request finishes.
+	-- Doing that immediately is simplest and avoids creating (new) large bursts of activity.
+	finish = function (state, req)
+		req = kres.request_t(req)
+		local qrys = req.rplan.resolved
+		for i = 0, (tonumber(qrys.len) - 1) do -- size_t doesn't work for some reason
+			local qry = qrys.at[i]
+			if bit.band(qry.flags, kres.query.EXPIRING) ~= 0 then
+				worker.resolve(kres.dname2str(qry.sname), qry.stype, qry.sclass, kres.query.NO_CACHE)
+			end
+		end
+	end
+}
+
 return predict
-- 
2.47.2
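
For reference, a minimal configuration sketch for the prefetch-only mode described in the
README tip that this patch extends. It assumes the predict module's usual table-style
configuration with 'window' and 'period' keys (from the README's "Example configuration"
section, not shown in this hunk); the values are illustrative, and period 0 relies on the
behaviour this patch introduces, i.e. prefetching of expiring records without the 'stats'
module:

    -- kresd configuration snippet: prediction off, per-request prefetching only
    modules = {
        predict = {
            window = 15, -- sampling window in minutes (not used for prediction when period == 0)
            period = 0   -- 0 turns off prediction; expiring records are still refreshed
        }
    }

With period 0 the periodic prediction machinery stays off, so refreshing comes solely from
the new predict.layer.finish hook, which re-resolves expiring (sub-)queries right after each
client request completes.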