]> git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
modules/prefetch: basic prefetching of frequently used records
authorMarek Vavruša <marek.vavrusa@nic.cz>
Sun, 5 Jul 2015 20:14:10 +0000 (22:14 +0200)
committerMarek Vavruša <marek.vavrusa@nic.cz>
Sun, 5 Jul 2015 20:23:46 +0000 (22:23 +0200)
daemon/worker.c
doc/modules.rst
modules/prefetch/README.rst
modules/prefetch/prefetch.lua

index 728616cf5183215b99d66ce6ea90df4a81f9647f..63f4dad13b045c2e0c9d427a45970f2950ee4d7c 100644 (file)
@@ -266,7 +266,6 @@ static int qr_task_step(struct qr_task *task, knot_pkt_t *packet)
                }
                connect->data = task;
        } else {
-               printf("sending: %s %u\n", knot_dname_to_str_alloc(knot_pkt_qname(next_query)), knot_pkt_qtype(next_query));
                if (qr_task_send(task, task->next_handle, addr, next_query) != 0) {
                        return qr_task_step(task, NULL);
                }
index 2a1e4cfd89d73ed1018fa0ae227d5d750e7d9623..5f6f2bb76135c3753d7cb283d961f8ac1d5ce071 100644 (file)
@@ -12,6 +12,7 @@ Implemented modules
 .. include:: ../modules/hints/README.rst
 .. include:: ../modules/block/README.rst
 .. include:: ../modules/stats/README.rst
+.. include:: ../modules/prefetch/README.rst
 .. include:: ../modules/cachectl/README.rst
 .. include:: ../modules/graphite/README.rst
 .. include:: ../modules/ketcd/README.rst
index fefb476d9517a794eec48ce96ec0c5b6fe9ea934..8d56190288f63922bb52bbae045db9912402e656 100644 (file)
@@ -1,4 +1,9 @@
 .. _mod-prefetch:
 
-Prefetching
------------
+Prefetching records
+-------------------
+
+The module tracks expiring records (those with less than 5% of their original TTL remaining) and batches them for prefetch.
+This improves latency for frequently used records, as they are fetched in advance.
+
+.. todo:: Learn usage patterns from browser history, track usage pattern over time.
\ No newline at end of file
index 4035cad4ab03e263ce90b808f405b163fe8560cd..8246ce7a81eae4f24a08ec69bdef2d48b12786fc 100644 (file)
@@ -1,41 +1,53 @@
+-- Batch soon-expiring records in a queue and fetch them periodically.
+-- This helps to reduce latency for records that are often accessed.
+-- @module prefetch
+-- @field queue table of scheduled records
+-- @field queue_max maximum length of the queue
+-- @field queue_len current length of the queue
+-- @field window length of the coalescing window
 local prefetch = {
        queue = {},
-       frequency = 2
-}
-
--- @function Block layer implementation
-prefetch.layer = {
-       produce = function(state, req, pkt)
+       queue_max = 100,
+       queue_len = 0,
+       window = 60,
+       layer = {
                -- Schedule cached entries that are expiring soon
-               local qry = kres.query_current(req)
-               if not kres.query_has_flag(qry, kres.query.CACHED) then
-                       return state
-               end
-               local rr = pkt:get(kres.ANSWER, 0)
-               if rr and rr.ttl > 0 and rr.ttl < prefetch.frequency then
-                       local key = rr.owner..rr.type
+               finish = function(state, req, answer)
+                       local qry = kres.query_resolved(req)
+                       if not kres.query.has_flag(qry, kres.query.EXPIRING) then
+                               return state
+                       end
+                       -- Refresh entries that probably expire in this time window
+                       local qlen = prefetch.queue_len
+                       if qlen > prefetch.queue_max then
+                               return state
+                       end
+                       -- Key: {qtype [1], qname [1-255]}
+                       local key = string.char(answer:qtype())..answer:qname()
                        local val = prefetch.queue[key]
                        if not val then
                                prefetch.queue[key] = 1
+                               prefetch.queue_len = qlen + 1
                        else
                                prefetch.queue[key] = val + 1
                        end
+                       return state
                end
-               return state
-       end
+       }
 }
 
+-- Resolve queued records and flush the queue
 function prefetch.batch(module)
        for key, val in pairs(prefetch.queue) do
-               print('prefetching',key,val)
+               worker.resolve(string.sub(key, 2), string.byte(key))
+               prefetch.queue[key] = nil
        end
-       prefetch.queue = {}
-       -- @TODO: next batch interval
-       event.after(prefetch.frequency * sec, prefetch.batch)
+       prefetch.queue_len = 0
+       return 0
 end
 
 function prefetch.init(module)
-       event.after(prefetch.frequency * sec, prefetch.batch)
+       event.recurrent(prefetch.window * sec, prefetch.batch)
 end
 
 function prefetch.deinit(module)