git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
modules/prefetch: fixed lua error on bad queries, iterative
author Marek Vavruša <marek.vavrusa@nic.cz>
Mon, 6 Jul 2015 00:06:41 +0000 (02:06 +0200)
committer Marek Vavruša <marek.vavrusa@nic.cz>
Mon, 6 Jul 2015 00:06:41 +0000 (02:06 +0200)
daemon/bindings.c
lib/layer/rrcache.c
modules/prefetch/prefetch.lua

diff --git a/daemon/bindings.c b/daemon/bindings.c
index 4fe3265a57071b555447c6adbf9fbfcc1789783b..c223cfe9fcb793025f9040ee38e5102835592f62 100644
@@ -581,11 +581,7 @@ static int wrk_resolve(lua_State *L)
        /* Resolve it */
        int ret = worker_resolve(worker, pkt);
        knot_pkt_free(&pkt);
-       if (ret != 0) {
-               lua_pushstring(L, kr_strerror(ret));
-               lua_error(L);
-       }
-       lua_pushboolean(L, true);
+       lua_pushboolean(L, ret == 0);
        return 1;
 }
 
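After this change a failed submission in worker.resolve() is reported through the return value instead of raising a Lua error, so callers no longer need a pcall() guard. A minimal sketch of the new calling convention, assuming the (name, numeric type) argument order used by prefetch.lua further below:

    -- Hypothetical caller: check the boolean result instead of wrapping
    -- the call in pcall() to catch a raised error.
    local ok = worker.resolve('example.com', 28)  -- 28 = AAAA
    if not ok then
        print('could not schedule resolution of example.com')
    end
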
diff --git a/lib/layer/rrcache.c b/lib/layer/rrcache.c
index d2db8a5e13b3a6e237caf9f877bb02f3d73ba448..56f64b46c9ee1e75bd6fd97c3c33e348dfc96645 100644
@@ -51,8 +51,8 @@ static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t
                knot_pkt_put_question(pkt, qry->sname, qry->sclass, qry->stype);
        }
 
-       /* Mark as expiring if it has less than 5% TTL (or less than 5s) */
-       if (100 * (drift + 5) > 95 * knot_rrset_ttl(&cache_rr)) {
+       /* Mark as expiring if it has less than 1% TTL (or less than 5s) */
+       if (100 * (drift + 5) > 99 * knot_rrset_ttl(&cache_rr)) {
                qry->flags |= QUERY_EXPIRING;
        }
 
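Here drift is the time already elapsed since the record was cached, so a record is now flagged as expiring only when less than roughly 1% of its original TTL (plus a 5 second floor) remains, instead of the previous 5%. An illustrative transcription of the arithmetic in Lua; the is_expiring helper is hypothetical and only mirrors the C predicate:

    -- Mirrors the C check: 100 * (drift + 5) > 99 * ttl,
    -- i.e. expiring when remaining TTL < 1% of the original + 5 s.
    local function is_expiring(ttl, drift)
        return 100 * (drift + 5) > 99 * ttl
    end

    print(is_expiring(3600, 3550))  -- false: ~50 s (1.4%) still left
    print(is_expiring(3600, 3570))  -- true: only ~30 s (0.8%) left
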
diff --git a/modules/prefetch/prefetch.lua b/modules/prefetch/prefetch.lua
index 8246ce7a81eae4f24a08ec69bdef2d48b12786fc..b0c84055740501dbaec2966474c6918b9ab0b4b6 100644
@@ -7,9 +7,9 @@
 -- @field window length of the coalescing window
 local prefetch = {
        queue = {},
-       queue_max = 100,
+       queue_max = 1000,
        queue_len = 0,
-       window = 60,
+       window = 30,
        layer = {
                -- Schedule cached entries that are expiring soon
                finish = function(state, req, answer)
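The new defaults (queue_max = 1000, window = 30) can still be tuned from the daemon configuration once the module is loaded. A sketch, assuming the usual kresd config convention of loading modules by name and assigning module table fields; the unit of window is whatever the module's scheduler uses, which this diff does not show:

    -- kresd config sketch (hypothetical values; the defaults in the
    -- diff above are queue_max = 1000 and window = 30)
    modules = { 'prefetch' }
    prefetch.queue_max = 500   -- cap the prefetch queue at 500 records
    prefetch.window = 60       -- restore the previous coalescing window
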
@@ -38,11 +38,21 @@ local prefetch = {
 
 -- Resolve queued records and flush the queue
 function prefetch.batch(module)
+       -- Defer prefetching if the server is loaded
+       if worker.stats().concurrent > 10 then
+               return 0
+       end
+       local to_delete = prefetch.queue_max / 5
+       local deleted = 0
        for key, val in pairs(prefetch.queue) do
                worker.resolve(string.sub(key, 2), string.byte(key))
                prefetch.queue[key] = nil
+               deleted = deleted + 1
+               if deleted == to_delete then
+                       break
+               end
        end
-       prefetch.queue_len = 0
+       prefetch.queue_len = prefetch.queue_len - deleted
        return 0
 end
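With these changes a single batch no longer flushes the whole queue: it bails out when more than 10 queries are already in flight, and otherwise removes at most queue_max / 5 entries (200 with the new default), so a full queue drains over roughly five windows. A self-contained Lua sketch of the drain arithmetic, using a plain table instead of the live module so it runs without the daemon:

    -- Standalone illustration of the iterative flush: one pass removes at
    -- most queue_max / 5 entries, the rest stay queued for later batches.
    local queue, queue_max, queue_len = {}, 1000, 0
    for i = 1, 500 do
        queue['key' .. i] = true
        queue_len = queue_len + 1
    end

    local to_delete, deleted = queue_max / 5, 0
    for key in pairs(queue) do
        queue[key] = nil          -- removing the current key is safe in pairs()
        deleted = deleted + 1
        if deleted == to_delete then break end
    end
    queue_len = queue_len - deleted
    print(deleted, queue_len)     -- prints: 200  300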