/* Resolve it */
int ret = worker_resolve(worker, pkt);
knot_pkt_free(&pkt);
- if (ret != 0) {
- lua_pushstring(L, kr_strerror(ret));
- lua_error(L);
- }
- lua_pushboolean(L, true);
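+ /* Report failure as a boolean return value instead of raising a Lua error */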
+ lua_pushboolean(L, ret == 0);
return 1;
}
knot_pkt_put_question(pkt, qry->sname, qry->sclass, qry->stype);
}
- /* Mark as expiring if it has less than 5% TTL (or less than 5s) */
- if (100 * (drift + 5) > 95 * knot_rrset_ttl(&cache_rr)) {
+ /* Mark as expiring if it has less than 1% TTL (or less than 5s) */
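+ /* Kept in integer arithmetic: drift + 5 > 99% of the TTL */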
+ if (100 * (drift + 5) > 99 * knot_rrset_ttl(&cache_rr)) {
qry->flags |= QUERY_EXPIRING;
}
-- @field window length of the coalescing window
local prefetch = {
queue = {},
- queue_max = 100,
+ queue_max = 1000,
queue_len = 0,
- window = 60,
+ window = 30,
layer = {
-- Schedule cached entries that are expiring soon
finish = function(state, req, answer)
- -- Resolve queued records and flush the queue
+ -- Resolve a batch of queued records
function prefetch.batch(module)
+ -- Defer prefetching if the server is loaded
+ if worker.stats().concurrent > 10 then
+ return 0
+ end
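+ -- Resolve at most a fifth of the maximum queue size per batch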
+ local to_delete = prefetch.queue_max / 5
+ local deleted = 0
for key, val in pairs(prefetch.queue) do
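+ -- The queue key packs the query type in its first byte, followed by the name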
worker.resolve(string.sub(key, 2), string.byte(key))
prefetch.queue[key] = nil
+ deleted = deleted + 1
+ if deleted >= to_delete then
+ break
+ end
end
- prefetch.queue_len = 0
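+ -- Entries that were not drained stay queued for the next batch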
+ prefetch.queue_len = prefetch.queue_len - deleted
return 0
end