'hour',
'day',
'panic',
- 'warn',
- 'log',
+ 'log_error',
+ 'log_warn',
+ 'log_info',
+ 'log_debug',
+ 'log_fmt',
+ 'log_qry',
+ 'log_req',
+ 'LOG_CRIT',
+ 'LOG_ERR',
+ 'LOG_WARNING',
+ 'LOG_NOTICE',
+ 'LOG_INFO',
+ 'LOG_DEBUG',
'mode',
'reorder_RR',
'option',
-- SPDX-License-Identifier: GPL-3.0-or-later
+local ffi = require('ffi')
local id = os.getenv('SYSTEMD_INSTANCE')
if not id then
- warn('environment variable $SYSTEMD_INSTANCE not set')
+ log_warn(ffi.C.LOG_GRP_SYSTEM, 'environment variable $SYSTEMD_INSTANCE not set')
else
-- Bind to control socket in run_dir
worker.control_path = '@run_dir@/control/'
local path = worker.control_path..id
local ok, err = pcall(net.listen, path, nil, { kind = 'control' })
if not ok then
- warn('bind to '..path..' failed '..err)
+ log_warn(ffi.C.LOG_GRP_NETWORK, 'bind to '..path..' failed '..err)
end
end
void (*error)(struct kr_query *, const struct kr_transport *, enum kr_selection_error);
struct local_state *local_state;
};
-typedef unsigned int log_groups_t;
+typedef uint64_t log_groups_t;
+typedef int log_level_t;
+enum kr_log_groups_type {LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_ZIMPORT, LOG_GRP_ZSCANNER, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
int kr_forward_add_target(struct kr_request *, const struct sockaddr *);
void kr_log_req1(const struct kr_request * const, uint32_t, const unsigned int, log_groups_t, const char *, const char *, ...);
void kr_log_q1(const struct kr_query * const, log_groups_t, const char *, const char *, ...);
+char *kr_log_grp2name(log_groups_t);
+void kr_log_fmt(log_groups_t, log_level_t, const char *, const char *, const char *, const char *, ...);
int kr_make_query(struct kr_query *, knot_pkt_t *);
void kr_pkt_make_auth_header(knot_pkt_t *);
int kr_pkt_put(knot_pkt_t *, const knot_dname_t *, uint32_t, uint16_t, uint16_t, const uint8_t *, uint16_t);
struct kr_module
struct kr_server_selection
log_groups_t
+ log_level_t
+ enum kr_log_groups_type
EOF
# static variables; these lines might not be simple to generate
# Utils
kr_log_req1
kr_log_q1
+ kr_log_grp2name
+ kr_log_fmt
kr_make_query
kr_pkt_make_auth_header
kr_pkt_put
local function void_xpcall_log_tb(func, req, msg)
local ok, err = xpcall(func, debug.traceback, req, msg)
if not ok then
- log('error: callback %s req %s msg %s stack traceback:\n%s', func, req, msg, err)
+ log_error(ffi.C.LOG_GRP_SYSTEM, 'callback %s req %s msg %s stack traceback:\n%s', func, req, msg, err)
end
end
local function void_xpcall_finish_tb(func, req)
local ok, err = xpcall(func, debug.traceback, req)
if not ok then
- log('error: callback %s req %s stack traceback:\n%s', func, req, err)
+ log_error(ffi.C.LOG_GRP_SYSTEM, 'callback %s req %s stack traceback:\n%s', func, req, err)
end
end
local path = worker.control_path..worker.pid
local ok, err = pcall(net.listen, path, nil, { kind = 'control' })
if not ok then
- warn('bind to '..path..' failed '..err)
+ log_warn(ffi.C.LOG_GRP_NETWORK, 'bind to '..path..' failed '..err)
end
end
end
-- Binding to other ifaces may fail
ok, err = pcall(net.listen, '127.0.0.1', 853)
- if not ok and verbose() then
- log('bind to 127.0.0.1@853 '..err)
+ if not ok then
+ log_info(ffi.C.LOG_GRP_NETWORK, 'bind to 127.0.0.1@853 '..err)
end
ok, err = pcall(net.listen, '::1')
- if not ok and verbose() then
- log('bind to ::1@53 '..err)
+ if not ok then
+ log_info(ffi.C.LOG_GRP_NETWORK, 'bind to ::1@53 '..err)
end
ok, err = pcall(net.listen, '::1', 853)
- if not ok and verbose() then
- log('bind to ::1@853 '..err)
+ if not ok then
+ log_info(ffi.C.LOG_GRP_NETWORK, 'bind to ::1@853 '..err)
end
-- Exit when kresd isn't listening on any interfaces
n_dns_socks, _ = count_sockets()
day = 24 * hour
-- Logging
+
+-- from syslog.h
+LOG_CRIT = 2
+LOG_ERR = 3
+LOG_WARNING = 4
+LOG_NOTICE = 5
+LOG_INFO = 6
+LOG_DEBUG = 7
+
+-- Stack depth 4 = debug.getinfo -> curr_* -> log_fmt -> log_* wrapper -> caller,
+-- i.e. these return the location of the code that invoked log_error()/log_warn()/etc.
+local function curr_file() return debug.getinfo(4,'S').source end
+local function curr_line() return debug.getinfo(4,'l').currentline end
+
+-- Shared backend for the log_* wrappers: prefixes the group tag ("[%s]")
+-- and forwards to the C logger with journald-style CODE_FILE=/CODE_LINE= fields.
+local function log_fmt(grp, level, fmt, ...)
+ ffi.C.kr_log_fmt(grp, level, "CODE_FILE="..curr_file(), "CODE_LINE="..curr_line(), "",
+ "[%s]" .. fmt, ffi.C.kr_log_grp2name(grp), ...)
+end
+
+-- Log a message in the context of a kr_request (forwards to kr_log_req1).
+function log_req(req, qry_uid, indent, grp, fmt, ...)
+ ffi.C.kr_log_req1(req, qry_uid, indent, grp, ffi.C.kr_log_grp2name(grp), fmt, ...)
+end
+
+-- Log a message in the context of a kr_query (forwards to kr_log_q1).
+function log_qry(qry, grp, fmt, ...)
+ ffi.C.kr_log_q1(qry, grp, ffi.C.kr_log_grp2name(grp), fmt, ...)
+end
+
function panic(fmt, ...)
print(debug.traceback('error occurred here (config filename:lineno is '
.. 'at the bottom, if config is involved):', 2))
error(string.format('ERROR: '.. fmt, ...), 0)
end
-function warn(fmt, ...)
- io.stderr:write(string.format(fmt..'\n', ...))
+
+function log_error(grp, fmt, ...)
+ log_fmt(grp, LOG_ERR, fmt, ...)
end
-function log(fmt, ...)
- print(string.format(fmt, ...))
+
+-- Log a message at WARNING level for the given log group.
+function log_warn(grp, fmt, ...)
+ log_fmt(grp, LOG_WARNING, fmt, ...)
+end
+
+function log_info(grp, fmt, ...)
+ log_fmt(grp, LOG_INFO, fmt, ...)
+end
+
+function log_debug(grp, fmt, ...)
+ log_fmt(grp, LOG_DEBUG, fmt, ...)
end
-- Resolver bindings
cache.clear(cbname, cbexact_name, cbrr_type, cbchunk_size, cbself, cbrettable)
end)
elseif cbrettable.round > 1 then
- log('[cache] asynchonous cache.clear(\'' .. cbname .. '\', '
+ log_info(ffi.C.LOG_GRP_CACHE, 'asynchonous cache.clear(\'' .. cbname .. '\', '
.. tostring(cbexact_name) .. ') finished')
end
return cbrettable
work = function (self)
local ok, err, _, co = self.cq:step(0)
if not ok then
- warn('[%s] error: %s %s', self.name or 'worker', err, debug.traceback(co))
+ log_warn(ffi.C.LOG_GRP_SYSTEM, '%s error: %s %s', self.name or 'worker', err, debug.traceback(co))
end
-- Reschedule timeout or create new one
local timeout = self.cq:timeout()
local ret = kluautil.kr_table_pack(xpcall(func, debug.traceback))
local ok, serial = pcall(krprint.serialize_lua, ret, 'error')
if not ok then
- if verbose() then
- log('failed to serialize map() response %s (%s)',
- table_print(ret), serial)
- end
+ log_error(ffi.C.LOG_GRP_SYSTEM, 'failed to serialize map() response %s (%s)',
+ table_print(ret), serial)
return krprint.serialize_lua(
kluautil.kr_table_pack(false, "returned values cannot be serialized: "
.. serial))
s:setmode('bn', 'bn')
local status, err = pcall(s.connect, s)
if not status then
- log('map() error while connecting to control socket %s: '
+ log_error(ffi.C.LOG_GRP_NETWORK, 'map() error while connecting to control socket %s: '
.. '%s (ignoring this socket)', path, err)
return nil
end
end
local path = worker.control_path..file
local path_name = (local_exec and 'this instance') or path
- if verbose() then
- log('executing map() on %s: command %s', path_name, cmd)
- end
+ log_info(ffi.C.LOG_GRP_SYSTEM, 'executing map() on %s: command %s', path_name, cmd)
local ret
if local_exec then
ret = eval_cmd(cmd)
assert(type(ret) == 'table', 'map() protocol error, '
.. 'table with results not retured by follower')
if (ret.n ~= 2) then
- if verbose() then
- log('got unsupported map() response: %s', table_print(ret))
- end
+ log_error(ffi.C.LOG_GRP_SYSTEM, 'got unsupported map() response: %s', table_print(ret))
panic('unexpected number of return values in map() response: '
.. 'only single return value is allowed, '
.. 'use kluautil.kr_table_pack() helper')
if keydigest_is_valid(fields['validFrom'], fields['validUntil']) then
rrset = rrset .. '\n' .. rr
else
- log('[ ta ] skipping trust anchor "%s" ' ..
+ log_info(ffi.C.LOG_GRP_TA, 'skipping trust anchor "%s" ' ..
'because it is outside of validity range', rr)
end
end
end
end
if count == 0 then
- warn('[ ta ] ERROR: no anchors are trusted for ' ..
+ log_error(ffi.C.LOG_GRP_TA, 'ERROR: no anchors are trusted for ' ..
kres.dname2str(keyset.owner) .. ' !')
end
return count > 0 and not has_error
"[ ta ] keyfile '%s' doesn't exist and root key is already installed, "
.. "cannot bootstrap; provide a path to valid file with keys", path))
end
- log("[ ta ] keyfile '%s': doesn't exist, bootstrapping", path);
+ log_info(ffi.C.LOG_GRP_TA, "keyfile '%s': doesn't exist, bootstrapping", path);
local rrstr, msg = bootstrap(trust_anchors.bootstrap_url, trust_anchors.bootstrap_ca)
if not rrstr then
msg = msg .. '\n'
local owner_str = kres.dname2str(owner)
local keyset_orig = trust_anchors.keysets[owner]
if keyset_orig then
- warn('[ ta ] warning: overriding previously set trust anchors for ' .. owner_str)
+ log_warn(ffi.C.LOG_GRP_TA, 'warning: overriding previously set trust anchors for ' .. owner_str)
if keyset_orig.managed and ta_update then
ta_update.stop(owner)
end
-- Replace the TA store used for validation
if keyset_publish(keyset) and verbose() then
- log('[ ta ] installed trust anchors for domain ' .. owner_str .. ' are:\n'
+ log_info(ffi.C.LOG_GRP_TA, 'installed trust anchors for domain ' .. owner_str .. ' are:\n'
.. trust_anchors.summary(owner))
end
-- TODO: if failed and for root, try to rebootstrap?
panic('[ ta ] it is impossible to add an unmanaged TA for zone '
.. owner_str .. ' which already has a managed TA')
end
- warn('[ ta ] warning: extending previously set trust anchors for '
+ log_warn(ffi.C.LOG_GRP_TA, 'warning: extending previously set trust anchors for '
.. owner_str)
for _, ta in ipairs(keyset) do
table.insert(keyset_orig, ta)
trust_anchors.keysets[owner] = keyset
end
- if verbose() or err then log('New TA state:\n' .. trust_anchors.summary()) end
+ log_info(ffi.C.LOG_GRP_TA, 'New TA state:\n' .. trust_anchors.summary())
if err then
panic('[ ta ] .add() failed: ' .. err)
end
-- count warning messages
warn_msg = {}
-overriding_msg="[ ta ] warning: overriding previously set trust anchors for ."
+overriding_msg="warning: overriding previously set trust anchors for ."
warn_msg[overriding_msg] = 0
-function warn(fmt, ...)
+function log_warn(grp, fmt, ...) --luacheck: no unused args
msg = string.format(fmt, ...)
if warn_msg[msg] ~= nil then
warn_msg[msg] = warn_msg[msg] + 1
GRP_NAME_ITEM(LOG_GRP_DOH),
GRP_NAME_ITEM(LOG_GRP_DNSSEC),
GRP_NAME_ITEM(LOG_GRP_HINT),
+ GRP_NAME_ITEM(LOG_GRP_ITERATOR),
+ GRP_NAME_ITEM(LOG_GRP_VALIDATOR),
+ GRP_NAME_ITEM(LOG_GRP_RESOLVER),
+ GRP_NAME_ITEM(LOG_GRP_SELECTION),
+ GRP_NAME_ITEM(LOG_GRP_ZCUT),
+ GRP_NAME_ITEM(LOG_GRP_COOKIES),
+ GRP_NAME_ITEM(LOG_GRP_STATISTICS),
+ GRP_NAME_ITEM(LOG_GRP_REBIND),
+ GRP_NAME_ITEM(LOG_GRP_WORKER),
+ GRP_NAME_ITEM(LOG_GRP_POLICY),
+ GRP_NAME_ITEM(LOG_GRP_TASENTINEL),
+ GRP_NAME_ITEM(LOG_GRP_TASIGNALING),
+ GRP_NAME_ITEM(LOG_GRP_TAUPDATE),
+ GRP_NAME_ITEM(LOG_GRP_DAF),
+ GRP_NAME_ITEM(LOG_GRP_DETECTTIMEJUMP),
+ GRP_NAME_ITEM(LOG_GRP_DETECTTIMESKEW),
+ GRP_NAME_ITEM(LOG_GRP_GRAPHITE),
+ GRP_NAME_ITEM(LOG_GRP_PREFILL),
+ GRP_NAME_ITEM(LOG_GRP_PRIMING),
+ GRP_NAME_ITEM(LOG_GRP_SRVSTALE),
+ GRP_NAME_ITEM(LOG_GRP_WATCHDOG),
{ NULL, -1 },
};
int group_is_set(log_groups_t group)
{
- return kr_log_groups & (group);
+ /* Compare against zero: the implicit conversion of the 64-bit mask to the
+  * int return type would truncate away bits >= 32, making high-numbered
+  * groups (e.g. LOG_GRP_WATCHDOG) always appear unset. */
+ return (kr_log_groups & (1ULL << group)) != 0;
}
void kr_log_fmt(log_groups_t group, log_level_t level, const char *file,
return kr_log_level;
}
-void kr_log_add_group(log_groups_t mask)
+void kr_log_add_group(log_groups_t group)
{
- kr_log_groups |= mask;
+ kr_log_groups |= (1ULL << group);
}
-void kr_log_del_group(log_groups_t mask)
+void kr_log_del_group(log_groups_t group)
{
- kr_log_groups &= (~mask);
+ kr_log_groups &= (~(1ULL << group));
}
void kr_log_init(log_level_t level, log_target_t target)
/* Groups */
-typedef uint32_t log_groups_t;
+typedef uint64_t log_groups_t;
typedef struct {
char *g_name;
log_groups_t g_val;
} log_group_names_t;
-/* Don't forget add *_TAG below and log_group_names[] item (log.c) */
-#define LOG_GRP_SYSTEM (1 << 1)
-#define LOG_GRP_CACHE (1 << 2)
-#define LOG_GRP_IO (1 << 3)
-#define LOG_GRP_NETWORK (1 << 4)
-#define LOG_GRP_TA (1 << 5)
-#define LOG_GRP_TLS (1 << 6)
-#define LOG_GRP_GNUTLS (1 << 7)
-#define LOG_GRP_TLSCLIENT (1 << 8)
-#define LOG_GRP_XDP (1 << 9)
-#define LOG_GRP_ZIMPORT (1 << 10)
-#define LOG_GRP_ZSCANNER (1 << 11)
-#define LOG_GRP_DOH (1 << 12)
-#define LOG_GRP_DNSSEC (1 << 13)
-#define LOG_GRP_HINT (1 << 14)
-#define LOG_GRP_PLAN (1 << 15)
-#define LOG_GRP_ITERATOR (1 << 16)
-#define LOG_GRP_VALIDATOR (1 << 17)
-#define LOG_GRP_RESOLVER (1 << 18)
-#define LOG_GRP_SELECTION (1 << 19)
-#define LOG_GRP_ZCUT (1 << 20)
-#define LOG_GRP_COOKIES (1 << 21)
-#define LOG_GRP_STATISTICS (1 << 22)
-#define LOG_GRP_REBIND (1 << 23)
-#define LOG_GRP_WORKER (1 << 24)
-#define LOG_GRP_POLICY (1 << 25)
-
-#define LOG_GRP_SYSTEM_TAG "system"
-#define LOG_GRP_CACHE_TAG "cache "
-#define LOG_GRP_IO_TAG "io "
-#define LOG_GRP_NETWORK_TAG "net "
-#define LOG_GRP_TA_TAG "ta "
-#define LOG_GRP_TLS_TAG "tls "
-#define LOG_GRP_GNUTLS_TAG "gnutls"
-#define LOG_GRP_TLSCLIENT_TAG "tls_cl"
-#define LOG_GRP_XDP_TAG "xdp "
-#define LOG_GRP_ZIMPORT_TAG "zimprt"
-#define LOG_GRP_ZSCANNER_TAG "zscann"
-#define LOG_GRP_DOH_TAG "doh "
-#define LOG_GRP_DNSSEC_TAG "dnssec"
-#define LOG_GRP_HINT_TAG "hint "
-#define LOG_GRP_PLAN_TAG "plan "
-#define LOG_GRP_ITERATOR_TAG "iterat"
-#define LOG_GRP_VALIDATOR_TAG "valdtr"
-#define LOG_GRP_RESOLVER_TAG "resolv"
-#define LOG_GRP_SELECTION_TAG "select"
-#define LOG_GRP_ZCUT_TAG "zoncut"
-#define LOG_GRP_COOKIES_TAG "cookie"
-#define LOG_GRP_STATISTICS_TAG "statis"
-#define LOG_GRP_REBIND_TAG "rebind"
-#define LOG_GRP_WORKER_TAG "worker"
-#define LOG_GRP_POLICY_TAG "policy"
-
+/* Don't forget add *_TAG below, log_group_names[] item (log.c) and generate
+ * new kres-gen.lua */
+enum kr_log_groups_type {
+ LOG_GRP_SYSTEM = 1,
+ LOG_GRP_CACHE,
+ LOG_GRP_IO,
+ LOG_GRP_NETWORK,
+ LOG_GRP_TA,
+ LOG_GRP_TLS,
+ LOG_GRP_GNUTLS,
+ LOG_GRP_TLSCLIENT,
+ LOG_GRP_XDP,
+ LOG_GRP_ZIMPORT,
+ LOG_GRP_ZSCANNER,
+ LOG_GRP_DOH,
+ LOG_GRP_DNSSEC,
+ LOG_GRP_HINT,
+ LOG_GRP_PLAN,
+ LOG_GRP_ITERATOR,
+ LOG_GRP_VALIDATOR,
+ LOG_GRP_RESOLVER,
+ LOG_GRP_SELECTION,
+ LOG_GRP_ZCUT,
+ LOG_GRP_COOKIES,
+ LOG_GRP_STATISTICS,
+ LOG_GRP_REBIND,
+ LOG_GRP_WORKER,
+ LOG_GRP_POLICY,
+ LOG_GRP_TASENTINEL,
+ LOG_GRP_TASIGNALING,
+ LOG_GRP_TAUPDATE,
+ LOG_GRP_DAF,
+ LOG_GRP_DETECTTIMEJUMP,
+ LOG_GRP_DETECTTIMESKEW,
+ LOG_GRP_GRAPHITE,
+ LOG_GRP_PREFILL,
+ LOG_GRP_PRIMING,
+ LOG_GRP_SRVSTALE,
+ LOG_GRP_WATCHDOG,
+};
+
+
+#define LOG_GRP_SYSTEM_TAG "system"
+#define LOG_GRP_CACHE_TAG "cache "
+#define LOG_GRP_IO_TAG "io "
+#define LOG_GRP_NETWORK_TAG "net "
+#define LOG_GRP_TA_TAG "ta "
+#define LOG_GRP_TLS_TAG "tls "
+#define LOG_GRP_GNUTLS_TAG "gnutls"
+#define LOG_GRP_TLSCLIENT_TAG "tls_cl"
+#define LOG_GRP_XDP_TAG "xdp "
+#define LOG_GRP_ZIMPORT_TAG "zimprt"
+#define LOG_GRP_ZSCANNER_TAG "zscann"
+#define LOG_GRP_DOH_TAG "doh "
+#define LOG_GRP_DNSSEC_TAG "dnssec"
+#define LOG_GRP_HINT_TAG "hint "
+#define LOG_GRP_PLAN_TAG "plan "
+#define LOG_GRP_ITERATOR_TAG "iterat"
+#define LOG_GRP_VALIDATOR_TAG "valdtr"
+#define LOG_GRP_RESOLVER_TAG "resolv"
+#define LOG_GRP_SELECTION_TAG "select"
+#define LOG_GRP_ZCUT_TAG "zoncut"
+#define LOG_GRP_COOKIES_TAG "cookie"
+#define LOG_GRP_STATISTICS_TAG "statis"
+#define LOG_GRP_REBIND_TAG "rebind"
+#define LOG_GRP_WORKER_TAG "worker"
+#define LOG_GRP_POLICY_TAG "policy"
+#define LOG_GRP_TASENTINEL_TAG "tasent"
+#define LOG_GRP_TASIGNALING_TAG "tasign"
+#define LOG_GRP_TAUPDATE_TAG "taupd "
+#define LOG_GRP_DAF_TAG "daf "
+#define LOG_GRP_DETECTTIMEJUMP_TAG "timejm"
+#define LOG_GRP_DETECTTIMESKEW_TAG "timesk"
+#define LOG_GRP_GRAPHITE_TAG "graphi"
+#define LOG_GRP_PREFILL_TAG "prefil"
+#define LOG_GRP_PRIMING_TAG "primin"
+#define LOG_GRP_SRVSTALE_TAG "srvstl"
+#define LOG_GRP_WATCHDOG_TAG "wtchdg"
KR_EXPORT
extern log_groups_t kr_log_groups;
KR_EXPORT
int group_is_set(log_groups_t group);
KR_EXPORT
-void kr_log_add_group(log_groups_t mask);
+void kr_log_add_group(log_groups_t group);
KR_EXPORT
-void kr_log_del_group(log_groups_t mask);
+void kr_log_del_group(log_groups_t group);
KR_EXPORT
char *kr_log_grp2name(log_groups_t group);
KR_EXPORT
-- SPDX-License-Identifier: GPL-3.0-or-later
+local ffi = require('ffi')
+
-- Load dependent modules
if not view then modules.load('view') end
if not policy then modules.load('policy') end
function M.config()
if not http then
- if verbose() then
- log('[daf ] HTTP API unavailable because HTTP module is not loaded, use modules.load("http")')
- end
+ log_error(ffi.C.LOG_GRP_DAF, 'HTTP API unavailable because HTTP module is not loaded, use modules.load("http")')
return
end
local actual_timeshift = os.time() * 1000 - tonumber(ffi.C.kr_now())
local jump_backward = cache_timeshift - actual_timeshift
if jump_backward > mod.threshold then
- log("Detected backwards time jump, clearing cache.\n" ..
+ log_info(ffi.C.LOG_GRP_DETECTTIMEJUMP, "Detected backwards time jump, clearing cache.\n" ..
"But what does that mean? It means your future hasn't been written yet."
)
cache.clear()
elseif -jump_backward > mod.threshold then
-- On Linux 4.17+ this shouldn't happen anymore: https://lwn.net/Articles/751482/
- log("Detected forward time jump. (Suspend-resume, possibly.)")
+ log_info(ffi.C.LOG_GRP_DETECTTIMEJUMP, "Detected forward time jump. (Suspend-resume, possibly.)")
cache.checkpoint(true)
end
end
-- luacheck: no unused args
local function check_time_callback(pkt, req)
if pkt == nil or pkt:rcode() ~= kres.rcode.NOERROR then
- warn("[detect_time_skew] cannot resolve '.' NS")
+ log_warn(ffi.C.LOG_GRP_DETECTTIMESKEW, "cannot resolve '.' NS")
return nil
end
local seen_rrsigs = 0
end
end
if seen_rrsigs == 0 then
- if verbose() then
- log("[detect_time_skew] No RRSIGs received! "..
- "You really should configure DNSSEC trust anchor for the root.")
- end
+ log_info(ffi.C.LOG_GRP_DETECTTIMESKEW, "No RRSIGs received! "..
+ "You really should configure DNSSEC trust anchor for the root.")
elseif valid_rrsigs == 0 then
- warn("[detect_time_skew] Local system time %q seems to be at "..
+ log_warn(ffi.C.LOG_GRP_DETECTTIMESKEW, "Local system time %q seems to be at "..
"least %u seconds in the %s. DNSSEC signatures for '.' NS "..
"are not valid %s. Please check your system clock!",
os.date("%c", now),
math.abs(time_diff),
time_diff > 0 and "future" or "past",
time_diff > 0 and "yet" or "anymore")
- elseif verbose() then
- log("[detect_time_skew] Local system time %q is within "..
+ else
+ log_info(ffi.C.LOG_GRP_DETECTTIMESKEW, "Local system time %q is within "..
"RRSIG validity interval <%q,%q>.", os.date("%c", now),
os.date("%c", inception), os.date("%c", expiration))
end
function M.layer.consume(state, _, pkt)
-- Only successful answers
if state == kres.FAIL then return state end
- -- log("%s", pkt:tostring())
+ -- log_debug(ffi.C.DOT, "%s", pkt:tostring())
local authority = pkt:section(kres.section.AUTHORITY)
local additional = pkt:section(kres.section.ADDITIONAL)
for _, rr in ipairs(authority) do
- --log("%d %s", rr.type, kres.dname2str(rr.rdata))
+ --log_debug(ffi.C.DOT, "%d %s", rr.type, kres.dname2str(rr.rdata))
if rr.type == kres.type.NS then
local name = kres.dname2str(rr.rdata):upper()
- -- log("NS %d", name:len())
+ -- log_debug(ffi.C.DOT, "NS %d", name:len())
if name:len() > 56 and str.starts(name, "DOT-") then
local k = basexx.to_base64(
basexx.from_base32(
C.inet_ntop(AF_INET6, ns_addr.sin6_addr, addrbuf, INET6_ADDRSTRLEN)
end
net.tls_client(ffi.string(addrbuf).."@853", {k})
- log("Adding %s IP %s %s", name_add, ffi.string(addrbuf).."@853", k)
+ log_info(ffi.C.LOG_GRP_TLSCLIENT, "Adding %s IP %s %s", name_add, ffi.string(addrbuf).."@853", k)
end
end
end
-- This is leader-only module
local M = {}
+local ffi = require("ffi")
local socket = require("cqueues.socket")
local proto_txt = {
[socket.SOCK_DGRAM] = 'udp',
end
if not status then
- log('[graphite] connecting: %s@%d %s reason: %s',
+ log_info(ffi.C.LOG_GRP_GRAPHITE, 'connecting: %s@%d %s reason: %s',
host, port, proto_txt[stype], err)
return status, err
end
if tcp and host.seen + 2 * M.interval / 1000 <= now then
local sock_type = (host.tcp and socket.SOCK_STREAM)
or socket.SOCK_DGRAM
- log('[graphite] reconnecting: %s@%d %s reason: %s',
+ log_info(ffi.C.LOG_GRP_GRAPHITE, 'reconnecting: %s@%d %s reason: %s',
host.addr, host.port, proto_txt[sock_type], err)
s = make_tcp(host.addr, host.port)
if s then
}
-- log errors but do not throw
M.configs._builtin._all.onerror = function(myserver, context, op, err, errno) -- luacheck: ignore 212
- local msg = '[http] ' .. op .. ' on ' .. tostring(context) .. ' failed'
+ local msg = op .. ' on ' .. tostring(context) .. ' failed'
if err then
msg = msg .. ': ' .. tostring(err)
end
- if verbose() then
- log(msg)
- end
+ log_info(ffi.C.LOG_GRP_DOH, msg)
end
-- M.config() without explicit "kind" modifies this
-- Upgrade connection to WebSocket
local ws = http_websocket.new_from_stream(stream, h)
if ws then
- if verbose() then
- log('[http] %s %s HTTP/%d web socket open',
- m, path, connection.version)
- end
+ log_info(ffi.C.LOG_GRP_DOH, '%s %s HTTP/%d web socket open',
+ m, path, connection.version)
assert(ws:accept { protocols = {'json'} })
-- Continue streaming results to client
local ep = endpoints[path]
cb(h, ws)
end
ws:close()
- if verbose() then
- log('[http] %s %s HTTP/%d web socket closed',
- m, path, connection.version)
- end
+ log_info(ffi.C.LOG_GRP_DOH, '%s %s HTTP/%d web socket closed',
+ m, path, connection.version)
return
else
local ok, err, reason = http_util.yieldable_pcall(serve, endpoints, h, stream)
if not ok or err then
err = err or '500'
- if verbose() then
- log('[http] %s %s HTTTP/%d %s %s',
- m, path, connection.version, err, reason or '')
- end
+ log_info(ffi.C.LOG_GRP_DOH, '%s %s HTTTP/%d %s %s',
+ m, path, connection.version, err, reason or '')
-- Method is not supported
local hsend = http_headers.new()
hsend:append(':status', err)
else
assert(stream:write_headers(hsend, true))
end
- else if verbose() then
- log('[http] %s %s HTTP/%d 200',
+ else
+ log_info(ffi.C.LOG_GRP_DOH, '%s %s HTTP/%d 200',
m, path, connection.version)
- end
end
end
--]]
local tls_cert = {}
+local ffi = require('ffi')
local x509, pkey = require('openssl.x509'), require('openssl.pkey')
-- @function Create self-signed certificate; return certs, key
s.server.ctx = ephem_state.ctx
s.config.ctx = ephem_state.ctx -- not required, but let's keep it synchonized
end
- log('[http] created new ephemeral TLS certificate')
+ log_info(ffi.C.LOG_GRP_DOH, 'created new ephemeral TLS certificate')
local _, expiry_stamp = certs[1]:getLifetime()
local wait_msec = 1000 * math.max(1, expiry_stamp - os.time() - 3 * 24 * 3600)
if not ephem_state.timer_id then
assert(ctx:setCertificateChain(chain))
elseif not warned_old_luaossl then
-- old luaossl version -> only final cert sent to clients
- warn('[http] Warning: need luaossl >= 20181207 to support sending intermediary certificate to clients')
+ log_warn(ffi.C.LOG_GRP_DOH,
+ 'Warning: need luaossl >= 20181207 to support sending intermediary certificate to clients')
warned_old_luaossl = true
end
return ctx
while true do
ok, errstr = parser:parse()
if errstr then
- log('[poli] RPZ %s:%d: %s', path, tonumber(parser.line_counter), errstr)
+ log_info(ffi.C.LOG_GRP_POLICY, 'RPZ %s:%d: %s', path, tonumber(parser.line_counter), errstr)
end
if not ok then break end
local prefix_labels = ffi.C.knot_dname_in_bailiwick(full_name, parser.zone_origin)
if prefix_labels < 0 then
- log('[poli] RPZ %s:%d: RR owner "%s" outside the zone (ignored)',
+ log_info(ffi.C.LOG_GRP_POLICY, 'RPZ %s:%d: RR owner "%s" outside the zone (ignored)',
path, tonumber(parser.line_counter), kres.dname2str(full_name))
goto continue
end
if action_map[rdata] then
rules[name] = action_map[rdata]
else
- log('[poli] RPZ %s:%d: CNAME with custom target in RPZ is not supported yet (ignored)',
+ log_info(ffi.C.LOG_GRP_POLICY, 'RPZ %s:%d: CNAME with custom target in RPZ is not supported yet (ignored)',
path, tonumber(parser.line_counter))
end
else
if #name then
local is_bad = rrtype_bad[parser.r_type]
if is_bad == true or (is_bad == false and prefix_labels ~= 0) then
- log('[poli] RPZ %s:%d warning: RR type %s is not allowed in RPZ (ignored)',
+ log_warn(ffi.C.LOG_GRP_POLICY, 'RPZ %s:%d warning: RR type %s is not allowed in RPZ (ignored)',
path, tonumber(parser.line_counter), kres.tostring.type[parser.r_type])
elseif is_bad == nil then
if new_actions[name] == nil then new_actions[name] = {} end
end
table.insert(act.rdata, rdata)
if parser.r_ttl ~= act.ttl then -- be conservative
- log('[poli] RPZ %s:%d warning: different TTLs in a set (minimum taken)',
+ log_warn(ffi.C.LOG_GRP_POLICY, 'RPZ %s:%d warning: different TTLs in a set (minimum taken)',
path, tonumber(parser.line_counter))
act.ttl = math.min(act.ttl, parser.r_ttl)
end
-- Watcher will also fire for changes to the directory itself
if name == file then
-- If the file changes then reparse and replace the existing ruleset
- if verbose() then
- log('[poli] RPZ reloading: ' .. name)
- end
+ log_info(ffi.C.LOG_GRP_POLICY, 'RPZ reloading: ' .. name)
rules = rpz_parse(action, path)
end
end
end)
elseif watch then -- explicitly requested and failed
error('[poli] lua-cqueues required to watch and reload RPZ file')
- elseif verbose() then
- log('[poli] lua-cqueues required to watch and reload RPZ file, continuing without watching')
+ else
+ log_info(ffi.C.LOG_GRP_POLICY, 'lua-cqueues required to watch and reload RPZ file, continuing without watching')
end
end
local debug_logline_cb = ffi.cast('trace_log_f', function (_, msg)
jit.off(true, true) -- JIT for (C -> lua)^2 nesting isn't allowed
-- msg typically ends with newline
- io.write(ffi.string(msg))
+ log_debug(ffi.C.LOG_GRP_POLICY, "%s", msg)
end)
ffi.gc(debug_logline_cb, free_cb)
local debug_logfinish_cb = ffi.cast('trace_callback_f', function (req)
jit.off(true, true) -- JIT for (C -> lua)^2 nesting isn't allowed
- local LOG_GRP_POLICY= bit.lshift(1, 25)
- local LOG_GRP_POLICY_TAG="policy"
- ffi.C.kr_log_req1(req, 0, 0, LOG_GRP_POLICY, LOG_GRP_POLICY_TAG,
+ log_req(req, 0, 0, ffi.C.LOG_GRP_POLICY,
'following rrsets were marked as interesting:\n' ..
req:selected_tostring())
if req.answer ~= nil then
- ffi.C.kr_log_req1(req, 0, 0, LOG_GRP_POLICY, LOG_GRP_POLICY_TAG,
- 'answer packet:\n' ..
- tostring(req.answer))
+ log_req(req, 0, 0, ffi.C.LOG_GRP_POLICY,
+ 'answer packet:\n' .. tostring(req.answer))
else
- ffi.C.kr_log_req1(req, 0, 0, LOG_GRP_POLICY, LOG_GRP_POLICY_TAG,
+ log_req(req, 0, 0, ffi.C.LOG_GRP_POLICY,
'answer packet DROPPED\n')
end
end)
-- log request packet
function policy.REQTRACE(_, req)
- local LOG_GRP_POLICY= bit.lshift(1, 25)
- local LOG_GRP_POLICY_TAG="policy"
- ffi.C.kr_log_req1(req, 0, 0, LOG_GRP_POLICY, LOG_GRP_POLICY_TAG,
+ log_req(req, 0, 0, ffi.C.LOG_GRP_POLICY,
'request packet:\n%s',
tostring(req.qsource.packet))
end
fname, errmsg))
end
- log("[prefill] downloading root zone to file %s ...", fname)
+ log_info(ffi.C.LOG_GRP_PREFILL, "downloading root zone to file %s ...", fname)
rcode, errmsg = kluautil.kr_https_fetch(url, file, rz_ca_file)
if rcode == nil then
error(string.format("[prefill] fetch of `%s` failed: %s", url, errmsg))
if res.code == 1 then -- no TA found, wait
error("[prefill] no trust anchor found for root zone, import aborted")
elseif res.code == 0 then
- log("[prefill] root zone successfully parsed, import started")
+ log_info(ffi.C.LOG_GRP_PREFILL, "root zone successfully parsed, import started")
else
error(string.format("[prefill] root zone import failed (%s)", res.msg))
end
local file_ttl = get_file_ttl(rz_local_fname)
if file_ttl > rz_interval_threshold then
- log("[prefill] root zone file valid for %s, reusing data from disk",
+ log_info(ffi.C.LOG_GRP_PREFILL, "root zone file valid for %s, reusing data from disk",
display_delay(file_ttl))
else
local ok, errmsg = pcall(download, rz_url, rz_local_fname)
if not ok then
rz_cur_interval = rz_https_fail_interval
- math.random(rz_interval_randomizator_limit)
- log("[prefill] cannot download new zone (%s), "
+ log_info(ffi.C.LOG_GRP_PREFILL, "cannot download new zone (%s), "
.. "will retry root zone download in %s",
errmsg, display_delay(rz_cur_interval))
restart_timer(rz_cur_interval)
rz_cur_interval = rz_import_error_interval
- math.random(rz_interval_randomizator_limit)
end
- log("[prefill] root zone import failed (%s), retry in %s",
+ log_info(ffi.C.LOG_GRP_PREFILL, "root zone import failed (%s), retry in %s",
errmsg, display_delay(rz_cur_interval))
else
-- re-download before TTL expires
rz_cur_interval = (file_ttl - rz_interval_threshold
- math.random(rz_interval_randomizator_limit))
- log("[prefill] root zone refresh in %s",
+ log_info(ffi.C.LOG_GRP_PREFILL, "root zone refresh in %s",
display_delay(rz_cur_interval))
end
restart_timer(rz_cur_interval)
local function address_callback(pkt, req)
if pkt == nil or pkt:rcode() ~= kres.rcode.NOERROR then
pkt = req.qsource.packet
- warn("[priming] cannot resolve address '%s', type: %d", kres.dname2str(pkt:qname()), pkt:qtype())
+ log_warn(ffi.C.LOG_GRP_PRIMING, "cannot resolve address '%s', type: %d", kres.dname2str(pkt:qname()), pkt:qtype())
else
local section = pkt:rrsets(kres.section.ANSWER)
for i = 1, #section do
internal.to_resolve = internal.to_resolve - 1
if internal.to_resolve == 0 then
if count_addresses(internal.nsset) == 0 then
- warn("[priming] cannot resolve any root server address, next priming query in %d seconds", priming.retry_time / sec)
+ log_warn(ffi.C.LOG_GRP_PRIMING, "cannot resolve any root server address, "
+ .. "next priming query in %d seconds", priming.retry_time / sec)
internal.event = event.after(priming.retry_time, internal.prime)
else
publish_hints(internal.nsset)
- if verbose() then
- log("[priming] triggered priming query, next in %d seconds", internal.min_ttl)
- end
+ log_info(ffi.C.LOG_GRP_PRIMING, "triggered priming query, next in %d seconds", internal.min_ttl)
internal.event = event.after(internal.min_ttl * sec, internal.prime)
end
end
-- luacheck: no unused args
local function priming_callback(pkt, req)
if pkt == nil or pkt:rcode() ~= kres.rcode.NOERROR then
- warn("[priming] cannot resolve '.' NS, next priming query in %d seconds", priming.retry_time / sec)
+ log_warn(ffi.C.LOG_GRP_PRIMING, "cannot resolve '.' NS, next priming query in %d seconds", priming.retry_time / sec)
internal.event = event.after(priming.retry_time, internal.prime)
return nil
end
state = refuse(req)
end
if verbose() then
- local LOG_GRP_REBIND= bit.lshift(1, 23)
- local LOG_GRP_REBIND_TAG="rebind"
- ffi.C.kr_log_q1(qry, LOG_GRP_REBIND, LOG_GRP_REBIND_TAG,
+ log_qry(qry, ffi.C.LOG_GRP_REBIND,
'blocking blacklisted IP in RR \'%s\'\n', kres.rr2str(bad_rr))
end
return state
M.callback = ffi.cast("kr_stale_cb",
function (ttl) --, name, type, qry)
- --log('[ ][stal] => called back with TTL: ' .. tostring(ttl))
+ --log_debug(ffi.C.LOG_GRP_SRVSTALE, ' => called back with TTL: ' .. tostring(ttl))
if ttl + 3600 * 24 > 0 then -- at most one day stale
return 1
else
local now = ffi.C.kr_now()
local deadline = qry.creation_time_mono + M.timeout
if now > deadline or qry.flags.NO_NS_FOUND then
- if verbose() then
- log('[ ][stal] => no reachable NS, using stale data')
- end
+ log_info(ffi.C.LOG_GRP_SRVSTALE, ' => no reachable NS, using stale data')
qry.stale_cb = M.callback
-- TODO: probably start the same request that doesn't stale-serve,
-- but first we need some detection of non-interactive / internal requests.
if not keytag then return state end
if req.rank ~= ffi.C.KR_RANK_SECURE or req.answer:cd() then
- if verbose() then
- log('[ta_sentinel] name+type OK but not AD+CD conditions')
- end
+ log_info(ffi.C.LOG_GRP_TASENTINEL, 'name+type OK but not AD+CD conditions')
return state
end
if keytag < 0 or keytag > 0xffff then
return state end -- invalid keytag?!, exit
- if verbose() then
- log('[ta_sentinel] key tag: ' .. keytag .. ', sentinel: ' .. tostring(sentype))
- end
+ log_info(ffi.C.LOG_GRP_TASENTINEL, 'key tag: ' .. keytag .. ', sentinel: ' .. tostring(sentype))
local found = false
local ds_set = ffi.C.kr_ta_get(kres.context().trust_anchors, '\0')
end
end
end
- if verbose() then
- log('[ta_sentinel] matching trusted TA found: ' .. tostring(found))
- if not found then -- print matching TAs in *other* states than Valid
- for i = 1, #(trust_anchors.keysets['\0'] or {}) do
- local key = trust_anchors.keysets['\0'][i]
- if key.key_tag == keytag and key.state ~= 'Valid' then
- log('[ta_sentinel] matching UNtrusted TA found in state: '
- .. key.state)
- end
+ log_info(ffi.C.LOG_GRP_TASENTINEL, 'matching trusted TA found: ' .. tostring(found))
+ if not found then -- print matching TAs in *other* states than Valid
+ for i = 1, #(trust_anchors.keysets['\0'] or {}) do
+ local key = trust_anchors.keysets['\0'][i]
+ if key.key_tag == keytag and key.state ~= 'Valid' then
+ log_info(ffi.C.LOG_GRP_TASENTINEL, 'matching UNtrusted TA found in state: '
+ .. key.state)
end
end
end
-- Module implementing RFC 8145 section 5
-- Signaling Trust Anchor Knowledge in DNS using Key Tag Query
local kres = require('kres')
+local ffi = require('ffi')
local M = {}
M.layer = {}
local keyset = trust_anchors.keysets[domain]
local qname = prepare_query_name(keyset, domain)
if qname ~= nil then
- if verbose() then
- log("[ta_signal_query] signalling query trigered: %s", qname)
- end
+ log_info(ffi.C.LOG_GRP_TASIGNALING, "signalling query triggered: %s", qname)
-- asynchronous query
-- we do not care about result or from where it was obtained
event.after(0, function ()
local function ta_find(keyset, rr)
local rr_tag = C.kr_dnssec_key_tag(rr.type, rr.rdata, #rr.rdata)
if rr_tag < 0 or rr_tag > 65535 then
- warn(string.format('[ta_update] ignoring invalid or unsupported RR: %s: %s',
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, string.format('ignoring invalid or unsupported RR: %s: %s',
kres.rr2str(rr), ffi.string(C.knot_strerror(rr_tag))))
return nil
end
-- Match key owner and content
local ta_tag = C.kr_dnssec_key_tag(ta.type, ta.rdata, #ta.rdata)
if ta_tag < 0 or ta_tag > 65535 then
- warn(string.format('[ta_update] ignoring invalid or unsupported RR: %s: %s',
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, string.format('ignoring invalid or unsupported RR: %s: %s',
kres.rr2str(ta), ffi.string(C.knot_strerror(ta_tag))))
else
if ta.owner == rr.owner then
-- Attempt to extract key_tag
local key_tag = C.kr_dnssec_key_tag(rr.type, rr.rdata, #rr.rdata)
if key_tag < 0 or key_tag > 65535 then
- warn(string.format('[ta_update] ignoring invalid or unsupported RR: %s: %s',
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, string.format('ignoring invalid or unsupported RR: %s: %s',
kres.rr2str(rr), ffi.string(C.knot_strerror(key_tag))))
return false
end
ta.timer = nil
end
if rr.state ~= key_state.Valid or verbose() then
- log('[ta_update] key: ' .. key_tag .. ' state: '..ta.state)
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'key: ' .. key_tag .. ' state: '..ta.state)
end
return true
elseif not key_revoked then -- First time seen (NewKey)
local keep_ta = true
local key_tag = C.kr_dnssec_key_tag(ta.type, ta.rdata, #ta.rdata)
if key_tag < 0 or key_tag > 65535 then
- warn(string.format('[ta_update] ignoring invalid or unsupported RR: %s: %s',
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, string.format('ignoring invalid or unsupported RR: %s: %s',
kres.rr2str(ta), ffi.string(C.knot_strerror(key_tag))))
key_tag = ''
end
-- Remove key that is missing for too long
elseif ta.state == key_state.Missing and os.difftime(ta.timer, os.time()) <= 0 then
ta.state = key_state.Removed
- log('[ta_update] key: '..key_tag..' removed because missing for too long')
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'key: '..key_tag..' removed because missing for too long')
keep_ta = false
-- Purge pending key
elseif ta.state == key_state.AddPend then
- log('[ta_update] key: '..key_tag..' purging')
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'key: '..key_tag..' purging')
keep_ta = false
end
- log('[ta_update] key: '..key_tag..' state: '..ta.state)
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'key: '..key_tag..' state: '..ta.state)
return keep_ta
end
if not trust_anchors.keyset_publish(keyset) then
-- TODO: try to rebootstrap if for root?
return false
- elseif verbose() then
- log('[ta_update] refreshed trust anchors for domain ' .. kres.dname2str(keyset.owner) .. ' are:\n'
+ else
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'refreshed trust anchors for domain ' .. kres.dname2str(keyset.owner) .. ' are:\n'
.. trust_anchors.summary(keyset.owner))
end
end
local function unmanagedkey_change(file_name)
- warn('[ta_update] you need to update package with trust anchors in "%s" before it breaks', file_name)
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, 'you need to update package with trust anchors in "%s" before it breaks', file_name)
end
local function check_upstream(keyset, new_keys)
else
local qry = req:initial()
if qry.flags.DNSSEC_BOGUS == true then
- warn('[ta_update] active refresh failed, update your trust anchors in "%s"', keyset.filename)
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, 'active refresh failed, update your trust anchors in "%s"', keyset.filename)
elseif pkt == nil then
- warn('[ta_update] active refresh failed, answer was dropped')
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, 'active refresh failed, answer was dropped')
else
- warn('[ta_update] active refresh failed for ' .. kres.dname2str(keyset.owner)
+ log_warn(ffi.C.LOG_GRP_TAUPDATE, 'active refresh failed for ' .. kres.dname2str(keyset.owner)
.. ' with rcode: ' .. pkt:rcode())
end
end
event.cancel(track_cfg.event)
end
track_cfg.event = event.after(delay, function ()
- log('[ta_update] refreshing TA for ' .. owner_str)
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'refreshing TA for ' .. owner_str)
resolve(owner_str, kres.type.DNSKEY, kres.class.IN, 'NO_CACHE',
function (pkt, req)
-- Schedule itself with updated timeout
local delay_new = active_refresh(keyset, pkt, req, managed)
delay_new = keyset.refresh_time or ta_update.refresh_time or delay_new
- log('[ta_update] next refresh for ' .. owner_str .. ' in '
+ log_info(ffi.C.LOG_GRP_TAUPDATE, 'next refresh for ' .. owner_str .. ' in '
.. delay_new/hour .. ' hours')
refresh_plan(keyset, delay_new, managed)
end)
end
function private.fail_callback()
- log('[watchdog] ABORTING resolver, supervisor is expected to restart it')
+ log_error(ffi.C.LOG_GRP_WATCHDOG, 'ABORTING resolver, supervisor is expected to restart it')
ffi.C.abort()
end
private.ok_callback()
return
end
- log('[watchdog] watchdog query returned unexpected answer! query verbose log:')
- log(table.concat(logbuf, ''))
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'watchdog query returned unexpected answer! query verbose log:')
+ log_info(ffi.C.LOG_GRP_WATCHDOG, table.concat(logbuf, ''))
if pkt ~= nil then
- log('[watchdog] problematic answer:\n%s', pkt)
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'problematic answer:\n%s', pkt)
else
- log('[watchdog] answer was dropped')
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'answer was dropped')
end
-- failure! quit immediatelly to allow process supervisor to restart us
private.fail_callback()
local logbuf = {}
-- fire watchdog query
if private.qname and private.qtype then
- if verbose() then
- log('[watchdog] starting watchdog query %s %s', private.qname, private.qtype)
- end
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'starting watchdog query %s %s', private.qname, private.qtype)
resolve(private.qname,
private.qtype,
kres.class.IN,
local timeoutptr = ffi.new('uint64_t[1]')
local systemd_present, ret = pcall(function() return ffi.C.sd_watchdog_enabled(0, timeoutptr) end)
if not systemd_present then
- if verbose() then
- log('[watchdog] systemd library not detected')
- end
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'systemd library not detected')
return
end
private.ok_callback = sd_signal_ok
error('[watchdog] %s', ffi.string(ffi.C.knot_strerror(math.abs(ret))))
return
elseif ret == 0 then
- if verbose() then
- log('[watchdog] disabled in systemd (WatchdogSec= not specified)')
- end
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'disabled in systemd (WatchdogSec= not specified)')
return
end
local timeout = tonumber(timeoutptr[0]) / 1000 -- convert to ms
local interval = timeout / 2 -- halve interval to make sure we are never late
if interval < 1 then
- log('[watchdog] error: WatchdogSec= must be at least 2ms! (got %d usec)',
+ log_error(ffi.C.LOG_GRP_WATCHDOG, 'error: WatchdogSec= must be at least 2ms! (got %d usec)',
tonumber(timeoutptr[0]))
end
watchdog.config({ interval = interval })
- if verbose() then
- log('[watchdog] systemd watchdog enabled (check interval: %s ms, timeout: %s ms)',
- private.interval, timeout)
- end
+ log_info(ffi.C.LOG_GRP_WATCHDOG, 'systemd watchdog enabled (check interval: %s ms, timeout: %s ms)',
+ private.interval, timeout)
end
function watchdog.deinit()
modules.unload 'ta_update'
-- load test
+local ffi = require('ffi')
assert(type(env.TEST_FILE) == 'string')
-log('processing test file %s', env.TEST_FILE)
+log_info(ffi.C.LOG_GRP_SYSTEM, 'processing test file %s', env.TEST_FILE)
local tests = dofile(env.TEST_FILE)
-- run test after processed config file