From: Nicki Křížek Date: Thu, 23 Oct 2025 13:08:35 +0000 (+0200) Subject: Refactor NamedInstance.rndc() to use EnvCmd() interface X-Git-Tag: v9.21.17~53^2 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f33e2b6d877146576d9384f21dbe1ff075df6205;p=thirdparty%2Fbind9.git Refactor NamedInstance.rndc() to use EnvCmd() interface To unify the command handling, utilize EnvCmd() to handle rndc commands: 1. Remove isctest.rndc abstractions. They were intended for an upcoming python-only implementation. A couple of years later, it doesn't seem to be coming any time soon, so let's stick with the interface that makes sense today, i.e. use the same command handling interface everywhere. 2. Remove the specialized rndc.log in favor of the generic logging already implemented by isctest.run.cmd(). I believe the cause of the many rndc(log=False) invocations was that nobody wanted this extra file. Yet, logging everything by default makes sense for debugging, unless there's a good reason not to. In almost all cases, logging was switched to the default (enabled). 3. With the NamedInstance.rndc() call now returning CmdResult rather than a combined stdout+stderr string, adjust all the invocations to use `.out` or `.err` as necessary. 4. Replace some manual rndc invocations and their base argument construction with the standardized nsX.rndc() call. 5. In cases where rndc is expected to fail, utilize raise_on_exception=False and check the `.rc` from the result, rather than handling an exception. 6. In addzone/tests_rndc_deadlock.py, refactor the test slightly so that it does not use EnvCmd() at all, in order to avoid spamming the logs. This test calls rndc in a loop from multiple threads and such a test case is an exception which doesn't warrant changing the `isctest.run.cmd()` implementation. 
--- diff --git a/bin/tests/system/addzone/tests_rndc_deadlock.py b/bin/tests/system/addzone/tests_rndc_deadlock.py index fd2c9d58970..f805897f294 100755 --- a/bin/tests/system/addzone/tests_rndc_deadlock.py +++ b/bin/tests/system/addzone/tests_rndc_deadlock.py @@ -10,12 +10,12 @@ # information regarding copyright ownership. import concurrent.futures +import os +import subprocess import time import pytest -import isctest - pytestmark = pytest.mark.extra_artifacts( [ "ns*/*.nzf*", @@ -43,20 +43,19 @@ def rndc_loop(test_state, domain, ns3): ["delzone", domain], ] + args = [os.environ["RNDC"]] + ns3.rndc_args.split() while not test_state["finished"]: for command in rndc_commands: - ns3.rndc(" ".join(command), ignore_errors=True, log=False) + # avoid using ns3.rndc() directly to avoid log spam + subprocess.run(args + command, timeout=10, check=False) def check_if_server_is_responsive(ns3): """ Check if server status can be successfully retrieved using "rndc status" """ - try: - ns3.rndc("status", log=False) - return True - except isctest.rndc.RNDCException: - return False + cmd = ns3.rndc("status", raise_on_exception=False) + return cmd.rc == 0 def test_rndc_deadlock(ns3): diff --git a/bin/tests/system/addzone/tests_showzone_static.py b/bin/tests/system/addzone/tests_showzone_static.py index 6edaa497f6c..fcf278346b9 100644 --- a/bin/tests/system/addzone/tests_showzone_static.py +++ b/bin/tests/system/addzone/tests_showzone_static.py @@ -23,9 +23,9 @@ import pytest ) def test_showzone_static(ns1, templates, allow): templates.render("ns1/named.conf", {"allownewzones": allow}) - ns1.rndc("reload", log=False) - zoneconfig = ns1.rndc("showzone inlinesec.example", log=False) + ns1.rndc("reload") + response = ns1.rndc("showzone inlinesec.example") assert ( - zoneconfig - == 'zone "inlinesec.example" { type primary; file "inlinesec.db"; };\n' + 'zone "inlinesec.example" { type primary; file "inlinesec.db"; };' in response.out ) diff --git 
a/bin/tests/system/checkds/tests_checkds.py b/bin/tests/system/checkds/tests_checkds.py index b6c36e39085..8be24a830bc 100755 --- a/bin/tests/system/checkds/tests_checkds.py +++ b/bin/tests/system/checkds/tests_checkds.py @@ -189,33 +189,6 @@ def keystate_check(server, zone, key): assert val != 0 -def rekey(zone): - rndc = os.getenv("RNDC") - assert rndc is not None - - port = os.getenv("CONTROLPORT") - assert port is not None - - # rndc loadkeys. - rndc_cmd = [ - rndc, - "-c", - "../_common/rndc.conf", - "-p", - port, - "-s", - "10.53.0.9", - "loadkeys", - zone, - ] - controller = isctest.run.cmd(rndc_cmd) - - if controller.rc != 0: - isctest.log.error(f"rndc loadkeys {zone} failed") - - assert controller.rc == 0 - - class CheckDSTest(NamedTuple): zone: str logs_to_wait_for: Tuple[str] @@ -472,7 +445,7 @@ def test_checkds(ns2, ns9, params): for log_string in params.logs_to_wait_for: line = f"zone {params.zone}/IN (signed): checkds: {log_string}" while line not in ns9.log: - rekey(params.zone) + ns9.rndc(f"loadkeys {params.zone}") time_remaining -= 1 assert time_remaining, f'Timed out waiting for "{log_string}" to be logged' time.sleep(1) diff --git a/bin/tests/system/configloading/tests_configloading.py b/bin/tests/system/configloading/tests_configloading.py index ec017a8bfe5..f5b512861bc 100644 --- a/bin/tests/system/configloading/tests_configloading.py +++ b/bin/tests/system/configloading/tests_configloading.py @@ -11,8 +11,6 @@ from re import compile as Re -import isctest - def test_configloading_log(ns1): """ @@ -38,11 +36,11 @@ def test_configloading_log(ns1): watcher.wait_for_sequence(log_sequence) with ns1.watch_log_from_here() as watcher: - ns1.rndc("reconfig", log=False) + ns1.rndc("reconfig") watcher.wait_for_sequence(log_sequence) with ns1.watch_log_from_here() as watcher: - ns1.rndc("reload", log=False) + ns1.rndc("reload") watcher.wait_for_sequence(log_sequence) @@ -66,8 +64,6 @@ def test_reload_fails_log(ns1, templates): with 
ns1.watch_log_from_here() as watcher: templates.render("ns1/named.conf", {"wrongoption": True}) - try: - ns1.rndc("reload", log=False) - assert False - except isctest.rndc.RNDCException: - watcher.wait_for_sequence(log_sequence) + cmd = ns1.rndc("reload", raise_on_exception=False) + assert cmd.rc != 0 + watcher.wait_for_sequence(log_sequence) diff --git a/bin/tests/system/dnssec/tests_policy.py b/bin/tests/system/dnssec/tests_policy.py index e813bea4481..51801d5d86c 100644 --- a/bin/tests/system/dnssec/tests_policy.py +++ b/bin/tests/system/dnssec/tests_policy.py @@ -62,7 +62,7 @@ def test_signatures_validity(ns3, templates): templates.render("ns3/named.conf", {"long_sigs": True}) with ns3.watch_log_from_here() as watcher: - ns3.reconfigure(log=False) + ns3.reconfigure() watcher.wait_for_line( "zone_needdump: zone siginterval.example/IN (signed): enter" ) @@ -72,7 +72,7 @@ def test_signatures_validity(ns3, templates): assert after != before - ns3.rndc("sign siginterval.example", log=False) + ns3.rndc("sign siginterval.example") msg = isctest.query.create("siginterval.example.", "SOA") res = isctest.query.tcp(msg, "10.53.0.3") diff --git a/bin/tests/system/dnssec/tests_signing.py b/bin/tests/system/dnssec/tests_signing.py index 682c3d7cfb1..657e50b76ba 100644 --- a/bin/tests/system/dnssec/tests_signing.py +++ b/bin/tests/system/dnssec/tests_signing.py @@ -381,14 +381,13 @@ def test_cdnskey_signing(): ) def test_rndc_signing_except(cmd, ns3): # check that 'rndc signing' errors are handled - with pytest.raises(isctest.rndc.RNDCException): - ns3.rndc(cmd, log=False) - ns3.rndc("status", log=False) + ret = ns3.rndc(cmd, raise_on_exception=False) + assert ret.rc != 0 def test_rndc_signing_output(ns3): - response = ns3.rndc("signing -list dynamic.example", log=False) - assert "No signing records found" in response + response = ns3.rndc("signing -list dynamic.example") + assert "No signing records found" in response.out def test_zonestatus_signing(ns3): @@ -398,14 +397,14 
@@ def test_zonestatus_signing(ns3): # for the name and type, and check that the resigning time is # after the inception and before the expiration. - response = ns3.rndc("zonestatus secure.example", log=False) + response = ns3.rndc("zonestatus secure.example") # next resign node: secure.example/DNSKEY - nrn = [r for r in response.splitlines() if "next resign node" in r][0] + nrn = [r for r in response.out.splitlines() if "next resign node" in r][0] rdname, rdtype = nrn.split()[3].split("/") # next resign time: Thu, 24 Apr 2014 10:38:16 GMT - nrt = [r for r in response.splitlines() if "next resign time" in r][0] + nrt = [r for r in response.out.splitlines() if "next resign time" in r][0] rtime = " ".join(nrt.split()[3:]) rt = time.strptime(rtime, "%a, %d %b %Y %H:%M:%S %Z") when = int(time.strftime("%s", rt)) @@ -469,7 +468,7 @@ def test_offline_ksk_signing(ns2): def loadkeys(): pattern = Re(f"{zone}/IN.*next key event") with ns2.watch_log_from_here() as watcher: - ns2.rndc(f"loadkeys {zone}", log=False) + ns2.rndc(f"loadkeys {zone}") watcher.wait_for_line(pattern) ksk_only_types = ["DNSKEY", "CDNSKEY", "CDS"] @@ -506,7 +505,7 @@ def test_offline_ksk_signing(ns2): ZSKID2 = getkeyid(ZSK2) isctest.log.info("prepublish new ZSK") - ns2.rndc(f"dnssec -rollover -key {ZSKID} {zone}", log=False) + ns2.rndc(f"dnssec -rollover -key {ZSKID} {zone}") isctest.run.retry_with_timeout(check_zskcount, 5) isctest.log.info("make the new ZSK active") @@ -561,7 +560,7 @@ def test_offline_ksk_signing(ns2): settime("-sKns2", "-k", "HIDDEN", "now", "-z", "HIDDEN", "now", "-Dnow", ZSK) settime("-sKns2", "-k", "OMNIPRESENT", "now", "-z", "OMNIPRESENT", "now", ZSK2) loadkeys() - ns2.rndc(f"dnssec -rollover -key {ZSKID2} {zone}", log=False) + ns2.rndc(f"dnssec -rollover -key {ZSKID2} {zone}") with ns2.watch_log_from_start() as watcher: watcher.wait_for_line(f"{ZSKID3} (ZSK) is now published") diff --git a/bin/tests/system/dnssec/tests_validation.py b/bin/tests/system/dnssec/tests_validation.py 
index 7c3cc24d381..9446f73da18 100644 --- a/bin/tests/system/dnssec/tests_validation.py +++ b/bin/tests/system/dnssec/tests_validation.py @@ -134,10 +134,9 @@ def test_secure_root(ns4): # check that "rndc secroots" dumps the trusted keys key = int(getfrom("ns1/managed.key.id")) alg = os.environ["DEFAULT_ALGORITHM"] - expected = f"./{alg}/{key} ; static" - response = ns4.rndc("secroots -", log=False).splitlines() - assert expected in response - assert len(response) == 10 + response = ns4.rndc("secroots -") + assert f"./{alg}/{key} ; static" in response.out + assert len(response.out.splitlines()) == 10 def test_positive_validation_nsec(): @@ -709,7 +708,7 @@ def test_negative_validation_optout(): def test_cache(ns4): # check that key id's are logged when dumping the cache - ns4.rndc("dumpdb -cache", log=False) + ns4.rndc("dumpdb -cache") dumpdb = isctest.text.TextFile("ns4/named_dump.db") assert "; key id = " in dumpdb @@ -811,7 +810,7 @@ def test_insecure_proof_nsec(ns4): isctest.check.noadflag(res2) # insecurity proof using negative cache - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("insecure.example", "DS", cd=True) isctest.query.tcp(msg, "10.53.0.4") @@ -941,7 +940,7 @@ def test_validation_recovery(ns2, ns4): msg = isctest.query.create("target.peer-ns-spoof", "A", cd=True) res = isctest.query.tcp(msg, "10.53.0.4") isctest.check.servfail(res) - ns4.rndc("dumpdb", log=False) + ns4.rndc("dumpdb") dumpdb = isctest.text.TextFile("ns4/named_dump.db") assert "10.53.0.100" in dumpdb @@ -950,7 +949,7 @@ def test_validation_recovery(ns2, ns4): "ns2/peer.peer-ns-spoof.db.next", "ns2/peer.peer-ns-spoof.db.signed" ) with ns2.watch_log_from_here() as watcher: - ns2.rndc("reload peer.peer-ns-spoof", log=False) + ns2.rndc("reload peer.peer-ns-spoof") watcher.wait_for_line("zone peer.peer-ns-spoof/IN: loaded serial 2000042408") # and check we can resolve with the correct server address @@ -971,7 +970,7 @@ def test_validation_recovery(ns2, ns4): 
"ns2/dnskey-rrsigs-stripped.db.next", "ns2/dnskey-rrsigs-stripped.db.signed" ) with ns2.watch_log_from_here() as watcher: - ns2.rndc("reload dnskey-rrsigs-stripped", log=False) + ns2.rndc("reload dnskey-rrsigs-stripped") watcher.wait_for_line( "zone dnskey-rrsigs-stripped/IN: loaded serial 2000042408" ) @@ -995,7 +994,7 @@ def test_validation_recovery(ns2, ns4): "ns2/ds-rrsigs-stripped.db.next", "ns2/ds-rrsigs-stripped.db.signed" ) with ns2.watch_log_from_here() as watcher: - ns2.rndc("reload ds-rrsigs-stripped", log=False) + ns2.rndc("reload ds-rrsigs-stripped") watcher.wait_for_line("zone ds-rrsigs-stripped/IN: loaded serial 2000042408") # and check we can now resolve with the correct server address @@ -1006,7 +1005,7 @@ def test_validation_recovery(ns2, ns4): isctest.check.adflag(res2) # check recovery with mismatching NS - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("inconsistent", "NS", dnssec=False, cd=True) res = isctest.query.tcp(msg, "10.53.0.4") isctest.check.noadflag(res) @@ -1074,7 +1073,7 @@ def test_transitions(): def test_validating_forwarder(ns4, ns9): # check validating forwarder behavior with mismatching NS - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("inconsistent", "NS", dnssec=False, cd=True) res = isctest.query.tcp(msg, "10.53.0.9") isctest.check.noerror(res) @@ -1098,7 +1097,7 @@ def test_validating_forwarder(ns4, ns9): isctest.check.adflag(res) # check validating forwarder sends CD to validate with a local trust anchor - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("localkey.example", "SOA") res = isctest.query.tcp(msg, "10.53.0.4") isctest.check.servfail(res) @@ -1144,7 +1143,7 @@ def test_expired_signatures(ns4): isctest.check.noerror(res) # test TTL is capped at RRSIG expiry time - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expiring.example", "SOA", cd=True) res1 = isctest.query.tcp(msg, "10.53.0.4") msg = 
isctest.query.create("expiring.example", "SOA") @@ -1155,7 +1154,7 @@ def test_expired_signatures(ns4): assert rrset.ttl <= 60 # test TTL is capped at RRSIG expiry time in the additional section (NS) - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expiring.example", "NS", cd=True) res1 = isctest.query.tcp(msg, "10.53.0.4") msg = isctest.query.create("expiring.example", "NS") @@ -1166,7 +1165,7 @@ def test_expired_signatures(ns4): assert rrset.ttl <= 60 # test TTL is capped at RRSIG expiry time in the additional section (MX) - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expiring.example", "MX", cd=True) res1 = isctest.query.tcp(msg, "10.53.0.4") msg = isctest.query.create("expiring.example", "MX") @@ -1254,7 +1253,7 @@ def test_pending_ds(ns4): # a negative cache entry with trust level "pending" for the DS. prime # with a +cd DS query to produce the negative cache entry, then send a # query that uses that entry as part of the validation process. 
- ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("insecure.example", "DS", cd=True) res = isctest.query.tcp(msg, "10.53.0.4") isctest.check.noerror(res) diff --git a/bin/tests/system/dnssec/tests_validation_accept_expired.py b/bin/tests/system/dnssec/tests_validation_accept_expired.py index 51283aedf9d..d8f97f227b1 100644 --- a/bin/tests/system/dnssec/tests_validation_accept_expired.py +++ b/bin/tests/system/dnssec/tests_validation_accept_expired.py @@ -27,7 +27,7 @@ def bootstrap(): def test_accept_expired(ns4): # test TTL of about-to-expire rrsets with accept-expired - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expiring.example", "SOA") msg.flags |= flags.CD res1 = isctest.query.tcp(msg, "10.53.0.4") @@ -40,7 +40,7 @@ def test_accept_expired(ns4): # test TTL is capped at RRSIG expiry time in the additional section # with accept-expired - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expiring.example", "MX") msg.flags |= flags.CD res1 = isctest.query.tcp(msg, "10.53.0.4") @@ -52,7 +52,7 @@ def test_accept_expired(ns4): assert rrset.ttl <= 120 # test TTL of expired rrsets with accept-expired - ns4.rndc("flush", log=False) + ns4.rndc("flush") msg = isctest.query.create("expired.example", "SOA") msg.flags |= flags.CD res1 = isctest.query.tcp(msg, "10.53.0.4") diff --git a/bin/tests/system/dnssec/tests_validation_managed_keys.py b/bin/tests/system/dnssec/tests_validation_managed_keys.py index e6079e4b5a3..3cef4d67c96 100644 --- a/bin/tests/system/dnssec/tests_validation_managed_keys.py +++ b/bin/tests/system/dnssec/tests_validation_managed_keys.py @@ -44,10 +44,9 @@ def test_secure_root_managed(ns4): # check that "rndc secroots" dumps the trusted keys key = int(getfrom("ns1/managed.key.id")) alg = os.environ["DEFAULT_ALGORITHM"] - expected = f"./{alg}/{key} ; managed" - response = ns4.rndc("secroots -", log=False).splitlines() - assert expected in response - assert 
len(response) == 10 + response = ns4.rndc("secroots -") + assert f"./{alg}/{key} ; managed" in response.out + assert len(response.out.splitlines()) == 10 def test_positive_validation_nsec_managed(): @@ -103,6 +102,6 @@ def test_ds_managed(): def test_keydata_storage(ns4): - ns4.rndc("managed-keys sync", log=False) + ns4.rndc("managed-keys sync") with isctest.log.WatchLogFromStart("ns4/managed-keys.bind") as watcher: watcher.wait_for_line(["KEYDATA", "next refresh:"]) diff --git a/bin/tests/system/dnssec/tests_validation_multiview.py b/bin/tests/system/dnssec/tests_validation_multiview.py index 015e458349c..e22f1d420b2 100644 --- a/bin/tests/system/dnssec/tests_validation_multiview.py +++ b/bin/tests/system/dnssec/tests_validation_multiview.py @@ -58,7 +58,6 @@ def test_secure_roots(ns4): # check that "rndc secroots" dumps the trusted keys with multiple views key = int(getfrom("ns1/managed.key.id")) alg = os.environ["DEFAULT_ALGORITHM"] - expected = f"./{alg}/{key} ; static" - response = ns4.rndc("secroots -", log=False).splitlines() - assert expected in response, response - assert len(response) == 17 + response = ns4.rndc("secroots -") + assert f"./{alg}/{key} ; static" in response.out + assert len(response.out.splitlines()) == 17 diff --git a/bin/tests/system/isctest/__init__.py b/bin/tests/system/isctest/__init__.py index fb04a2a1e85..ce0dd75508c 100644 --- a/bin/tests/system/isctest/__init__.py +++ b/bin/tests/system/isctest/__init__.py @@ -13,7 +13,6 @@ from . import check from . import instance from . import query from . import kasp -from . import rndc from . import run from . import template from . 
import log diff --git a/bin/tests/system/isctest/instance.py b/bin/tests/system/isctest/instance.py index 9e8ac6b5dc1..83bd51ffa5f 100644 --- a/bin/tests/system/isctest/instance.py +++ b/bin/tests/system/isctest/instance.py @@ -13,7 +13,6 @@ from typing import List, NamedTuple, Optional -import logging import os from pathlib import Path import re @@ -21,9 +20,8 @@ import re import dns.message import dns.rcode -from .log import debug, info, WatchLogFromStart, WatchLogFromHere -from .rndc import RNDCBinaryExecutor, RNDCException, RNDCExecutor -from .run import perl +from .log import debug, WatchLogFromStart, WatchLogFromHere +from .run import CmdResult, EnvCmd, perl from .query import udp from .text import TextFile @@ -57,8 +55,6 @@ class NamedInstance: identifier: str, num: Optional[int] = None, ports: Optional[NamedPorts] = None, - rndc_logger: Optional[logging.Logger] = None, - rndc_executor: Optional[RNDCExecutor] = None, ) -> None: """ `identifier` is the name of the instance's directory @@ -71,12 +67,6 @@ class NamedInstance: this `named` instance is listening for various types of traffic (both DNS traffic and RNDC commands). Defaults to ports set by the test framework. - - `rndc_logger` is the `logging.Logger` to use for logging RNDC - commands sent to this `named` instance. - - `rndc_executor` is an object implementing the `RNDCExecutor` interface - that is used for executing RNDC commands on this `named` instance. 
""" self.directory = Path(identifier).absolute() if not self.directory.is_dir(): @@ -89,8 +79,14 @@ class NamedInstance: ports = NamedPorts.from_env() self.ports = ports self.log = TextFile(os.path.join(identifier, "named.run")) - self._rndc_executor = rndc_executor or RNDCBinaryExecutor() - self._rndc_logger = rndc_logger + + self._rndc_conf = Path("../_common/rndc.conf").absolute() + self._rndc = EnvCmd("RNDC", self.rndc_args) + + @property + def rndc_args(self) -> str: + """Base arguments for calling RNDC to control the instance.""" + return f"-c {self._rndc_conf} -s {self.ip} -p {self.ports.rndc}" @property def ip(self) -> str: @@ -108,52 +104,16 @@ class NamedInstance: assert num is None or num == parsed_num, "mismatched num and identifier" return parsed_num - def rndc(self, command: str, ignore_errors: bool = False, log: bool = True) -> str: + def rndc(self, command: str, timeout=10, **kwargs) -> CmdResult: """ Send `command` to this named instance using RNDC. Return the server's response. - If the RNDC command fails, an `RNDCException` is raised unless - `ignore_errors` is set to `True`. - - The RNDC command will be logged to `rndc.log` (along with the server's - response) unless `log` is set to `False`. - - ```python - def test_foo(servers): - # Send the "status" command to ns1. An `RNDCException` will be - # raised if the RNDC command fails. This command will be logged. - response = servers["ns1"].rndc("status") - - # Send the "thaw foo" command to ns2. No exception will be raised - # in case the RNDC command fails. This command will be logged - # (even if it fails). - response = servers["ns2"].rndc("thaw foo", ignore_errors=True) - - # Send the "stop" command to ns3. An `RNDCException` will be - # raised if the RNDC command fails, but this command will not be - # logged (the server's response will still be returned to the - # caller, though). 
- response = servers["ns3"].rndc("stop", log=False) - - # Send the "halt" command to ns4 in "fire & forget mode": no - # exceptions will be raised and no logging will take place (the - # server's response will still be returned to the caller, though). - response = servers["ns4"].rndc("stop", ignore_errors=True, log=False) - ``` + To suppress exceptions, redirect outputs, control logging, change + timeout etc. use keyword arguments which are passed to + isctest.run.cmd(). """ - try: - response = self._rndc_executor.call(self.ip, self.ports.rndc, command) - if log: - self._rndc_log(command, response) - except RNDCException as exc: - response = str(exc) - if log: - self._rndc_log(command, response) - if not ignore_errors: - raise - - return response + return self._rndc(command, timeout=timeout, **kwargs) def nsupdate( self, update_msg: dns.message.Message, expected_rcode=dns.rcode.NOERROR @@ -199,31 +159,15 @@ class NamedInstance: """ return WatchLogFromHere(self.log.path, timeout) - def reconfigure(self, **kwargs) -> None: + def reconfigure(self, **kwargs) -> CmdResult: """ Reconfigure this named `instance` and wait until reconfiguration is - finished. Raise an `RNDCException` if reconfiguration fails. + finished. """ with self.watch_log_from_here() as watcher: - self.rndc("reconfig", **kwargs) + cmd = self.rndc("reconfig", **kwargs) watcher.wait_for_line("any newly configured zones are now loaded") - - def _rndc_log(self, command: str, response: str) -> None: - """ - Log an `rndc` invocation (and its output) to the `rndc.log` file in the - current working directory. 
- """ - fmt = '%(ip)s: "%(command)s"\n%(separator)s\n%(response)s%(separator)s' - args = { - "ip": self.ip, - "command": command, - "separator": "-" * 80, - "response": response, - } - if self._rndc_logger is None: - info(fmt, args) - else: - self._rndc_logger.info(fmt, args) + return cmd def stop(self, args: Optional[List[str]] = None) -> None: """Stop the instance.""" diff --git a/bin/tests/system/isctest/kasp.py b/bin/tests/system/isctest/kasp.py index de3edc8e55f..a3b71577d98 100644 --- a/bin/tests/system/isctest/kasp.py +++ b/bin/tests/system/isctest/kasp.py @@ -859,19 +859,19 @@ def check_dnssecstatus(server, zone, keys, policy=None, view=None, verbose=False v = "-v " if view is None: - response = server.rndc(f"dnssec -status {v}{zone}", log=False) + response = server.rndc(f"dnssec -status {v}{zone}") else: - response = server.rndc(f"dnssec -status {v}{zone} in {view}", log=False) + response = server.rndc(f"dnssec -status {v}{zone} in {view}") if policy is None: - assert "Zone does not have dnssec-policy" in response + assert "Zone does not have dnssec-policy" in response.out return - assert f"DNSSEC status for zone '{zone}' using policy '{policy}'" in response + assert f"DNSSEC status for zone '{zone}' using policy '{policy}'" in response.out for key in keys: if not key.external: - assert f"{key.role()} {key.tag}" in response + assert f"{key.role()} {key.tag}" in response.out def _check_signatures( diff --git a/bin/tests/system/isctest/rndc.py b/bin/tests/system/isctest/rndc.py deleted file mode 100644 index d4a0a1bd77c..00000000000 --- a/bin/tests/system/isctest/rndc.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) Internet Systems Consortium, Inc. ("ISC") -# -# SPDX-License-Identifier: MPL-2.0 -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, you can obtain one at https://mozilla.org/MPL/2.0/. 
-# -# See the COPYRIGHT file distributed with this work for additional -# information regarding copyright ownership. - -import abc -import os -import subprocess - - -class RNDCExecutor(abc.ABC): - """ - An interface which RNDC executors have to implement in order for the - `NamedInstance` class to be able to use them. - """ - - @abc.abstractmethod - def call(self, ip: str, port: int, command: str) -> str: - """ - Send RNDC `command` to the `named` instance at `ip:port` and return the - server's response. - """ - - -class RNDCException(Exception): - """ - Raised by classes implementing the `RNDCExecutor` interface when sending an - RNDC command fails for any reason. - """ - - -class RNDCBinaryExecutor(RNDCExecutor): - """ - An `RNDCExecutor` which sends RNDC commands to servers using the `rndc` - binary. - """ - - def __init__(self) -> None: - """ - This class needs the `RNDC` environment variable to be set to the path - to the `rndc` binary to use. - """ - rndc_path = os.environ.get("RNDC", "/bin/false") - rndc_conf = os.path.join("..", "_common", "rndc.conf") - self._base_cmdline = [rndc_path, "-c", rndc_conf] - - def call(self, ip: str, port: int, command: str) -> str: - """ - Send RNDC `command` to the `named` instance at `ip:port` and return the - server's response. 
- """ - cmdline = self._base_cmdline[:] - cmdline.extend(["-s", ip]) - cmdline.extend(["-p", str(port)]) - cmdline.extend(command.split()) - - try: - return subprocess.check_output( - cmdline, stderr=subprocess.STDOUT, timeout=10, encoding="utf-8" - ) - except subprocess.SubprocessError as exc: - msg = getattr(exc, "output", "RNDC exception occurred") - raise RNDCException(msg) from exc diff --git a/bin/tests/system/kasp/tests_kasp.py b/bin/tests/system/kasp/tests_kasp.py index 2e8d038e294..6b9fd9d228f 100644 --- a/bin/tests/system/kasp/tests_kasp.py +++ b/bin/tests/system/kasp/tests_kasp.py @@ -11,6 +11,7 @@ import os import shutil +import subprocess import time from datetime import timedelta @@ -200,7 +201,7 @@ def cb_ixfr_is_signed(expected_updates, params, ksks=None, zsks=None): f"expected updates {expected_updates} policy {policy} ksks {ksks} zsks {zsks}" ) shutil.copyfile(f"ns2/{zone}.db.in2", f"ns2/{zone}.db") - servers["ns2"].rndc(f"reload {zone}", log=False) + servers["ns2"].rndc(f"reload {zone}") def update_is_signed(): parts = update.split() @@ -314,7 +315,7 @@ def cb_remove_keyfiles(params, ksks=None, zsks=None): os.remove(k.statefile) with servers["ns3"].watch_log_from_here() as watcher: - servers["ns3"].rndc(f"loadkeys {zone}", log=False) + servers["ns3"].rndc(f"loadkeys {zone}") watcher.wait_for_line( f"zone {zone}/IN (signed): zone_rekey:zone_verifykeys failed: some key files are missing" ) @@ -806,9 +807,9 @@ def test_kasp_inherit_view(number, dynamic, inline_signing, txt_rdata, ns4): isctest.kasp.check_dnssecstatus(ns4, zone, keys, policy=policy, view=view) isctest.kasp.check_apex(ns4, zone, keys, [], tsig=tsig) # check zonestatus - response = ns4.rndc(f"zonestatus {zone} in {view}", log=False) - assert f"dynamic: {dynamic}" in response - assert f"inline signing: {inline_signing}" in response + response = ns4.rndc(f"zonestatus {zone} in {view}") + assert f"dynamic: {dynamic}" in response.out + assert f"inline signing: {inline_signing}" in 
response.out # check subdomain fqdn = f"{zone}." qname = f"view.{zone}." @@ -869,7 +870,7 @@ def test_kasp_default(ns3): state_stat = os.stat(key.statefile) with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"loadkeys {zone}", log=False) + ns3.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") assert privkey_stat.st_mtime == os.stat(key.privatefile).st_mtime @@ -878,7 +879,7 @@ def test_kasp_default(ns3): # again with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"loadkeys {zone}", log=False) + ns3.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") assert privkey_stat.st_mtime == os.stat(key.privatefile).st_mtime @@ -888,7 +889,7 @@ def test_kasp_default(ns3): # modify unsigned zone file and check that new record is signed. isctest.log.info("check that an updated zone signs the new record") shutil.copyfile("ns3/template2.db.in", f"ns3/{zone}.db") - ns3.rndc(f"reload {zone}", log=False) + ns3.rndc(f"reload {zone}") def update_is_signed(): parts = update.split() @@ -909,7 +910,7 @@ def test_kasp_default(ns3): shutil.move(f"{key.privatefile}", f"{key.path}.offline") expectmsg = "zone_rekey:zone_verifykeys failed: some key files are missing" with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"loadkeys {zone}", log=False) + ns3.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"zone {zone}/IN (signed): {expectmsg}") # Nothing has changed. expected[0].private = False # noqa @@ -986,7 +987,7 @@ def test_kasp_dynamic(ns3): # Update zone with freeze/thaw. 
isctest.log.info("check dynamic zone is updated and signed after freeze and thaw") with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"freeze {zone}", log=False) + ns3.rndc(f"freeze {zone}") watcher.wait_for_line(f"freezing zone '{zone}/IN': success") time.sleep(1) @@ -995,7 +996,7 @@ def test_kasp_dynamic(ns3): time.sleep(1) with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"thaw {zone}", log=False) + ns3.rndc(f"thaw {zone}") watcher.wait_for_line(f"thawing zone '{zone}/IN': success") expected_updates = [f"a.{zone}. A 10.0.0.1", f"d.{zone}. A 10.0.0.44"] @@ -1025,7 +1026,7 @@ def test_kasp_dynamic(ns3): "check dynamic inline-signed zone is updated and signed after freeze and thaw" ) with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"freeze {zone}", log=False) + ns3.rndc(f"freeze {zone}") watcher.wait_for_line(f"freezing zone '{zone}/IN': success") time.sleep(1) @@ -1033,7 +1034,7 @@ def test_kasp_dynamic(ns3): time.sleep(1) with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"thaw {zone}", log=False) + ns3.rndc(f"thaw {zone}") watcher.wait_for_line(f"thawing zone '{zone}/IN': success") expected_updates = [f"a.{zone}. A 10.0.0.11", f"d.{zone}. 
A 10.0.0.44"] @@ -1090,7 +1091,7 @@ def test_kasp_checkds(ns3): ksk = ksks[0] isctest.log.info("check if checkds -publish correctly sets DSPublish") - ns3.rndc(f"dnssec -checkds -when {now} published {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} published {zone}") metadata = f"DSPublish: {now}" isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[0].metadata["DSState"] = "rumoured" @@ -1098,7 +1099,7 @@ def test_kasp_checkds(ns3): isctest.kasp.check_keys(zone, keys, expected) isctest.log.info("check if checkds -withdrawn correctly sets DSRemoved") - ns3.rndc(f"dnssec -checkds -when {now} withdrawn {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} withdrawn {zone}") metadata = f"DSRemoved: {now}" isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[0].metadata["DSState"] = "unretentive" @@ -1138,8 +1139,8 @@ def test_kasp_checkds_doubleksk(ns3): isctest.log.info("check invalid checkds commands") def check_error(): - response = ns3.rndc(test["command"], log=False) - assert test["error"] in response + response = ns3.rndc(test["command"], stderr=subprocess.STDOUT) + assert test["error"] in response.out test_cases = [ { @@ -1163,7 +1164,7 @@ def test_kasp_checkds_doubleksk(ns3): check_error() isctest.log.info("check if checkds -publish -key correctly sets DSPublish") - ns3.rndc(f"dnssec -checkds -when {now} -key {ksk.tag} published {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} -key {ksk.tag} published {zone}") metadata = f"DSPublish: {now}" isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[0].metadata["DSState"] = "rumoured" @@ -1172,7 +1173,7 @@ def test_kasp_checkds_doubleksk(ns3): isctest.log.info("check if checkds -withdrawn -key correctly sets DSRemoved") ksk = ksks[1] - ns3.rndc(f"dnssec -checkds -when {now} -key {ksk.tag} withdrawn {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} -key {ksk.tag} withdrawn {zone}") metadata = f"DSRemoved: {now}" 
isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[1].metadata["DSState"] = "unretentive" @@ -1205,7 +1206,7 @@ def test_kasp_checkds_csk(ns3): ksk = keys[0] isctest.log.info("check if checkds -publish csk correctly sets DSPublish") - ns3.rndc(f"dnssec -checkds -when {now} published {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} published {zone}") metadata = f"DSPublish: {now}" isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[0].metadata["DSState"] = "rumoured" @@ -1213,7 +1214,7 @@ def test_kasp_checkds_csk(ns3): isctest.kasp.check_keys(zone, keys, expected) isctest.log.info("check if checkds -withdrawn csk correctly sets DSRemoved") - ns3.rndc(f"dnssec -checkds -when {now} withdrawn {zone}", log=False) + ns3.rndc(f"dnssec -checkds -when {now} withdrawn {zone}") metadata = f"DSRemoved: {now}" isctest.run.retry_with_timeout(wait_for_metadata, timeout=3) expected[0].metadata["DSState"] = "unretentive" @@ -1597,7 +1598,7 @@ def test_kasp_zsk_retired(ns3): # Load again, make sure the purged key is not an issue when verifying keys. with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"loadkeys {zone}", log=False) + ns3.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") msg = f"zone {zone}/IN (signed): zone_rekey:zone_verifykeys failed: some key files are missing" @@ -1621,7 +1622,7 @@ def test_kasp_purge_keys(ns4): # Reconfig, make sure the purged key is not an issue when verifying keys. 
shutil.copyfile("ns4/purgekeys2.conf", "ns4/purgekeys.conf") with ns4.watch_log_from_here() as watcher: - ns4.rndc("reconfig", log=False) + ns4.rndc("reconfig") watcher.wait_for_line(f"keymgr: {zone} done") msg = f"zone {zone}/IN/example1 (signed): zone_rekey:zone_verifykeys failed: some key files are missing" @@ -1667,7 +1668,7 @@ def test_kasp_reload_restart(ns6): shutil.copyfile(f"ns6/{zone}2.db.in", f"ns6/{zone}.db") with ns6.watch_log_from_here() as watcher: - ns6.rndc("reload", log=False) + ns6.rndc("reload") watcher.wait_for_line("all zones loaded") newttl = 300 @@ -1744,7 +1745,7 @@ def test_kasp_manual_mode(ns3): # Force step. with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"dnssec -step {zone}", log=False) + ns3.rndc(f"dnssec -step {zone}") watcher.wait_for_line( f"zone {zone}/IN (signed): zone_rekey:zone_verifykeys failed: some key files are missing" ) @@ -1757,7 +1758,7 @@ def test_kasp_manual_mode(ns3): # Load keys. with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"loadkeys {zone}", log=False) + ns3.rndc(f"loadkeys {zone}") watcher.wait_for_line(blockmsg) # Check keys again, make sure no new keys are created. @@ -1768,7 +1769,7 @@ def test_kasp_manual_mode(ns3): # Force step. 
with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"dnssec -step {zone}", log=False) + ns3.rndc(f"dnssec -step {zone}") watcher.wait_for_line( f"zone {zone}/IN (signed): zone_rekey done: key {tag}/ECDSAP256SHA256" ) diff --git a/bin/tests/system/keepalive/tests_keepalive.py b/bin/tests/system/keepalive/tests_keepalive.py index 1025ac53840..d0efc6af542 100644 --- a/bin/tests/system/keepalive/tests_keepalive.py +++ b/bin/tests/system/keepalive/tests_keepalive.py @@ -20,7 +20,7 @@ pytestmark = pytest.mark.extra_artifacts( def test_dig_tcp_keepalive_handling(named_port, ns2): def get_keepalive_options_received(): - ns2.rndc("stats", log=False) + ns2.rndc("stats") options_received = 0 with open("ns2/named.stats", "r", encoding="utf-8") as ns2_stats_file: for line in ns2_stats_file: @@ -55,12 +55,12 @@ def test_dig_tcp_keepalive_handling(named_port, ns2): ) isctest.log.info("check a re-configured keepalive value") - response = ns2.rndc("tcp-timeouts 300 300 300 200 100", log=False) - assert "tcp-initial-timeout=300" in response - assert "tcp-idle-timeout=300" in response - assert "tcp-keepalive-timeout=300" in response - assert "tcp-advertised-timeout=200" in response - assert "tcp-primaries-timeout=100" in response + response = ns2.rndc("tcp-timeouts 300 300 300 200 100") + assert "tcp-initial-timeout=300" in response.out + assert "tcp-idle-timeout=300" in response.out + assert "tcp-keepalive-timeout=300" in response.out + assert "tcp-advertised-timeout=200" in response.out + assert "tcp-primaries-timeout=100" in response.out assert ( "; TCP-KEEPALIVE: 20.0 secs" in dig("+tcp +keepalive foo.example. 
@10.53.0.2").out diff --git a/bin/tests/system/ksr/tests_ksr.py b/bin/tests/system/ksr/tests_ksr.py index 553eda1dff2..78c8f841b90 100644 --- a/bin/tests/system/ksr/tests_ksr.py +++ b/bin/tests/system/ksr/tests_ksr.py @@ -740,13 +740,12 @@ def test_ksr_common(ns1): f"addzone {zone} " + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' - + "};", - log=False, + + "};" ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr -import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output @@ -817,12 +816,11 @@ def test_ksr_lastbundle(ns1): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr -import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output @@ -896,12 +894,11 @@ def test_ksr_inthemiddle(ns1): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr -import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output @@ -967,12 +964,11 @@ def check_ksr_rekey_logs_error(server, zone, policy, offset, end): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - server.rndc(f"skr -import {skr_fname} {zone}", log=False) + server.rndc(f"skr -import {skr_fname} {zone}") # test that rekey logs error time_remaining = 10 @@ -1090,12 +1086,11 @@ def test_ksr_unlimited(ns1): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr 
-import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output @@ -1201,12 +1196,11 @@ def test_ksr_twotone(ns1): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr -import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output @@ -1280,12 +1274,11 @@ def test_ksr_kskroll(ns1): + "{ type primary; file " + f'"{zone}.db"; dnssec-policy {policy}; ' + "};", - log=False, ) # import skr shutil.copyfile(skr_fname, f"ns1/{skr_fname}") - ns1.rndc(f"skr -import {skr_fname} {zone}", log=False) + ns1.rndc(f"skr -import {skr_fname} {zone}") # test zone is correctly signed # - check rndc dnssec -status output diff --git a/bin/tests/system/multisigner/tests_multisigner.py b/bin/tests/system/multisigner/tests_multisigner.py index 020d39791d1..6fdb177bd9f 100644 --- a/bin/tests/system/multisigner/tests_multisigner.py +++ b/bin/tests/system/multisigner/tests_multisigner.py @@ -182,7 +182,7 @@ def check_add_zsk(server, zone, keys, expected, extra_keys, extra, primary=None) # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -220,7 +220,7 @@ def _check_remove_zsk_fail( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -263,7 +263,7 @@ def check_remove_zsk( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. 
@@ -299,7 +299,7 @@ def check_add_cdnskey(server, zone, keys, expected, extra_keys, extra, primary=N # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -336,7 +336,7 @@ def _check_remove_cdnskey_fail( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -379,7 +379,7 @@ def check_remove_cdnskey( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -415,7 +415,7 @@ def check_add_cds(server, zone, keys, expected, extra_keys, extra, primary=None) # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -452,7 +452,7 @@ def _check_remove_cds_fail( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. @@ -495,7 +495,7 @@ def check_remove_cds( # Trigger keymgr. with server.watch_log_from_here() as watcher: - server.rndc(f"loadkeys {zone}", log=False) + server.rndc(f"loadkeys {zone}") watcher.wait_for_line(f"keymgr: {zone} done") # Check again. 
diff --git a/bin/tests/system/nsec3/tests_nsec3_change.py b/bin/tests/system/nsec3/tests_nsec3_change.py index a65a1789be2..4ebb2be2bc6 100644 --- a/bin/tests/system/nsec3/tests_nsec3_change.py +++ b/bin/tests/system/nsec3/tests_nsec3_change.py @@ -114,4 +114,4 @@ def test_nsec3_case(ns3): # Using rndc signing -nsec3param (should fail) isctest.log.info(f"use rndc signing -nsec3param {zone} to change NSEC3 settings") response = ns3.rndc(f"signing -nsec3param 1 1 12 ffff {zone}") - assert "zone uses dnssec-policy, use rndc dnssec command instead" in response + assert "zone uses dnssec-policy, use rndc dnssec command instead" in response.out diff --git a/bin/tests/system/nta/tests_nta.py b/bin/tests/system/nta/tests_nta.py index f4ca8d3e7f8..70e8f088914 100644 --- a/bin/tests/system/nta/tests_nta.py +++ b/bin/tests/system/nta/tests_nta.py @@ -10,17 +10,12 @@ # information regarding copyright ownership. import os -import re +from re import compile as Re import time import isctest -# helper functions -def hasmatch(regex, blob): - return re.search(regex, blob, flags=re.MULTILINE) - - def active(blob): return len([x for x in blob.splitlines() if " expiry" in x]) @@ -48,8 +43,8 @@ def test_initial(): def test_nta_validate_except(servers): ns4 = servers["ns4"] - response = ns4.rndc("secroots -", log=False) - assert hasmatch("^corp: permanent", response) + response = ns4.rndc("secroots -") + assert Re("(?m)^corp: permanent").search(response.out) # check insecure local domain works with validate-except m = isctest.query.create("www.corp", "NS") @@ -62,41 +57,39 @@ def test_nta_bogus_lifetimes(servers): ns4 = servers["ns4"] # no nta lifetime specified: - response = ns4.rndc("nta -l '' foo", ignore_errors=True, log=False) - assert "'nta' failed: bad ttl" in response + response = ns4.rndc("nta -l '' foo", raise_on_exception=False) + assert "'nta' failed: bad ttl" in response.err # bad nta lifetime: - response = ns4.rndc("nta -l garbage foo", ignore_errors=True, log=False) - assert "'nta' 
failed: bad ttl" in response + response = ns4.rndc("nta -l garbage foo", raise_on_exception=False) + assert "'nta' failed: bad ttl" in response.err # excessive nta lifetime: - response = ns4.rndc("nta -l 7d1h foo", ignore_errors=True, log=False) - assert "'nta' failed: out of range" in response + response = ns4.rndc("nta -l 7d1h foo", raise_on_exception=False) + assert "'nta' failed: out of range" in response.err def test_nta_install(servers): global start ns4 = servers["ns4"] - ns4.rndc("nta -f -l 20s bogus.example", log=False) - ns4.rndc("nta badds.example", log=False) + ns4.rndc("nta -f -l 20s bogus.example") + ns4.rndc("nta badds.example") # NTAs should persist after reconfig - with ns4.watch_log_from_here() as watcher: - ns4.reconfigure(log=False) - watcher.wait_for_line("any newly configured zones are now loaded") + ns4.reconfigure() - response = ns4.rndc("nta -d", log=False) - assert len(response.splitlines()) == 3 + response = ns4.rndc("nta -d") + assert len(response.out.splitlines()) == 3 - ns4.rndc("nta secure.example", log=False) - ns4.rndc("nta fakenode.secure.example", log=False) + ns4.rndc("nta secure.example") + ns4.rndc("nta fakenode.secure.example") with ns4.watch_log_from_here() as watcher: - ns4.rndc("reload", log=False) + ns4.rndc("reload") watcher.wait_for_line("all zones loaded") - response = ns4.rndc("nta -d", log=False) - assert len(response.splitlines()) == 5 + response = ns4.rndc("nta -d") + assert len(response.out.splitlines()) == 5 start = time.time() @@ -125,11 +118,11 @@ def test_nta_behavior(servers): isctest.check.noadflag(res) ns4 = servers["ns4"] - response = ns4.rndc("secroots -", log=False) - assert hasmatch("^bogus.example: expiry", response) - assert hasmatch("^badds.example: expiry", response) - assert hasmatch("^secure.example: expiry", response) - assert hasmatch("^fakenode.secure.example: expiry", response) + response = ns4.rndc("secroots -") + assert Re("(?m)^bogus.example: expiry").search(response.out) + assert Re("(?m)^badds.example: 
expiry").search(response.out) + assert Re("(?m)^secure.example: expiry").search(response.out) + assert Re("(?m)^fakenode.secure.example: expiry").search(response.out) # secure.example and badds.example used the default nta-duration # (configured as 12s in ns4/named1.conf), but the nta recheck interval @@ -162,12 +155,12 @@ def test_nta_behavior(servers): if delay > 0: time.sleep(delay) - response = ns4.rndc("nta -d", log=False) - assert active(response) <= 2 + response = ns4.rndc("nta -d") + assert active(response.out) <= 2 - response = ns4.rndc("secroots -", log=False) - assert hasmatch("bogus.example: expiry", response) - assert not hasmatch("badds.example: expiry", response) + response = ns4.rndc("secroots -") + assert Re("bogus.example: expiry").search(response.out) + assert not Re("badds.example: expiry").search(response.out) m = isctest.query.create("b.bogus.example", "A") res = isctest.query.tcp(m, "10.53.0.4") @@ -188,8 +181,8 @@ def test_nta_behavior(servers): if delay > 0: time.sleep(delay) - response = ns4.rndc("nta -d", log=False) - assert active(response) == 0 + response = ns4.rndc("nta -d") + assert active(response.out) == 0 m = isctest.query.create("d.secure.example", "A") res = isctest.query.tcp(m, "10.53.0.4") @@ -204,31 +197,31 @@ def test_nta_removals(servers): ns4 = servers["ns4"] - ns4.rndc("nta badds.example", log=False) + ns4.rndc("nta badds.example") - response = ns4.rndc("nta -d", log=False) - assert hasmatch("^badds.example/_default: expiry", response) + response = ns4.rndc("nta -d") + assert Re("(?m)^badds.example/_default: expiry").search(response.out) m = isctest.query.create("a.badds.example", "A") res = isctest.query.tcp(m, "10.53.0.4") isctest.check.noerror(res) isctest.check.noadflag(res) - response = ns4.rndc("nta -remove badds.example", log=False) - assert "Negative trust anchor removed: badds.example" in response + response = ns4.rndc("nta -remove badds.example") + assert "Negative trust anchor removed: badds.example" in response.out - 
response = ns4.rndc("nta -d", log=False) - assert not hasmatch("^badds.example/_default: expiry", response) + response = ns4.rndc("nta -d") + assert not Re("(?m)^badds.example/_default: expiry").search(response.out) res = isctest.query.tcp(m, "10.53.0.4") isctest.check.servfail(res) isctest.check.noadflag(res) # remove non-existent NTA three times - ns4.rndc("nta -r foo", log=False) - ns4.rndc("nta -remove foo", log=False) - response = ns4.rndc("nta -r foo", log=False) - assert "not found" in response + ns4.rndc("nta -r foo") + ns4.rndc("nta -remove foo") + response = ns4.rndc("nta -r foo") + assert "not found" in response.out def test_nta_restarts(servers): @@ -237,14 +230,14 @@ # test NTA persistence across restarts ns4 = servers["ns4"] - response = ns4.rndc("nta -d", log=False) - assert active(response) == 0 + response = ns4.rndc("nta -d") + assert active(response.out) == 0 start = time.time() - ns4.rndc("nta -f -l 30s bogus.example", log=False) - ns4.rndc("nta -f -l 10s badds.example", log=False) - response = ns4.rndc("nta -d", log=False) - assert active(response) == 2 + ns4.rndc("nta -f -l 30s bogus.example") + ns4.rndc("nta -f -l 10s badds.example") + response = ns4.rndc("nta -d") + assert active(response.out) == 2 # stop the server ns4.stop() @@ -256,9 +249,9 @@ time.sleep(delay) ns4.start(["--noclean", "--restart", "--port", os.environ["PORT"]]) - response = ns4.rndc("nta -d", log=False) - assert active(response) == 1 - assert hasmatch("^bogus.example/_default: expiry", response) + response = ns4.rndc("nta -d") + assert active(response.out) == 1 + assert Re("(?m)^bogus.example/_default: expiry").search(response.out) m = isctest.query.create("a.badds.example", "A") res = isctest.query.tcp(m, "10.53.0.4") @@ -269,7 +262,7 @@ isctest.check.noerror(res) isctest.check.noadflag(res) - ns4.rndc("nta -r bogus.example", log=False) + ns4.rndc("nta -r bogus.example") def 
test_nta_regular(servers): @@ -279,8 +272,8 @@ def test_nta_regular(servers): # check "regular" attribute in NTA file ns4 = servers["ns4"] - response = ns4.rndc("nta -d", log=False) - assert active(response) == 0 + response = ns4.rndc("nta -d") + assert active(response.out) == 0 # secure.example validates with AD=1 m = isctest.query.create("a.secure.example", "A") @@ -309,12 +302,12 @@ def test_nta_regular(servers): if delay > 0: time.sleep(delay) - response = ns4.rndc("nta -d", log=False) - assert active(response) == 0 + response = ns4.rndc("nta -d") + assert active(response.out) == 0 # NTA lifted; secure.example. flush the cache to trigger a new query, # and it should now return an AD=1 answer. - ns4.rndc("flushtree secure.example", log=False) + ns4.rndc("flushtree secure.example") res = isctest.query.tcp(m, "10.53.0.4") isctest.check.noerror(res) isctest.check.adflag(res) @@ -328,10 +321,10 @@ def test_nta_forced(servers): ns4 = servers["ns4"] # just to be certain, clean up any existing NTA first - ns4.rndc("nta -r secure.example", log=False) + ns4.rndc("nta -r secure.example") - response = ns4.rndc("nta -d", log=False) - assert active(response) == 0 + response = ns4.rndc("nta -d") + assert active(response.out) == 0 # secure.example validates with AD=1 m = isctest.query.create("a.secure.example", "A") @@ -361,7 +354,7 @@ def test_nta_forced(servers): time.sleep(delay) # NTA lifted; secure.example. 
should still return an AD=0 answer - ns4.rndc("flushtree secure.example", log=False) + ns4.rndc("flushtree secure.example") res = isctest.query.tcp(m, "10.53.0.4") isctest.check.noerror(res) isctest.check.noadflag(res) @@ -371,7 +364,7 @@ def test_nta_clamping(servers): ns4 = servers["ns4"] # clean up any existing NTA - ns4.rndc("nta -r secure.example", log=False) + ns4.rndc("nta -r secure.example") # stop the server, update _default.nta, restart ns4.stop() @@ -383,10 +376,10 @@ def test_nta_clamping(servers): ns4.start(["--noclean", "--restart", "--port", os.environ["PORT"]]) # check that NTA lifetime read from file is clamped to 1 week. - response = ns4.rndc("nta -d", log=False) - assert active(response) == 1 + response = ns4.rndc("nta -d") + assert active(response.out) == 1 - nta = next((s for s in response.splitlines() if " expiry" in s), None) + nta = next((s for s in response.out.splitlines() if " expiry" in s), None) assert nta is not None nta = nta.split(" ") @@ -401,7 +394,7 @@ def test_nta_clamping(servers): assert abs(nextweek - then < 3610) # remove the NTA - ns4.rndc("nta -r secure.example", log=False) + ns4.rndc("nta -r secure.example") def test_nta_forward(servers): @@ -414,14 +407,14 @@ def test_nta_forward(servers): isctest.check.noadflag(res) # add NTA and expect resolution to succeed - ns9.rndc("nta badds.example", log=False) + ns9.rndc("nta badds.example") res = isctest.query.tcp(m, "10.53.0.9") isctest.check.noerror(res) isctest.check.rr_count_eq(res.answer, 2) isctest.check.noadflag(res) # remove NTA and expect resolution to fail again - ns9.rndc("nta -remove badds.example", log=False) + ns9.rndc("nta -remove badds.example") res = isctest.query.tcp(m, "10.53.0.9") isctest.check.servfail(res) isctest.check.empty_answer(res) diff --git a/bin/tests/system/nzd2nzf/tests_nzd2nzf.py b/bin/tests/system/nzd2nzf/tests_nzd2nzf.py index 5ad766c4cfd..790e937556a 100644 --- a/bin/tests/system/nzd2nzf/tests_nzd2nzf.py +++ 
b/bin/tests/system/nzd2nzf/tests_nzd2nzf.py @@ -34,7 +34,7 @@ def test_nzd2nzf(ns1): isctest.check.refused(res) # add new zone into the default NZD using "rndc addzone" - ns1.rndc(f"addzone {zone_data}", log=False) + ns1.rndc(f"addzone {zone_data}") # query for existing zone data res = isctest.query.tcp(msg, ns1.ip) diff --git a/bin/tests/system/resolver/tests_resolver.py b/bin/tests/system/resolver/tests_resolver.py index 286a33e4e08..4e99315bcb6 100644 --- a/bin/tests/system/resolver/tests_resolver.py +++ b/bin/tests/system/resolver/tests_resolver.py @@ -22,18 +22,20 @@ def test_resolver_cache_reloadfails(ns1, templates): isctest.check.noerror(res) assert res.answer[0].ttl == 300 templates.render("ns1/named.conf", {"wrongoption": True}) - try: - # The first reload fails, and the old cache list will be preserved - ns1.rndc("reload") - except isctest.rndc.RNDCException: - templates.render("ns1/named.conf", {"wrongoption": False}) - # The second reload succeed, and the cache is still there, as preserved - # from the old cache list - ns1.rndc("reload") - time.sleep(3) - msg = isctest.query.create("www.example.org.", "A") - res = isctest.query.udp(msg, "10.53.0.1") - isctest.check.noerror(res) - # The ttl being lower than 300 (provided by fake authoritative) proves - # the cache is still in use - assert res.answer[0].ttl < 300 + + # The first reload fails, and the old cache list will be preserved + cmd = ns1.rndc("reload", raise_on_exception=False) + assert cmd.rc != 0 + + templates.render("ns1/named.conf", {"wrongoption": False}) + # The second reload succeed, and the cache is still there, as preserved + # from the old cache list + ns1.rndc("reload") + time.sleep(3) + msg = isctest.query.create("www.example.org.", "A") + res = isctest.query.udp(msg, "10.53.0.1") + isctest.check.noerror(res) + + # The ttl being lower than 300 (provided by fake authoritative) proves + # the cache is still in use + assert res.answer[0].ttl < 300 diff --git 
a/bin/tests/system/rollover-zsk-prepub/tests_rollover_zsk_prepublication.py b/bin/tests/system/rollover-zsk-prepub/tests_rollover_zsk_prepublication.py index 17b7d38a57e..f0201296fca 100644 --- a/bin/tests/system/rollover-zsk-prepub/tests_rollover_zsk_prepublication.py +++ b/bin/tests/system/rollover-zsk-prepub/tests_rollover_zsk_prepublication.py @@ -224,7 +224,7 @@ def test_zsk_prepub_step3(tld, alg, size, ns3): # Force full resign and check all signatures have been replaced. with ns3.watch_log_from_here() as watcher: - ns3.rndc(f"sign {zone}", log=False) + ns3.rndc(f"sign {zone}") watcher.wait_for_line(f"zone_needdump: zone {zone}/IN (signed): enter") step["smooth"] = False diff --git a/bin/tests/system/rollover/tests_rollover_manual.py b/bin/tests/system/rollover/tests_rollover_manual.py index 0fa5edc58fd..75ae7d86c7a 100644 --- a/bin/tests/system/rollover/tests_rollover_manual.py +++ b/bin/tests/system/rollover/tests_rollover_manual.py @@ -154,7 +154,7 @@ def test_rollover_manual(ns3): # Try to schedule a ZSK rollover for an inactive key (should fail). 
zsk = expected[3].key response = ns3.rndc(f"dnssec -rollover -key {zsk.tag} {zone}") - assert "key is not actively signing" in response + assert "key is not actively signing" in response.out def test_rollover_manual_zrrsig_rumoured(ns3): diff --git a/bin/tests/system/showconf/tests_showconf.py b/bin/tests/system/showconf/tests_showconf.py index ae7e3833cc1..e7dff5c7483 100644 --- a/bin/tests/system/showconf/tests_showconf.py +++ b/bin/tests/system/showconf/tests_showconf.py @@ -19,44 +19,38 @@ def test_showconf(ns1): res = isctest.query.udp(msg, "10.53.0.1") isctest.check.rcode(res, dns.rcode.NOERROR) - effectiveconfig = ns1.rndc("showconf -effective", log=False) - assert 'zone "example.com"' in effectiveconfig - assert 'view "_bind" chaos {' in effectiveconfig + effectiveconfig = ns1.rndc("showconf -effective") + assert 'zone "example.com"' in effectiveconfig.out + assert 'view "_bind" chaos {' in effectiveconfig.out # builtin-trust-anchors is non documented and internal clause only, it must # not be visible. 
- assert "builtin-trust-anchors" not in effectiveconfig + assert "builtin-trust-anchors" not in effectiveconfig.out # Dynamically added zones are not visible from the effectiveconfig zonedata = '"added.example" { type primary; file "example.db"; };' - ns1.rndc(f"addzone {zonedata}", log=False) + ns1.rndc(f"addzone {zonedata}") msg = isctest.query.create("a.added.example", "A") res = isctest.query.udp(msg, "10.53.0.1") isctest.check.rcode(res, dns.rcode.NOERROR) - effectiveconfig = ns1.rndc("showconf -effective", log=False) - assert 'zone "added.example"' not in effectiveconfig + effectiveconfig = ns1.rndc("showconf -effective") + assert 'zone "added.example"' not in effectiveconfig.out - userconfig = ns1.rndc("showconf -user", log=False) - assert 'zone "example.com"' in userconfig - assert 'view "_bind" chaos {' not in userconfig + userconfig = ns1.rndc("showconf -user") + assert 'zone "example.com"' in userconfig.out + assert 'view "_bind" chaos {' not in userconfig.out - builtinconfig = ns1.rndc("showconf -builtin", log=False) - assert len(userconfig.split()) < len(builtinconfig.split()) - assert len(builtinconfig.split()) < len(effectiveconfig.split()) + builtinconfig = ns1.rndc("showconf -builtin") + assert len(userconfig.out.split()) < len(builtinconfig.out.split()) + assert len(builtinconfig.out.split()) < len(effectiveconfig.out.split()) # Errors handling - error_msg = "" - - try: - ns1.rndc("showconf -idontexist", log=False) - except isctest.rndc.RNDCException as e: - error_msg = str(e) - assert error_msg == "rndc: 'showconf' failed: syntax error\n" - - try: - ns1.rndc("showconf", log=False) - except isctest.rndc.RNDCException as e: - error_msg = str(e) - assert error_msg == "rndc: 'showconf' failed: unexpected end of input\n" + response = ns1.rndc("showconf -idontexist", raise_on_exception=False) + assert response.rc != 0 + assert "rndc: 'showconf' failed: syntax error" in response.err + + response = ns1.rndc("showconf", raise_on_exception=False) + assert 
response.rc != 0 + assert "rndc: 'showconf' failed: unexpected end of input" in response.err diff --git a/bin/tests/system/shutdown/tests_shutdown.py b/bin/tests/system/shutdown/tests_shutdown.py index d746b3821d9..6f5ecd538e9 100755 --- a/bin/tests/system/shutdown/tests_shutdown.py +++ b/bin/tests/system/shutdown/tests_shutdown.py @@ -71,11 +71,8 @@ def do_work(named_proc, resolver_ip, instance, kill_method, n_workers, n_queries # helper function, 'command' is the rndc command to run def launch_rndc(command): - try: - instance.rndc(command, log=False) - return 0 - except isctest.rndc.RNDCException: - return -1 + ret = instance.rndc(command, raise_on_exception=False) + return 0 if ret.rc == 0 else -1 # We're going to execute queries in parallel by means of a thread pool. # dnspython functions block, so we need to circumvent that. diff --git a/bin/tests/system/stress/tests_stress_update.py b/bin/tests/system/stress/tests_stress_update.py index cbc678f6ba6..ef98b411374 100644 --- a/bin/tests/system/stress/tests_stress_update.py +++ b/bin/tests/system/stress/tests_stress_update.py @@ -10,7 +10,6 @@ # information regarding copyright ownership. 
import concurrent.futures -import os import time import dns.update @@ -27,22 +26,8 @@ pytestmark = pytest.mark.extra_artifacts( def rndc_loop(test_state, server): - rndc = os.getenv("RNDC") - port = os.getenv("CONTROLPORT") - - cmdline = [ - rndc, - "-c", - "../_common/rndc.conf", - "-p", - port, - "-s", - server, - "reload", - ] - while not test_state["finished"]: - isctest.run.cmd(cmdline, raise_on_exception=False) + server.rndc("reload", raise_on_exception=False) time.sleep(1) diff --git a/bin/tests/system/synthrecord/tests_synthrecord.py b/bin/tests/system/synthrecord/tests_synthrecord.py index 3af7b3d57a5..9e2243d700b 100644 --- a/bin/tests/system/synthrecord/tests_synthrecord.py +++ b/bin/tests/system/synthrecord/tests_synthrecord.py @@ -499,7 +499,6 @@ def test_sythreverse_arpa_v6_nxdomain_toomanylabels(domain): def test_synthrecord_inview(ns1, templates): templates.render("ns1/named.conf", {"inview": True}) with ns1.watch_log_from_here() as watcher: - try: - ns1.rndc("reconfig") - except isctest.rndc.RNDCException: - watcher.wait_for_line("'synthrecord' must be configured as a zone plugin") + cmd = ns1.rndc("reconfig", raise_on_exception=False) + assert cmd.rc != 0 + watcher.wait_for_line("'synthrecord' must be configured as a zone plugin") diff --git a/bin/tests/system/views/tests_views_addzones.py b/bin/tests/system/views/tests_views_addzones.py index c362b47721d..2baf745fdf6 100644 --- a/bin/tests/system/views/tests_views_addzones.py +++ b/bin/tests/system/views/tests_views_addzones.py @@ -20,6 +20,6 @@ def test_views_add_zones(ns2, templates): templates.render("ns2/named.conf", {"zone_names": zone_names}) shutil.copyfile("ns2/zone.db.in", f"ns2/{name}.db") with ns2.watch_log_from_here() as watcher: - ns2.rndc("reconfig", log=False) + ns2.rndc("reconfig") log_seq = ["any newly configured zones are now loaded", "running"] watcher.wait_for_sequence(log_seq)