manager: supervisord backend: completing implementation with first working test
author    Vasek Sraier <git@vakabus.cz>
          Mon, 19 Apr 2021 21:34:39 +0000 (23:34 +0200)
committer Aleš Mrázek <ales.mrazek@nic.cz>
          Fri, 8 Apr 2022 14:17:52 +0000 (16:17 +0200)
17 files changed:
manager/containers/debian-supervisord/Containerfile [new file with mode: 0644]
manager/integration/runner.py
manager/integration/tests/basic_crash/test.toml
manager/integration/tests/basic_startup/run
manager/integration/tests/basic_startup/test.toml
manager/integration/tests/worker_count/test.toml
manager/knot_resolver_manager/__main__.py
manager/knot_resolver_manager/kresd_controller/__init__.py
manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py
manager/knot_resolver_manager/kresd_controller/supervisord/config.py
manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2
manager/knot_resolver_manager/kresd_controller/systemd/__init__.py
manager/knot_resolver_manager/utils/async_utils.py
manager/pyproject.toml
manager/scripts/_env.sh
manager/scripts/container-build
manager/scripts/container-run.py

diff --git a/manager/containers/debian-supervisord/Containerfile b/manager/containers/debian-supervisord/Containerfile
new file mode 100644 (file)
index 0000000..5c11746
--- /dev/null
@@ -0,0 +1,20 @@
+FROM knot-manager:debian
+
+# Remove systemd
+# RUN apt-get remove -y systemd
+#
+# Well, we can't do that... The command above also uninstalls knot-resolver. Which
+# is kind of stupid. So let's just keep systemd installed and not start it.
+
+# install supervisord
+RUN python3 -m pip install supervisor
+
+# install tini init
+RUN apt-get update \
+  && apt-get install --no-install-recommends -y tini \
+  && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
+  && apt-get clean -y && rm -rf /var/lib/apt/lists/*
+
+# replace systemd with dummy init
+ENTRYPOINT ["/usr/bin/tini", "--"]
+CMD ["/bin/sleep", "inf"]
\ No newline at end of file
diff --git a/manager/integration/runner.py b/manager/integration/runner.py
index bd9c7517fabe6e8a02e8200c6226e8f5f5f03ef4..3a52bf24e4934fa6662ee5154098d148614a47cb 100644 (file)
-import hashlib
-import json
 import os
-import shutil
-import signal
 import subprocess
 import sys
-import tarfile
-import time
-import uuid
-from _hashlib import HASH as Hash
-from pathlib import Path, PurePath
-from typing import BinaryIO, Dict, List, Optional, Union
+from pathlib import Path
+from typing import Dict, List, TypeVar
 
 import click
-import requests
 import toml
 
 
-class DirectoryHash:
-    """
-    This class serves one purpose - hide implementation details of directory hashing
-    """
-
-    @staticmethod
-    def _md5_update_from_file(filename: Union[str, Path], hash: Hash) -> Hash:
-        assert Path(filename).is_file()
-        with open(str(filename), "rb") as f:
-            for chunk in iter(lambda: f.read(4096), b""):
-                hash.update(chunk)
-        return hash
-
-    @staticmethod
-    def md5_file(filename: Union[str, Path]) -> str:
-        return str(
-            DirectoryHash._md5_update_from_file(filename, hashlib.md5()).hexdigest()
-        )
-
-    @staticmethod
-    def _md5_update_from_dir(directory: Union[str, Path], hash: Hash) -> Hash:
-        assert Path(directory).is_dir()
-        for path in sorted(Path(directory).iterdir(), key=lambda p: str(p).lower()):
-            hash.update(path.name.encode())
-            if path.is_file():
-                hash = DirectoryHash._md5_update_from_file(path, hash)
-            elif path.is_dir():
-                hash = DirectoryHash._md5_update_from_dir(path, hash)
-        return hash
-
-    @staticmethod
-    def md5_dir(directory: Union[str, Path]) -> str:
-        return str(
-            DirectoryHash._md5_update_from_dir(directory, hashlib.md5()).hexdigest()
-        )
-
-
-class PodmanService:
-    def __init__(self):
-        self._process: Optional[subprocess.Popen] = None
-
-    def __enter__(self):
-        env = os.environ.copy()
-        env["BUILDAH_LAYERS"] = "true"
-
-        # run with --log-level=info or --log-level=debug for debugging
-        self._process = subprocess.Popen(
-            "podman system service tcp:localhost:13579 --time=0", shell=True, env=env
-        )
-        time.sleep(0.5)  # required to prevent connection failures
-
-        # check that it is really running
-        if self._process.poll() is not None:
-            raise Exception(f"Failed to start the podman service, it exited early with exit code {self._process.returncode}")
-
-        return PodmanServiceManager("http://localhost:13579")
-
-    def __exit__(self, ex_type, ex_value, ex_traceback):
-        failed_while_running = self._process.poll() is not None
-        self._process.send_signal(signal.SIGINT)
-
-        time.sleep(0.5)  # fixes interleaved stacktraces with podman's output
-
-        if failed_while_running:
-            raise Exception("Podman has probably unexpectedly stopped. Can't terminate it properly.", ex_value)
-
-
-class PodmanServiceManager:
-    """
-    Using HTTP Rest API new in version 2.0. Documentation here:
-    https://docs.podman.io/en/latest/_static/api.html
-    """
-
-    _API_VERSION = "v1.0.0"
-    _HASHFILE_NAME = ".contentshash"
-
-    def __init__(self, url):
-        self._url = url
-
-    def _create_url(self, path):
-        return self._url + "/" + PodmanServiceManager._API_VERSION + "/" + path
-
-    @staticmethod
-    def _create_tar_achive(directory: Path, outfile: Path):
-        with tarfile.open(str(outfile), "w:gz") as tar_handle:
-            for root, _, files in os.walk(str(directory)):
-                for file in files:
-                    path = Path(os.path.join(root, file))
-                    tar_handle.add(path, arcname=path.relative_to(directory))
-
-    def _api_build_container(self, image_name: str, data: BinaryIO):
-        response = requests.post(
-            self._create_url("libpod/build"),
-            params=[
-                ("t", image_name),
-                ("rm", "false"),
-                ("squash", "false"),
-                ("nocache", "false"),
-                ("cache-from", image_name),
-                ("forcerm", "false"),
-                ("layers", "true"),
-                ("debilita", "prd"),
-            ],
-            data=data,
-            stream=True,
-        )
-        response.raise_for_status()
-
-        # forward output
-        for line in response.iter_lines():
-            line = json.loads(str(line, "utf8"))["stream"].rstrip()
-            for real_line in line.splitlines(keepends=False):
-                print(f"\t\t{real_line}")
-
-    def _read_and_remove_hashfile(self, context_dir: Path) -> Optional[str]:
-        hashfile: Path = context_dir / PodmanServiceManager._HASHFILE_NAME
-        if hashfile.exists():
-            hash_ = hashfile.read_text("utf8").strip()
-            hashfile.unlink()
-        else:
-            hash_ = "WAS NOT HASHED BEFORE"
-
-        return hash_
-
-    def _create_hashfile(self, context_dir: Path, hash_: str):
-        hashfile: Path = context_dir / PodmanServiceManager._HASHFILE_NAME
-        with open(hashfile, "w") as f:
-            f.write(hash_)
-
-    def build_image(self, context_dir: Path, image: str):
-        # For some weird reason, creating containers using API does not use cache.
-        #
-        # # create tar archive out of the context_dir (weird, but there is no other way to specify context)
-        # tar = Path("/tmp/context.tar.gz")
-        # PodmanServiceManager._create_tar_achive(context_dir, tar)
-        # try:
-        #     # send the API request
-        #     with open(tar, "rb") as f:
-        #         self._api_build_container(image, f)
-
-        # finally:
-        #     # cleanup the tar file
-        #     tar.unlink()
-
-        current_hash = DirectoryHash.md5_file(context_dir / "Dockerfile")
-        old_hash = self._read_and_remove_hashfile(context_dir)
-
-        if current_hash == old_hash:
-            print("\t\tSkipping container build - no changes")
-        else:
-            command = f"podman build -t {image} ."
-            cmd = subprocess.Popen(
-                command,
-                shell=True,
-                cwd=str(context_dir.absolute()),
-                stdout=subprocess.PIPE,
-            )
-            while cmd.poll() is None:
-                for line in cmd.stdout.readlines():
-                    line = str(line, "utf8").rstrip()
-                    print(f"\t\t{line}")
-            assert (
-                cmd.returncode == 0
-            ), f"Container build ended with exit code {cmd.returncode}"
-
-        self._create_hashfile(context_dir, current_hash)
-
-    def _api_create_container(
-        self, image: str, bind_mount_ro: Dict[PurePath, PurePath] = {}
-    ) -> str:
-        response = requests.post(
-            self._create_url("libpod/containers/create"),
-            json={
-                "image": image,
-                "remove": True,
-                "systemd": "true",
-                "mounts": [
-                    {
-                        "destination": str(destination),
-                        "options": ["ro"],
-                        "source": str(source),
-                        "type": "bind",
-                    }
-                    for source, destination in bind_mount_ro.items()
-                ],
-            },
-        )
-        response.raise_for_status()
-        return response.json()["Id"]
-
-    def _api_start_container(self, container_id: str):
-        response = requests.post(
-            self._create_url(f"libpod/containers/{container_id}/start")
-        )
-        response.raise_for_status()
-
-    def _api_create_exec(self, container_id, command: List[str]) -> str:
-        response = requests.post(
-            self._create_url(f"libpod/containers/{container_id}/exec"),
-            json={
-                "AttachStderr": True,
-                "AttachStdin": False,
-                "AttachStdout": True,
-                "Cmd": command,
-                "Tty": True,
-                "User": "root",
-                "WorkingDir": "/",
-            },
-        )
-        response.raise_for_status()
-        return response.json()["Id"]
-
-    def _api_start_exec(self, exec_id):
-        response = requests.post(
-            self._create_url(f"libpod/exec/{exec_id}/start"), json={}, stream=True
-        )
-        response.raise_for_status()
-
-        for line in response.iter_lines():
-            print(f"\t\t{str(line, 'utf8').rstrip()}")
-
-    def _api_get_exec_exit_code(self, exec_id) -> int:
-        response = requests.get(self._create_url(f"libpod/exec/{exec_id}/json"))
-        response.raise_for_status()
-        return int(response.json()["ExitCode"])
-
-    def _api_wait_for_container(self, container_id):
-        response = requests.post(
-            self._create_url(f"libpod/containers/{container_id}/wait"),
-            params=[("condition", "exited")],
-            timeout=None,
-        )
-        response.raise_for_status()
-
-    def start_temporary_and_wait(
-        self,
-        image: str,
-        command: List[str],
-        bind_mount_ro: Dict[PurePath, PurePath] = {},
-        inspect_failed: bool = False,
-    ) -> int:
-        # start the container
-        container_id = self._api_create_container(image, bind_mount_ro)
-        self._api_start_container(container_id)
-
-        # the container is booting, let's give it some time
-        time.sleep(0.5)
-
-        # exec the the actual test
-        exec_id = self._api_create_exec(container_id, command)
-        self._api_start_exec(exec_id)
-        test_exit_code = self._api_get_exec_exit_code(exec_id)
-
-        if inspect_failed and test_exit_code != 0:
-            command = f"podman exec -ti {container_id[:8]} bash"
-            print(
-                f"\t{Colors.RED}Test failed with exit code {test_exit_code}{Colors.RESET}"
-            )
-            print(
-                f"\t{Colors.YELLOW}Interactive inspection enabled - launching shell!{Colors.RESET}"
-            )
-            print(f"\t\t{Colors.YELLOW}{command}{Colors.RESET}")
-            print(
-                f"\t{Colors.YELLOW}====== Stop the shell to continue testing ======={Colors.RESET}"
-            )
-            _ = subprocess.call(command, shell=True)
-            print(
-                f"\t{Colors.YELLOW}========= Interactive inspection ended =========={Colors.RESET}"
-            )
-            print(f"\t{Colors.YELLOW}Testing continues...{Colors.RESET}")
-
-        # issue shutdown command to the container
-        exec_id = self._api_create_exec(container_id, ["systemctl", "poweroff"])
-        self._api_start_exec(exec_id)
-
-        # wait for the container to shutdown completely
-        self._api_wait_for_container(container_id)
-
-        return test_exit_code
-
-
 class Colors:
     RED = "\033[0;31m"
     YELLOW = "\033[0;33m"
     GREEN = "\033[0;32m"
+    BRIGHT_BLACK = "\033[0;90m"
     RESET = "\033[0m"
 
 
-def _get_git_root() -> PurePath:
+def _get_git_root() -> Path:
     result = subprocess.run(
         "git rev-parse --show-toplevel", shell=True, stdout=subprocess.PIPE
     )
-    return PurePath(str(result.stdout, encoding="utf8").strip())
+    return Path(str(result.stdout, encoding="utf8").strip())
+
+
+T = TypeVar("T")
+def flatten(lst: List[List[T]]) -> List[T]:
+    res: List[T] = []
+    for inner in lst:
+        res.extend(inner)
+    return res
 
 
 class Test:
@@ -319,7 +38,7 @@ class Test:
         with open(path / Test._CONFIG_FILE, 'r') as f:
             config = toml.load(f)
 
-        self._mounts = {}
+        self._mounts: Dict[Path, Path] = {}
         gitroot: Path = _get_git_root()
         for dst, src in config["mount"].items():
             # note that we flip the meaning around to match podman's api
@@ -329,24 +48,24 @@ class Test:
         
         self.name = str(path.absolute().name)
         self._cmd = [ str(x) for x in config["cmd"] ]
-        self._image = str(config["image"])
+        self._images = [ str(img) for img in config["images"]]
 
     
-    def run(self, manager: PodmanServiceManager, inspect_failed=False):
-        print(f"Running test {Colors.YELLOW}{self.name}{Colors.RESET}")
-        print("\tRunning...")
-        exit_code = manager.start_temporary_and_wait(
-            self._image,
-            self._cmd,
-            bind_mount_ro=self._mounts,
-            inspect_failed=inspect_failed,
-        )
-        if exit_code == 0:
-            print(f"\t{Colors.GREEN}Test succeeded{Colors.RESET}")
-        else:
-            print(
-                f"\t{Colors.RED}Test failed with exit code {exit_code}{Colors.RESET}"
-            )
+    def run(self, inspect_failed: bool =False):
+        for image in self._images:
+            print(f"Running test {Colors.YELLOW}{self.name}{Colors.RESET} within container {Colors.YELLOW}{image}{Colors.RESET}")
+            print(f"----------------------------{Colors.BRIGHT_BLACK}")
+            cmd: List[str] = ["../scripts/container-run.py"] + (["-i"] if inspect_failed else []) + flatten([["-m", f"{k}:{v}"] for k,v in self._mounts.items()]) + [image] + self._cmd
+
+            # run and relay output
+            exit_code = subprocess.call(cmd)
+            print(f"{Colors.RESET}----------------------------")
+            if exit_code == 0:
+                print(f"{Colors.GREEN}Test succeeded{Colors.RESET}")
+            else:
+                print(
+                    f"{Colors.RED}Test failed with exit code {exit_code}{Colors.RESET}"
+                )
 
 class TestRunner:
     _TEST_DIRECTORY = "tests"
@@ -373,27 +92,33 @@ class TestRunner:
         default=False,
         is_flag=True,
     )
-    def run(tests: List[str] = [], inspect_failed: bool = False):
+    @click.option(
+        "-n",
+        "--no-build",
+        help="Skip building the containers",
+        default=False,
+        is_flag=True,
+    )
+    def run(tests: List[str] = [], inspect_failed: bool = False, no_build: bool = False):
         """Run TESTS
 
         If no TESTS are specified, runs them all.
         """
 
-        # Temporary hack
         # build all test containers
-        ret = subprocess.call("poe container-build", shell=True)
-        assert ret == 0
+        if not no_build:
+            ret = subprocess.call("poe container-build", shell=True)
+            assert ret == 0
 
         # Run the tests
-        with PodmanService() as manager:
-            for test_path in TestRunner._list_tests():
-                test = Test(test_path)
+        for test_path in TestRunner._list_tests():
+            test = Test(test_path)
 
-                if len(tests) != 0 and test.name not in tests:
-                    print(f"Skipping test {Colors.YELLOW}{test.name}{Colors.RESET}")
-                    continue
+            if len(tests) != 0 and test.name not in tests:
+                print(f"Skipping test {Colors.YELLOW}{test.name}{Colors.RESET}")
+                continue
 
-                test.run(manager, inspect_failed)
+            test.run(inspect_failed)
 
 
 if __name__ == "__main__":
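
A minimal sketch, not part of this commit, of how the reworked Test.run above assembles its container-run.py invocation; the image tag, mount path and test command below are illustrative placeholders.

from pathlib import Path
from typing import Dict, List

def build_cmd(image: str, mounts: Dict[Path, Path], test_cmd: List[str],
              inspect_failed: bool = False) -> List[str]:
    # mirrors Test.run: optional -i flag, one "-m src:dst" pair per mount,
    # then the image name and the command to run inside the container
    cmd: List[str] = ["../scripts/container-run.py"]
    if inspect_failed:
        cmd.append("-i")
    for src, dst in mounts.items():
        cmd += ["-m", f"{src}:{dst}"]
    return cmd + [image] + test_cmd

print(build_cmd("knot-manager:debian-supervisord",
                {Path("tests/basic_startup"): Path("/test")},
                ["/test/run"]))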
diff --git a/manager/integration/tests/basic_crash/test.toml b/manager/integration/tests/basic_crash/test.toml
index a079519f3cbc9f8c7f0dc5d7f57253151bb49f68..20981b6a54fa9a10f3c98f0332461905c8254ec4 100644 (file)
@@ -1,4 +1,4 @@
-image = "knot-manager:debian"
+images = ["knot-manager:debian"]
 cmd = ["/test/run"]
 
 [mount]
diff --git a/manager/integration/tests/basic_startup/run b/manager/integration/tests/basic_startup/run
index 126f038ed9b4301697e6b0a60868ca996144689a..2b0a1bfdb8e21f4c086305faeb276c31c588008f 100755 (executable)
@@ -5,12 +5,13 @@ set -e
 
 cd /test
 echo "Starting manager..."
-systemctl start knot-resolver-manager.service
+cmd=$(grep ExecStart /etc/systemd/system/knot-resolver-manager.service | sed 's/ExecStart=//')
+bash -c "cd /code; $cmd" &
 
 # give it time to start
-sleep 1
+sleep 2
 
 python3 send_request.py
 
 # assert that any kresd process is running
-systemctl status | grep kresd
+ps aux | grep -v grep | grep kresd
diff --git a/manager/integration/tests/basic_startup/test.toml b/manager/integration/tests/basic_startup/test.toml
index d6bd3880a1f46bdcc157cf3dadef9214573d93b3..4f7d23bdcd3e57cb87d993ae32b05d0a0f0f0b34 100644 (file)
@@ -1,4 +1,4 @@
-image = "knot-manager:debian"
+images = ["knot-manager:debian", "knot-manager:debian-supervisord"]
 cmd = ["/test/run"]
 
 [mount]
diff --git a/manager/integration/tests/worker_count/test.toml b/manager/integration/tests/worker_count/test.toml
index dcb25097efffd53f72d984bce65f2ecdadf09b3a..bb961603a513ca0068ab84fd9936fba55673f0f9 100644 (file)
@@ -1,4 +1,4 @@
-image = "knot-manager:debian"
+images = ["knot-manager:debian"]
 cmd = ["/test/run"]
 
 [mount]
diff --git a/manager/knot_resolver_manager/__main__.py b/manager/knot_resolver_manager/__main__.py
index a92712b9be2aa2921512d2864e4b1cfd75c7dec8..a7f585da27063a043a5c3b5a2cbcadc7f7f33218 100644 (file)
@@ -1,5 +1,7 @@
+import logging
 import sys
 from pathlib import Path
+from time import time
 from typing import Optional
 
 import click
@@ -14,6 +16,9 @@ _SOCKET_PATH = "/tmp/manager.sock"
 _MANAGER = "kres_manager"
 
 
+logger = logging.getLogger(__name__)
+
+
 async def index(_request: web.Request) -> web.Response:
     return web.Response(text="Knot Resolver Manager is running! The configuration endpoint is at /config")
 
@@ -40,6 +45,7 @@ def main(listen: Optional[str], config: Optional[str]):
 
     [listen] ... numeric port or a path for a Unix domain socket, default is \"/tmp/manager.sock\"
     """
+    start_time = time()
 
     app = web.Application()
 
@@ -52,6 +58,8 @@ def main(listen: Optional[str], config: Optional[str]):
         if config is not None:
             # TODO Use config loaded from the file system
             pass
+        end_time = time()
+        logger.info(f"Manager fully initialized after {end_time - start_time} seconds")
 
     app.on_startup.append(init_manager)
 
diff --git a/manager/knot_resolver_manager/kresd_controller/__init__.py b/manager/knot_resolver_manager/kresd_controller/__init__.py
index 6636186ed200fe91cf4184aaae1a57e5dac071ef..04dc6fa1856f4af6000e932bed16b89d1f73b1dc 100644 (file)
@@ -2,10 +2,11 @@ import asyncio
 from typing import Type
 
 from knot_resolver_manager.kresd_controller.base import BaseKresdController
+from knot_resolver_manager.kresd_controller.supervisord import SupervisordKresdController
 from knot_resolver_manager.kresd_controller.systemd import SystemdKresdController
 
 # In this tuple, every supported controller should be listed. In the order of preference (preferred first)
-_registered_controllers = (SystemdKresdController,)
+_registered_controllers = (SystemdKresdController, SupervisordKresdController)
 
 
 async def get_best_controller_implementation() -> Type[BaseKresdController]:
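
The body of get_best_controller_implementation() lies outside this hunk, so the following is only an assumption about how the preference tuple above is consumed: pick the first registered controller whose availability check passes (assuming is_controller_available() is part of the BaseKresdController interface, as the systemd and supervisord backends in this commit suggest).

async def pick_controller() -> Type[BaseKresdController]:
    # walk the controllers in order of preference and return the first usable one
    for controller in _registered_controllers:
        if await controller.is_controller_available():
            return controller
    raise LookupError("no supported service manager (systemd or supervisord) found")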
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py b/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py
index aab938ae2db63304fd02851e85f0c9bee6aff4f4..d9d8752ba7f61dd032723afd9d42654f3da0bdc9 100644 (file)
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
 
 
 class SupervisordKresdController(BaseKresdController):
-    _config = SupervisordConfig()
+    _config = SupervisordConfig([])
 
     async def is_running(self) -> bool:
         return self.id in SupervisordKresdController._config.instances
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/config.py b/manager/knot_resolver_manager/kresd_controller/supervisord/config.py
index f347579ee8929e9858462e9829f2a3a8c63803db..9a22aef1a69bfd66f1a35853bda21fa8dd8be0e2 100644 (file)
@@ -1,6 +1,8 @@
 import os.path
 import signal
 from os import kill
+from pathlib import Path
+from time import sleep
 from typing import List
 
 from jinja2 import Template
@@ -15,12 +17,13 @@ SERVER_SOCK = "/tmp/knot-resolver-manager-supervisord.sock"  # created pseudoran
 
 @dataclass
 class SupervisordConfig:
+    instances: List[str]
     unix_http_server: str = SERVER_SOCK
-    instances: List[str] = []
+    pid_file: str = PID_FILE
 
 
 async def _create_config_file(config: SupervisordConfig):
-    path = os.path.realpath(__file__)
+    path = Path(os.path.realpath(__file__)).parent / "supervisord.conf.j2"
     template = await readfile(path)
     config_string = Template(template).render(config=config)
     await writefile(CONFIG_FILE, config_string)
@@ -28,7 +31,8 @@ async def _create_config_file(config: SupervisordConfig):
 
 async def start_supervisord(config: SupervisordConfig):
     await _create_config_file(config)
-    await call(f'supervisord --configuration="{CONFIG_FILE}" --pidfile="{PID_FILE}"', shell=True)
+    await call(f'supervisord --configuration="{CONFIG_FILE}"', shell=True)
+    sleep(1)
 
 
 async def stop_supervisord():
@@ -39,20 +43,23 @@ async def stop_supervisord():
 
 async def update_config(config: SupervisordConfig):
     await _create_config_file(config)
-    await call(f'supervisorctl -s "{SERVER_SOCK}" update')
+    await call(f'supervisorctl -c "{CONFIG_FILE}" update', shell=True)
 
 
 async def restart(id_: str):
-    await call(f'supervisorctl -s "{SERVER_SOCK}" restart {id_}')
+    await call(f'supervisorctl -c "{CONFIG_FILE}" restart {id_}', shell=True)
 
 
 async def is_supervisord_available() -> bool:
-    i = await call("supervisorctl -h", shell=True)
-    i += await call("supervisord -h", shell=True)
+    i = await call("supervisorctl -h > /dev/null", shell=True, discard_output=True)
+    i += await call("supervisord -h > /dev/null", shell=True, discard_output=True)
     return i == 0
 
 
 async def is_supervisord_running() -> bool:
+    if not Path(PID_FILE).exists():
+        return False
+
     pid = int(await readfile(PID_FILE))
     try:
         kill(pid, 0)
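
A sketch of how the helpers above might be driven when (re)configuring kresd instances; the orchestration and the instance id "kresd_1" are hypothetical and not taken from this commit, and the import path simply follows the file layout in this repository.

import asyncio

from knot_resolver_manager.kresd_controller.supervisord.config import (
    SupervisordConfig,
    is_supervisord_running,
    restart,
    start_supervisord,
    update_config,
)

async def ensure_instance(instance_id: str) -> None:
    config = SupervisordConfig(instances=[instance_id])
    if await is_supervisord_running():
        await update_config(config)      # re-render the config, then `supervisorctl -c ... update`
    else:
        await start_supervisord(config)  # write the config file and launch supervisord
    await restart(instance_id)           # `supervisorctl -c ... restart <id>`

asyncio.run(ensure_instance("kresd_1"))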
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2 b/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2
index 1394127ffd8546f7cc9ccc5ac505924383d25f8e..eb0439e3d9772c2bf5da46c622cd32720f41c54c 100644 (file)
@@ -1,5 +1,19 @@
 [supervisord]
-unix_http_server = {{ config.unix_http_server }}
+pidfile = {{ config.pid_file }}
+directory = /tmp
+nodaemon = false
+logfile = /tmp/supervisord.log
+logfile_maxbytes = 50MB
+user=root
+
+[unix_http_server]
+file = {{ config.unix_http_server }}
+
+[supervisorctl]
+serverurl = unix://{{ config.unix_http_server }}
+
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
 
 {% for id in config.instances -%}
 
diff --git a/manager/knot_resolver_manager/kresd_controller/systemd/__init__.py b/manager/knot_resolver_manager/kresd_controller/systemd/__init__.py
index 7e3caf0ef7cd784bd3ce137b42cd1462dcfb950d..8c15d3513e6dfbcdc6ce27eb339340bd1a685e4c 100644 (file)
@@ -3,6 +3,7 @@ from typing import Iterable, List
 
 from knot_resolver_manager import compat
 from knot_resolver_manager.kresd_controller.base import BaseKresdController
+from knot_resolver_manager.utils.async_utils import call
 
 from . import dbus_api as systemd
 
@@ -24,11 +25,17 @@ class SystemdKresdController(BaseKresdController):
 
     @staticmethod
     async def is_controller_available() -> bool:
+        # try to run systemctl (should be quite fast)
+        ret = await call("systemctl status", shell=True, discard_output=True)
+        if ret != 0:
+            return False
+
+        # if that passes, try to list units
         try:
             _ = await compat.asyncio.to_thread(systemd.list_units)
             return True
         except BaseException:  # we want every possible exception to be caught
-            logger.warning("systemd DBus API backend failed to initialize", exc_info=True)
+            logger.warning("systemd DBus API backend failed to initialize")
             return False
 
     @staticmethod
diff --git a/manager/knot_resolver_manager/utils/async_utils.py b/manager/knot_resolver_manager/utils/async_utils.py
index 385fc995c3f437aa27494c803bafc080cd25bd66..7fbcb18cc16f8b84735f8e2235575a0549a1c133 100644 (file)
@@ -1,3 +1,4 @@
+import asyncio
 import os
 import time
 from asyncio import create_subprocess_exec, create_subprocess_shell
@@ -7,20 +8,27 @@ from typing import List, Union
 from knot_resolver_manager.compat.asyncio import to_thread
 
 
-async def call(cmd: Union[str, bytes, List[str], List[bytes]], shell: bool = False) -> int:
+async def call(
+    cmd: Union[str, bytes, List[str], List[bytes]], shell: bool = False, discard_output: bool = False
+) -> int:
     """
     custom async alternative to subprocess.call()
     """
+    kwargs = {}
+    if discard_output:
+        kwargs["stdout"] = asyncio.subprocess.DEVNULL
+        kwargs["stderr"] = asyncio.subprocess.DEVNULL
+
     if shell:
         if isinstance(cmd, list):
             raise RuntimeError("can't use list of arguments with shell=True")
-        proc = await create_subprocess_shell(cmd)
+        proc = await create_subprocess_shell(cmd, **kwargs)
     else:
         if not isinstance(cmd, list):
             raise RuntimeError(
                 "Please use list of arguments, not a single string. It will prevent ambiguity when parsing"
             )
-        proc = await create_subprocess_exec(*cmd)
+        proc = await create_subprocess_exec(*cmd, **kwargs)
 
     return await proc.wait()
 
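
A short usage sketch for the extended call() helper, modelled on is_supervisord_available() above; the import path follows the file layout and the probed commands are just examples.

import asyncio

from knot_resolver_manager.utils.async_utils import call

async def main() -> None:
    # probe for the supervisord binaries without printing their help output;
    # the exit codes are summed, so 0 means both commands succeeded
    rc = await call("supervisord -h", shell=True, discard_output=True)
    rc += await call(["supervisorctl", "-h"], discard_output=True)
    print("supervisord available:", rc == 0)

asyncio.run(main())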
diff --git a/manager/pyproject.toml b/manager/pyproject.toml
index e21fa64bb6e68cca19483bfa082b0c4a5c986bd8..b4fe4968789d9f87a260fa31bea631e78941d393 100644 (file)
@@ -103,6 +103,7 @@ disable= [
     "bad-continuation", # conflicts with black
     "consider-using-in", # pyright can't see through in expressions,
     "too-many-return-statements", # would prevent us from using recursive tree traversals
+    "logging-fstring-interpolation", # see https://github.com/PyCQA/pylint/issues/1788
 ]
 
 [tool.pylint.SIMILARITIES]
diff --git a/manager/scripts/_env.sh b/manager/scripts/_env.sh
index f0459d2b44305c72edc6845909bfd2f262852392..d2826dc2fb20fa6e103a23ff86bb675ff2b81da4 100644 (file)
@@ -5,6 +5,7 @@ set -e
 red="\033[0;31m"
 yellow="\033[0;33m"
 green="\033[0;32m"
+bright_black="\033[0;90m"
 reset="\033[0m"
 
 # ensure consistent top level directory
diff --git a/manager/scripts/container-build b/manager/scripts/container-build
index 1e0a3ac1fca9e3f93e542da839b84eb17d7be411..d1a34c2378eea1bb44b5e9f849d41a3696ba2701 100755 (executable)
@@ -17,5 +17,7 @@ fi
 
 # build all configured containers
 for tag in $containers; do
+    echo -e "Building ${yellow}knot-manager:${tag}${reset}${bright_black}"
     podman build -t "knot-manager:$tag" -f "containers/$tag/Containerfile" .
+    echo -e "${reset}Build finished"
 done
\ No newline at end of file
diff --git a/manager/scripts/container-run.py b/manager/scripts/container-run.py
index d83c83d60ba53f61c5fa75198a6efea5288139db..0e567eecd6c63c497eea5b7d6489a0fa12d554c1 100755 (executable)
@@ -3,8 +3,8 @@
 import subprocess
 import sys
 import time
-from pathlib import Path, PurePath
-from typing import Dict, List, Optional
+from pathlib import Path
+from typing import Dict, List, NoReturn, Optional
 
 import click
 
@@ -12,7 +12,7 @@ PODMAN_EXECUTABLE = "/usr/bin/podman"
 
 
 def start_detached(
-    image: str, publish: List[int] = [], ro_mounts: Dict[PurePath, PurePath] = {}
+    image: str, publish: List[int] = [], ro_mounts: Dict[Path, Path] = {}
 ) -> str:
     """Start a detached container"""
     options = [f"--publish={port}:{port}/tcp" for port in publish] + [
@@ -43,11 +43,11 @@ def stop(container_id: str):
     assert ret == 0
 
 
-def _get_git_root() -> PurePath:
+def _get_git_root() -> Path:
     result = subprocess.run(
         "git rev-parse --show-toplevel", shell=True, stdout=subprocess.PIPE
     )
-    return PurePath(str(result.stdout, encoding="utf8").strip())
+    return Path(str(result.stdout, encoding="utf8").strip())
 
 
 @click.command()
@@ -74,19 +74,29 @@ def _get_git_root() -> PurePath:
     type=bool,
     help="Shortcut to mount gitroot into /code",
 )
+@click.option(
+    "-i",
+    "--interactive",
+    "interactive_inspection",
+    default=False,
+    is_flag=True,
+    type=bool,
+    help="Drop into interactive shell if the command fails"
+)
 def main(
     image: str,
     command: List[str],
-    publish: Optional[int],
+    publish: Optional[List[int]],
     mount: Optional[List[str]],
     mount_code: bool,
-):
+    interactive_inspection: bool,
+) -> NoReturn:
     # make sure arguments have the correct type
     image = str(image)
     command = list(command)
-    publish = [] if publish is None else [int(p) for p in publish]
-    mount = [] if mount is None else [x.split(":") for x in mount]
-    mount_path = {Path(x[0]).absolute(): Path(x[1]).absolute() for x in mount}
+    publishI = [] if publish is None else [int(p) for p in publish]
+    mountI = [] if mount is None else [x.split(":") for x in mount]
+    mount_path = {Path(x[0]).absolute(): Path(x[1]).absolute() for x in mountI}
     for src_path in mount_path:
         if not src_path.exists():
             print(
@@ -97,13 +107,21 @@ def main(
     if mount_code:
         mount_path[_get_git_root()] = Path("/code")
 
-    cont = start_detached(image, publish=publish, ro_mounts=mount_path)
+    cont = start_detached(image, publish=publishI, ro_mounts=mount_path)
     # wait for the container to boot properly
     time.sleep(0.5)
     # run the command
-    ret = exec_interactive(cont, command)
+    exit_code = exec_interactive(cont, command)
+
+    if interactive_inspection and exit_code != 0:
+        print(f"The command {command} failed with exit code {exit_code}.")
+        print("Dropping into an interactive shell as requested. Stop the shell to stop the whole container.")
+        print("-----------------------------")
+        exec_interactive(cont, ["/bin/bash"])
+
     # stop the container
     stop(cont)
+    sys.exit(exit_code)
 
 
 if __name__ == "__main__":
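
For completeness, a hypothetical way to invoke the updated script from another Python helper, mirroring what integration/runner.py now does; the relative paths and image tag are placeholders.

import subprocess
import sys

# -i requests the interactive shell on failure, -m mounts the test directory
# read-only into /test inside the container
rc = subprocess.call([
    "manager/scripts/container-run.py", "-i",
    "-m", "manager/integration/tests/basic_startup:/test",
    "knot-manager:debian-supervisord",
    "/test/run",
])
sys.exit(rc)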