git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
integration testing: improved testing tool, testing changes in worker count
authorVasek Sraier <git@vakabus.cz>
Sat, 13 Mar 2021 11:28:52 +0000 (12:28 +0100)
committerAleš Mrázek <ales.mrazek@nic.cz>
Fri, 8 Apr 2022 14:17:51 +0000 (16:17 +0200)
manager/integration/README.md
manager/integration/runner.py
manager/integration/tests/dummy/Dockerfile [deleted file]
manager/integration/tests/dummy/run [deleted file]
manager/integration/tests/worker_count/Dockerfile [new file with mode: 0644]
manager/integration/tests/worker_count/knot-resolver.service [new file with mode: 0644]
manager/integration/tests/worker_count/run [new file with mode: 0755]
manager/integration/tests/worker_count/run_test.py [new file with mode: 0644]

index d1f81bcaf971c29ef38f23b814affcc237ed48e1..9c5051d90604d78daa2f97d18155815afe2557be 100644 (file)
@@ -24,6 +24,6 @@ The tool launches a Podman subprocess which exposes a HTTP API. This API is then
 
 For each directory in `tests/`, the testing tool builds the container, starts it, exec's `/test/run` and observes its result. After that, it issues `systemctl poweroff` and waits until the container turns itself off.
 
-Because building the container is slow (even with Podman's caching), we skip it if it's not needed. The testing tool creates a `.contentshash` file within each test directory, which contains a hash of all content. The container is rebuilt only when the hash changes (or the file is missing).
+Because building the container is slow and spams the output unnecessarily, we skip it if it's not needed. The testing tool creates a `.contentshash` file within each test directory, which contains a hash of the Dockerfile. The container is rebuilt only when the hash changes (or the file is missing).
 
 
index 5c758728a6f7256fdb07110d0c53e2352c0bfd49..3e8e282ba05f481bb8e0a2259404df8f63867044 100644 (file)
@@ -59,9 +59,12 @@ class PodmanService:
         self._process: Optional[subprocess.Popen] = None
 
     def __enter__(self):
+        env = os.environ.copy()
+        env["BUILDAH_LAYERS"] = "true"
+
         # run with --log-level=info or --log-level=debug for debugging
         self._process = subprocess.Popen(
-            "podman system service tcp:localhost:13579 --time=0", shell=True
+            "podman system service tcp:localhost:13579 --time=0", shell=True, env=env
         )
         time.sleep(0.5)  # required to prevent connection failures
         return PodmanServiceManager("http://localhost:13579")
@@ -102,14 +105,26 @@ class PodmanServiceManager:
     def _api_build_container(self, image_name: str, data: BinaryIO):
         response = requests.post(
             self._create_url("libpod/build"),
-            params=[("t", image_name)],
+            params=[
+                ("t", image_name),
+                ("rm", "false"),
+                ("squash", "false"),
+                ("nocache", "false"),
+                ("cache-from", image_name),
+                ("forcerm", "false"),
+                ("layers", "true"),
+                ("debilita", "prd"),
+            ],
             data=data,
             stream=True,
         )
         response.raise_for_status()
 
+        # forward output
         for line in response.iter_lines():
-            print(f"\t\t{json.loads(str(line, 'utf8'))['stream'].rstrip()}")
+            line = json.loads(str(line, "utf8"))["stream"].rstrip()
+            for real_line in line.splitlines(keepends=False):
+                print(f"\t\t{real_line}")
 
     def _read_and_remove_hashfile(self, context_dir: Path) -> Optional[str]:
         hashfile: Path = context_dir / PodmanServiceManager._HASHFILE_NAME
@@ -127,28 +142,41 @@ class PodmanServiceManager:
             f.write(hash_)
 
     def build_image(self, context_dir: Path, image: str):
+        # For some weird reason, creating containers using API does not use cache.
+        #
+        # # create tar archive out of the context_dir (weird, but there is no other way to specify context)
+        # tar = Path("/tmp/context.tar.gz")
+        # PodmanServiceManager._create_tar_achive(context_dir, tar)
+        # try:
+        #     # send the API request
+        #     with open(tar, "rb") as f:
+        #         self._api_build_container(image, f)
+
+        # finally:
+        #     # cleanup the tar file
+        #     tar.unlink()
+
+        current_hash = DirectoryHash.md5_file(context_dir / "Dockerfile")
         old_hash = self._read_and_remove_hashfile(context_dir)
-        current_hash = DirectoryHash.md5_dir(context_dir) + "_" + image
-        if old_hash == current_hash:
-            # no rebuild required
-            self._create_hashfile(context_dir, current_hash)
-            print("\tSkipping container build")
-            return
-
-        # create tar archive out of the context_dir (weird, but there is no other way to specify context)
-        tar = Path("/tmp/context.tar.gz")
-        PodmanServiceManager._create_tar_achive(context_dir, tar)
-        try:
-            # send the API request
-            with open(tar, "rb") as f:
-                self._api_build_container(image, f)
-
-        finally:
-            # cleanup the tar file
-            # tar.unlink()
-            pass
-
-        # create hashfile for future caching
+
+        if current_hash == old_hash:
+            print("\t\tSkipping container build - no changes")
+        else:
+            command = f"podman build -t {image} ."
+            cmd = subprocess.Popen(
+                command,
+                shell=True,
+                cwd=str(context_dir.absolute()),
+                stdout=subprocess.PIPE,
+            )
+            while cmd.poll() is None:
+                for line in cmd.stdout.readlines():
+                    line = str(line, "utf8").rstrip()
+                    print(f"\t\t{line}")
+            assert (
+                cmd.returncode == 0
+            ), f"Container build ended with exit code {cmd.returncode}"
+
         self._create_hashfile(context_dir, current_hash)
 
     def _api_create_container(
diff --git a/manager/integration/tests/dummy/Dockerfile b/manager/integration/tests/dummy/Dockerfile
deleted file mode 100644 (file)
index c811f39..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM docker.io/debian:buster
-
-RUN apt-get update && apt-get install -y systemd sudo
-
-RUN useradd -m -s /bin/bash dev
-RUN echo "dev:password" | chpasswd
-RUN usermod -a -G sudo dev
-
-CMD ["/sbin/init"]
diff --git a/manager/integration/tests/dummy/run b/manager/integration/tests/dummy/run
deleted file mode 100755 (executable)
index 84b6391..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-exit 42
diff --git a/manager/integration/tests/worker_count/Dockerfile b/manager/integration/tests/worker_count/Dockerfile
new file mode 100644 (file)
index 0000000..a601519
--- /dev/null
@@ -0,0 +1,38 @@
+FROM docker.io/debian:latest
+
+ENV LC_ALL=C.UTF-8
+
+# install project dependencies
+
+## build essentials
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y build-essential git ca-certificates
+
+## python
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y python3 python3-pip python3-dev
+
+## glib dependencies
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y libcairo2-dev libglib2.0-0 libgirepository1.0-dev
+
+## python setuptools
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y python3-setuptools
+
+## python libraries
+RUN pip3 install aiohttp strictyaml pydbus PyGObject
+
+## systemd
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y systemd
+
+## kresd
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y wget
+RUN wget https://secure.nic.cz/files/knot-resolver/knot-resolver-release.deb && dpkg -i knot-resolver-release.deb
+RUN apt-get update && apt-get install -y knot-resolver
+
+# dbus
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y dbus
+
+
+# install test dependencies
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y curl procps
+RUN pip3 install requests requests-unixsocket
+
+CMD ["/bin/systemd"]
diff --git a/manager/integration/tests/worker_count/knot-resolver.service b/manager/integration/tests/worker_count/knot-resolver.service
new file mode 100644 (file)
index 0000000..b09045c
--- /dev/null
@@ -0,0 +1,12 @@
+[Unit]
+Description=Knot Resolver Manager
+Requires=dbus
+After=dbus
+
+[Service]
+WorkingDirectory=/repo
+ExecStart=/usr/bin/python3 -m knot_resolver_manager
+KillSignal=SIGINT
+
+[Install]
+WantedBy=multi-user.target
diff --git a/manager/integration/tests/worker_count/run b/manager/integration/tests/worker_count/run
new file mode 100755 (executable)
index 0000000..c074d4d
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# fail early
+set -e
+
+cd /test
+echo "Starting manager..."
+cp knot-resolver.service /etc/systemd/system
+systemctl daemon-reload
+systemctl start knot-resolver.service
+
+# give it time to start
+sleep 1
+
+python3 run_test.py
diff --git a/manager/integration/tests/worker_count/run_test.py b/manager/integration/tests/worker_count/run_test.py
new file mode 100644 (file)
index 0000000..40fd7b4
--- /dev/null
@@ -0,0 +1,68 @@
+
+
+
+import requests
+import requests_unixsocket
+import subprocess
+import time
+
+# patch requests library so that it supports unix socket
+requests_unixsocket.monkeypatch()
+
+# prepare the payload
+LUA_CONFIG = """
+-- SPDX-License-Identifier: CC0-1.0
+-- vim:syntax=lua:set ts=4 sw=4:
+-- Refer to manual: https://knot-resolver.readthedocs.org/en/stable/
+
+-- Network interface configuration
+net.listen('127.0.0.1', 53, { kind = 'dns' })
+net.listen('127.0.0.1', 853, { kind = 'tls' })
+--net.listen('127.0.0.1', 443, { kind = 'doh2' })
+net.listen('::1', 53, { kind = 'dns', freebind = true })
+net.listen('::1', 853, { kind = 'tls', freebind = true })
+--net.listen('::1', 443, { kind = 'doh2' })
+
+-- Load useful modules
+modules = {
+       'hints > iterate',  -- Load /etc/hosts and allow custom root hints
+       'stats',            -- Track internal statistics
+       'predict',          -- Prefetch expiring/frequent records
+}
+
+-- Cache size
+cache.size = 100 * MB
+"""
+PREPROCESSED_CONFIG = "\n  ".join(LUA_CONFIG.splitlines(keepends=False))
+PAYLOAD_F = lambda num: f"""
+num_workers: {num}
+lua_config: |
+{ PREPROCESSED_CONFIG }"""
+
+def set_workers(num: int):
+       # send the config
+       r = requests.post('http+unix://%2Ftmp%2Fmanager.sock/config', data=PAYLOAD_F(num))
+       r.raise_for_status()
+
+def count_running() -> int:
+       cmd = subprocess.run("ps aux | grep kresd | grep -v grep", shell=True, stdout=subprocess.PIPE)
+       return len(str(cmd.stdout, 'utf8').strip().split("\n"))
+
+
+print("Initial 1 worker config...")
+set_workers(1)
+time.sleep(1)
+count = count_running()
+assert count == 1, f"Unexpected number of kresd instances is running - {count}"
+
+print("Increasing worker count to 8")
+set_workers(8)
+time.sleep(2)
+count = count_running()
+assert count == 8, f"Unexpected number of kresd instances is running - {count}"
+
+print("Decreasing worker count to 4")
+set_workers(4)
+time.sleep(2)
+count = count_running()
+assert count == 4, f"Unexpected number of kresd instances is running - {count}"
\ No newline at end of file