For each directory in `tests/`, the testing tool builds the container, starts it, exec's `/test/run` and observes its result. After that, it issues `systemctl poweroff` and waits until the container turns itself off.
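In rough pseudocode, the driver loop looks like this; this is a minimal sketch only, where `start_container`, `exec` and `wait` are illustrative names rather than the tool's actual API:

from pathlib import Path

def run_all_tests(manager) -> None:
    # hypothetical driver loop; helper names are illustrative only
    for test_dir in sorted(p for p in Path("tests").iterdir() if p.is_dir()):
        image = f"manager-test-{test_dir.name}"
        manager.build_image(test_dir, image)         # skipped when nothing changed (see below)
        container = manager.start_container(image)   # boots systemd as PID 1
        result = container.exec("/test/run")         # run the test entry point
        container.exec("systemctl poweroff")         # ask systemd to shut down cleanly
        container.wait()                             # block until the container is off
        assert result == 0, f"{test_dir.name} failed with exit code {result}"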
-Because building the container is slow (even with Podman's caching), we skip it if it's not needed. The testing tool creates a `.contentshash` file within each test directory, which contains a hash of all content. The container is rebuilt only when the hash changes (or the file is missing).
+Because building the container is slow and needlessly spams the output, we skip it when it's not needed. The testing tool creates a `.contentshash` file within each test directory, which contains a hash of the Dockerfile. The container is rebuilt only when the hash changes (or the file is missing).
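The check itself is cheap; a minimal sketch of the idea, assuming MD5 and the `.contentshash` name from above (the real tool rewrites the hash file only after a successful build):

import hashlib
from pathlib import Path

def needs_rebuild(test_dir: Path) -> bool:
    # hash only the Dockerfile, per the caching rule described above
    current = hashlib.md5((test_dir / "Dockerfile").read_bytes()).hexdigest()
    hashfile = test_dir / ".contentshash"
    if hashfile.exists() and hashfile.read_text().strip() == current:
        return False  # hash unchanged: skip the build
    return True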
        self._process: Optional[subprocess.Popen] = None

    def __enter__(self):
+        env = os.environ.copy()
+        env["BUILDAH_LAYERS"] = "true"
+
        # run with --log-level=info or --log-level=debug for debugging
        self._process = subprocess.Popen(
-            "podman system service tcp:localhost:13579 --time=0", shell=True
+            "podman system service tcp:localhost:13579 --time=0", shell=True, env=env
        )
        time.sleep(0.5)  # required to prevent connection failures
        return PodmanServiceManager("http://localhost:13579")
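For context, a hedged usage sketch; `PodmanService` is a stand-in name for whatever class owns the `__enter__` above:

from pathlib import Path

# hypothetical usage; the class name and test path are illustrative
with PodmanService() as manager:
    manager.build_image(Path("tests/example"), "example-test-image")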
    def _api_build_container(self, image_name: str, data: BinaryIO):
        response = requests.post(
            self._create_url("libpod/build"),
-            params=[("t", image_name)],
+            params=[
+                ("t", image_name),
+                ("rm", "false"),
+                ("squash", "false"),
+                ("nocache", "false"),
+                ("cache-from", image_name),
+                ("forcerm", "false"),
+                ("layers", "true"),
+            ],
            data=data,
            stream=True,
        )
        response.raise_for_status()
+        # forward output
        for line in response.iter_lines():
-            print(f"\t\t{json.loads(str(line, 'utf8'))['stream'].rstrip()}")
+            line = json.loads(str(line, "utf8"))["stream"].rstrip()
+            for real_line in line.splitlines(keepends=False):
+                print(f"\t\t{real_line}")
    def _read_and_remove_hashfile(self, context_dir: Path) -> Optional[str]:
        hashfile: Path = context_dir / PodmanServiceManager._HASHFILE_NAME
            f.write(hash_)
    def build_image(self, context_dir: Path, image: str):
+        # For some weird reason, building images via the API does not use the cache.
+        #
+        # # create tar archive out of the context_dir (weird, but there is no other way to specify context)
+        # tar = Path("/tmp/context.tar.gz")
+        # PodmanServiceManager._create_tar_achive(context_dir, tar)
+        # try:
+        #     # send the API request
+        #     with open(tar, "rb") as f:
+        #         self._api_build_container(image, f)
+
+        # finally:
+        #     # cleanup the tar file
+        #     tar.unlink()
+
+        current_hash = DirectoryHash.md5_file(context_dir / "Dockerfile")
        old_hash = self._read_and_remove_hashfile(context_dir)
-        current_hash = DirectoryHash.md5_dir(context_dir) + "_" + image
-        if old_hash == current_hash:
-            # no rebuild required
-            self._create_hashfile(context_dir, current_hash)
-            print("\tSkipping container build")
-            return
-
-        # create tar archive out of the context_dir (weird, but there is no other way to specify context)
-        tar = Path("/tmp/context.tar.gz")
-        PodmanServiceManager._create_tar_achive(context_dir, tar)
-        try:
-            # send the API request
-            with open(tar, "rb") as f:
-                self._api_build_container(image, f)
-
-        finally:
-            # cleanup the tar file
-            # tar.unlink()
-            pass
-
-        # create hashfile for future caching
+
+        if current_hash == old_hash:
+            print("\t\tSkipping container build - no changes")
+        else:
+            command = f"podman build -t {image} ."
+            cmd = subprocess.Popen(
+                command,
+                shell=True,
+                cwd=str(context_dir.absolute()),
+                stdout=subprocess.PIPE,
+            )
+            # stream the build output line by line instead of blocking on readlines()
+            for line in cmd.stdout:
+                print(f"\t\t{str(line, 'utf8').rstrip()}")
+            cmd.wait()
+            assert (
+                cmd.returncode == 0
+            ), f"Container build ended with exit code {cmd.returncode}"
+
        self._create_hashfile(context_dir, current_hash)
    def _api_create_container(
+++ /dev/null
-FROM docker.io/debian:buster
-
-RUN apt-get update && apt-get install -y systemd sudo
-
-RUN useradd -m -s /bin/bash dev
-RUN echo "dev:password" | chpasswd
-RUN usermod -a -G sudo dev
-
-CMD ["/sbin/init"]
+++ /dev/null
-#!/bin/bash
-
-exit 42
--- /dev/null
+FROM docker.io/debian:latest
+
+ENV LC_ALL=C.UTF-8
+
+# install project dependencies
+
+## build essentials
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y build-essential git ca-certificates
+
+## python
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y python3 python3-pip python3-dev
+
+## glib dependencies
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y libcairo2-dev libglib2.0-0 libgirepository1.0-dev
+
+## python setuptools
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y python3-setuptools
+
+## python libraries
+RUN pip3 install aiohttp strictyaml pydbus PyGObject
+
+## systemd
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y systemd
+
+## kresd
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y wget
+RUN wget https://secure.nic.cz/files/knot-resolver/knot-resolver-release.deb && dpkg -i knot-resolver-release.deb
+RUN apt-get update && apt-get install -y knot-resolver
+
+# dbus
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y dbus
+
+
+# install test dependencies
+RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y curl procps
+RUN pip3 install requests requests-unixsocket
+
+CMD ["/bin/systemd"]
--- /dev/null
+[Unit]
+Description=Knot Resolver Manager
+Requires=dbus.service
+After=dbus.service
+
+[Service]
+WorkingDirectory=/repo
+ExecStart=/usr/bin/python3 -m knot_resolver_manager
+KillSignal=SIGINT
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+#!/bin/bash
+
+# fail early
+set -e
+
+cd /test
+echo "Starting manager..."
+cp knot-resolver.service /etc/systemd/system
+systemctl daemon-reload
+systemctl start knot-resolver.service
+
+# give it time to start
+sleep 1
+
+python3 run_test.py
--- /dev/null
+import requests
+import requests_unixsocket
+import subprocess
+import time
+
+# patch requests library so that it supports unix socket
+requests_unixsocket.monkeypatch()
+
+# prepare the payload
+LUA_CONFIG = """
+-- SPDX-License-Identifier: CC0-1.0
+-- vim:syntax=lua:set ts=4 sw=4:
+-- Refer to manual: https://knot-resolver.readthedocs.org/en/stable/
+
+-- Network interface configuration
+net.listen('127.0.0.1', 53, { kind = 'dns' })
+net.listen('127.0.0.1', 853, { kind = 'tls' })
+--net.listen('127.0.0.1', 443, { kind = 'doh2' })
+net.listen('::1', 53, { kind = 'dns', freebind = true })
+net.listen('::1', 853, { kind = 'tls', freebind = true })
+--net.listen('::1', 443, { kind = 'doh2' })
+
+-- Load useful modules
+modules = {
+        'hints > iterate', -- Load /etc/hosts and allow custom root hints
+        'stats',           -- Track internal statistics
+        'predict',         -- Prefetch expiring/frequent records
+}
+
+-- Cache size
+cache.size = 100 * MB
+"""
+PREPROCESSED_CONFIG = "\n ".join(LUA_CONFIG.splitlines(keepends=False))
+PAYLOAD_F = lambda num: f"""
+num_workers: {num}
+lua_config: |
+{ PREPROCESSED_CONFIG }"""
+
+def set_workers(num: int):
+    # send the config
+    r = requests.post('http+unix://%2Ftmp%2Fmanager.sock/config', data=PAYLOAD_F(num))
+    r.raise_for_status()
+
+def count_running() -> int:
+    cmd = subprocess.run("ps aux | grep kresd | grep -v grep", shell=True, stdout=subprocess.PIPE)
+    out = str(cmd.stdout, 'utf8').strip()
+    # empty output would otherwise split into [""] and be miscounted as one instance
+    return len(out.split("\n")) if out else 0
+
+
+print("Initial 1 worker config...")
+set_workers(1)
+time.sleep(1)
+count = count_running()
+assert count == 1, f"Unexpected number of running kresd instances: {count}"
+
+print("Increasing worker count to 8")
+set_workers(8)
+time.sleep(2)
+count = count_running()
+assert count == 8, f"Unexpected number of running kresd instances: {count}"
+
+print("Decreasing worker count to 4")
+set_workers(4)
+time.sleep(2)
+count = count_running()
+assert count == 4, f"Unexpected number of running kresd instances: {count}"
\ No newline at end of file