-from typing import List, Optional
+from typing import List, Optional, Union
-from knot_resolver_manager.datamodel.types import Dir, DomainName, File, SizeUnit, TimeUnit
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import Dir, DomainName, File, IntNonNegative, Percent, SizeUnit, TimeUnit
from knot_resolver_manager.utils.modeling import ConfigSchema
raise ValueError("cache prefilling is not yet supported for non-root zones")
+class GarbageCollectorSchema(ConfigSchema):
+    """
+    Configuration options of the cache garbage collector (kres-cache-gc).
+
+    ---
+    interval: Time interval specifying how often the garbage collector will be run.
+    threshold: Cache usage in percent that triggers the garbage collector.
+    release: Percent of used cache to be freed by the garbage collector.
+    temp_keys_space: Maximum amount of temporary memory for copied keys (0 = unlimited).
+    rw_deletes: Maximum number of deleted records per read-write transaction (0 = unlimited).
+    rw_reads: Maximum number of read records per read-write transaction (0 = unlimited).
+    rw_duration: Maximum duration of read-write transaction (0 = unlimited).
+    rw_delay: Wait time between two read-write transactions.
+    dry_run: Run the garbage collector in dry-run mode.
+    """
+
+    interval: TimeUnit = TimeUnit("1s")
+    threshold: Percent = Percent(80)
+    release: Percent = Percent(10)
+    temp_keys_space: SizeUnit = SizeUnit(0)
+    rw_deletes: IntNonNegative = IntNonNegative(100)
+    rw_reads: IntNonNegative = IntNonNegative(200)
+    rw_duration: TimeUnit = TimeUnit(0)
+    rw_delay: TimeUnit = TimeUnit(0)
+    dry_run: bool = False
+
+
class CacheSchema(ConfigSchema):
"""
DNS resolver cache configuration.
---
- garbage_collector: Automatically use garbage collector to periodically clear cache.
storage: Cache storage of the DNS resolver.
size_max: Maximum size of the cache.
+ garbage_collector: Use the garbage collector (kres-cache-gc) to periodically clear cache.
ttl_min: Minimum time-to-live for the cache entries.
ttl_max: Maximum time-to-live for the cache entries.
ns_timeout: Time interval for which a nameserver address will be ignored after determining that it does not return (useful) answers.
prefill: Prefill the cache periodically by importing zone data obtained over HTTP.
"""
- garbage_collector: bool = True
storage: Dir = Dir("/var/cache/knot-resolver")
size_max: SizeUnit = SizeUnit("100M")
+ garbage_collector: Union[GarbageCollectorSchema, Literal[False]] = GarbageCollectorSchema()
ttl_min: TimeUnit = TimeUnit("5s")
ttl_max: TimeUnit = TimeUnit("6d")
ns_timeout: TimeUnit = TimeUnit("1000ms")
from typing_extensions import Literal
-from knot_resolver_manager.datamodel.types import DomainName, IDPattern, IPAddress, TimeUnit, UncheckedPath
-from knot_resolver_manager.utils.modeling import BaseSchema
+from knot_resolver_manager.datamodel.types import DomainName, IDPattern, IPAddress, TimeUnit
+from knot_resolver_manager.datamodel.types.files import UncheckedPath
+from knot_resolver_manager.utils.modeling import ConfigSchema
-class SubtreeSchema(BaseSchema):
+class SubtreeSchema(ConfigSchema):
"""
Local data and configuration of subtree.
raise ValueError("'refresh' can be only configured with 'roots-file' or 'roots-url'")
-class LocalDataSchema(BaseSchema):
+class LocalDataSchema(ConfigSchema):
"""
Local data for forward records (A/AAAA) and reverse records (PTR).
LogGroupsEnum: TypeAlias = Literal[
"manager",
"supervisord",
+ "cache-gc",
"system",
"cache",
"io",
IPv6Address,
IPv6Network96,
ListOrSingle,
+ Percent,
PortNumber,
SizeUnit,
TimeUnit,
"IPv6Address",
"IPv6Network96",
"ListOrSingle",
+ "Percent",
"PortNumber",
"SizeUnit",
"TimeUnit",
self._value = int(val) * type(self)._units[unit]
else:
raise ValueError(f"{type(self._value)} Failed to convert: {self}")
+ elif source_value in (0, "0"):
+ self._value_orig = source_value
+ self._value = int(source_value)
elif isinstance(source_value, int):
raise ValueError(
f"number without units, please convert to string and add unit - {list(type(self)._units.keys())}",
_max: int = 65_535
+# Integer percentage constrained to the inclusive range 0-100;
+# range enforcement is inherited from IntRangeBase via _min/_max.
+class Percent(IntRangeBase):
+    _min: int = 0
+    _max: int = 100
+
+
class PortNumber(IntRangeBase):
_min: int = 1
_max: int = 65_535
def bytes(self) -> int:
return self._value
+ def mbytes(self) -> int:
+ return self._value // 1024 ** 2
+
class TimeUnit(UnitBase):
- _units = {"ms": 1, "s": 1000, "m": 60 * 1000, "h": 3600 * 1000, "d": 24 * 3600 * 1000}
+ _units = {"us": 1, "ms": 10 ** 3, "s": 10 ** 6, "m": 60 * 10 ** 6, "h": 3600 * 10 ** 6, "d": 24 * 3600 * 10 ** 6}
def seconds(self) -> int:
- return self._value // 1000
+ return self._value // 1000 ** 2
def millis(self) -> int:
+ return self._value // 1000
+
+ def micros(self) -> int:
return self._value
await self._rolling_restart(config)
await self._ensure_number_of_children(config, int(config.workers))
- if self._is_gc_running() != config.cache.garbage_collector:
+ if self._is_gc_running() != bool(config.cache.garbage_collector):
if config.cache.garbage_collector:
logger.debug("Starting cache GC")
await self._start_gc(config)
raise RuntimeError(f"Unexpected subprocess type {self.subprocess_type}")
+def kres_cache_gc_args(config: KresConfig) -> str:
+    """Build the extra command-line argument string for kres-cache-gc.
+
+    Adds '-v' when debug logging or the 'cache-gc' log group is enabled,
+    then maps each GarbageCollectorSchema field onto its corresponding
+    CLI flag.  Raises ValueError when the garbage collector is disabled
+    (config.cache.garbage_collector is False) — callers are expected to
+    check that before invoking this.  The result starts with a leading
+    space so it can be appended directly to the base command line.
+    """
+    args = ""
+
+    if config.logging.level == "debug" or (config.logging.groups and "cache-gc" in config.logging.groups):
+        args += " -v"
+
+    gc_config = config.cache.garbage_collector
+    if gc_config:
+        args += (
+            f" -d {gc_config.interval.millis()}"
+            f" -u {gc_config.threshold}"
+            f" -f {gc_config.release}"
+            f" -l {gc_config.rw_deletes}"
+            f" -L {gc_config.rw_reads}"
+            f" -t {gc_config.temp_keys_space.mbytes()}"
+            f" -m {gc_config.rw_duration.micros()}"
+            f" -w {gc_config.rw_delay.micros()}"
+        )
+        if gc_config.dry_run:
+            args += " -n"
+        return args
+    raise ValueError("missing configuration for the cache garbage collector")
+
+
@dataclass
class ProcessTypeConfig:
"""
return ProcessTypeConfig( # type: ignore[call-arg]
logfile=supervisord_subprocess_log_dir(config) / "gc.log",
workdir=cwd,
- command=f"{kres_gc_executable()} -c {kresd_cache_dir(config)} -d 1000",
+ command=f"{kres_gc_executable()} -c {kresd_cache_dir(config)}{kres_cache_gc_args(config)}",
environment="",
)
manager=ProcessTypeConfig.create_manager_config(config),
config=SupervisordConfig.create(config),
)
+ print(config_string)
await writefile(supervisord_config_file_tmp(config), config_string)
# atomically replace (we don't technically need this right now, but better safe then sorry)
os.rename(supervisord_config_file_tmp(config), supervisord_config_file(config))
SizeUnit(val)
-@pytest.mark.parametrize("val", ["1d", "24h", "1440m", "86400s", "86400000ms"])
+@pytest.mark.parametrize("val", ["1d", "24h", "1440m", "86400s", "86400000ms", "86400000000us"])
def test_time_unit_valid(val: str):
o = TimeUnit(val)
- assert int(o) == 86400000
+ assert int(o) == 86400000000
assert str(o) == val
assert o.seconds() == 86400
assert o.millis() == 86400000
+ assert o.micros() == 86400000000
@pytest.mark.parametrize("val", ["-1", "-24h", "1440mm", 6575, -1440])