From: Zbigniew Jędrzejewski-Szmek
Date: Sun, 25 Oct 2015 03:28:07 +0000 (-0400)
Subject: log-generator: add option to generate easily compressible data
X-Git-Tag: v228~108^2
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2c1a55cf3fe78bcf728f73487813ddd3ee020a98;p=thirdparty%2Fsystemd.git

log-generator: add option to generate easily compressible data

This is useful to check that compression actually works, and to see
how compression influences file size in the best-case scenario for
compression. (The answer: not as much as one would hope. There is
still the sizable overhead of the indexing, and since every field is
compressed separately, even fields that compress very well contribute
to the file size. This overhead becomes negligible only for very
large fields.)
---

diff --git a/src/journal-remote/log-generator.py b/src/journal-remote/log-generator.py
index 9a8fb07c7fd..fd6964e7582 100755
--- a/src/journal-remote/log-generator.py
+++ b/src/journal-remote/log-generator.py
@@ -6,6 +6,8 @@ import argparse
 PARSER = argparse.ArgumentParser()
 PARSER.add_argument('n', type=int)
 PARSER.add_argument('--dots', action='store_true')
+PARSER.add_argument('--data-size', type=int, default=4000)
+PARSER.add_argument('--data-type', choices={'random', 'simple'})
 OPTIONS = PARSER.parse_args()
 
 template = """\