git.ipfire.org Git - thirdparty/zstd.git/commitdiff
rateLimiter does not "catch up" when input speed is slow
author: Yann Collet <cyan@fb.com>
Mon, 13 Aug 2018 18:38:55 +0000 (11:38 -0700)
committer: Yann Collet <cyan@fb.com>
Mon, 13 Aug 2018 18:38:55 +0000 (11:38 -0700)
programs/fileio.c
programs/zstd.1.md
tests/rateLimiter.py

index 89ee524b3de7ade7ad8be091ce618a6152718c2f..c3eddad4900c5fb36bbadb703337886467624587 100644 (file)
@@ -837,9 +837,13 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
                             csuzfp = zfp;
                             lastFlushedSize = compressedfilesize;
                             assert(inputPresented > 0);
+                            DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
+                                            inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
+                                            (U32)newlyIngested, (U32)newlyConsumed,
+                                            (U32)newlyFlushed, (U32)newlyProduced);
                             if ( (inputBlocked > inputPresented / 8)     /* input is waiting often, because input buffers is full : compression or output too slow */
-                              && (newlyFlushed * 17 / 16 > newlyProduced)  /* flush everything that is produced */
-                              && (newlyIngested * 17 / 16 > newlyConsumed) /* can't keep up with input speed */
+                              && (newlyFlushed * 33 / 32 > newlyProduced)  /* flush everything that is produced */
+                              && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */
                             ) {
                                 DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n",
                                                 newlyIngested, newlyConsumed, newlyProduced, newlyFlushed);
index b71d5d5bfd219716bb846244125984ab24439f5e..5f37018640cf5cca8142740da9acccb4712ca72a 100644 (file)
@@ -139,6 +139,7 @@ the last one takes effect.
     The current compression level can be observed live by using command `-v`.
     Works with multi-threading and `--long` mode.
     Does not work with `--single-thread`.
+    Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible.
 * `-D file`:
     use `file` as Dictionary to compress or decompress FILE(s)
 * `--no-dictID`:
index 134ef8971aa1def9cc2ceac6377154820d37b648..15222e0166953a9a930d10538a0e2653032a2fbd 100755 (executable)
@@ -19,7 +19,7 @@ import time
 
 MB = 1024 * 1024
 rate = float(sys.argv[1]) * MB
-rate *= 1.25   # compensation for excluding write time (experimentally determined)
+rate *= 1.4   # compensation for excluding i/o time (experimentally determined)
 start = time.time()
 total_read = 0
 
@@ -29,9 +29,14 @@ while len(buf):
   to_read = max(int(rate * (now - start) - total_read), 1)
   max_buf_size = 1 * MB
   to_read = min(to_read, max_buf_size)
+
+  read_start = time.time()
   buf = sys.stdin.buffer.read(to_read)
-  write_start = time.time()
+
+  write_start = read_end = time.time()
   sys.stdout.buffer.write(buf)
   write_end = time.time()
-  start += write_end - write_start   # exclude write delay
+
+  wait_time = max(read_end - read_start, write_end - write_start)
+  start += wait_time   # exclude delay of the slowest
   total_read += len(buf)