crc32_comb.c
deflate.c
deflate_fast.c
+ deflate_huff.c
deflate_medium.c
deflate_quick.c
+ deflate_rle.c
deflate_slow.c
+ deflate_stored.c
functable.c
infback.c
inffast.c
crc32_comb.o \
deflate.o \
deflate_fast.o \
+ deflate_huff.o \
deflate_medium.o \
deflate_quick.o \
+ deflate_rle.o \
deflate_slow.o \
+ deflate_stored.o \
functable.o \
infback.o \
inffast.o \
crc32_comb.lo \
deflate.lo \
deflate_fast.lo \
+ deflate_huff.lo \
deflate_medium.lo \
deflate_quick.lo \
+ deflate_rle.lo \
deflate_slow.lo \
+ deflate_stored.lo \
functable.lo \
infback.lo \
inffast.lo \
/* Compression function. Returns the block state after the call. */
static int deflateStateCheck (PREFIX3(stream) *strm);
-static block_state deflate_stored (deflate_state *s, int flush);
-Z_INTERNAL block_state deflate_fast (deflate_state *s, int flush);
-Z_INTERNAL block_state deflate_quick (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_stored(deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_fast (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_quick (deflate_state *s, int flush);
#ifndef NO_MEDIUM_STRATEGY
-Z_INTERNAL block_state deflate_medium (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_medium(deflate_state *s, int flush);
#endif
-Z_INTERNAL block_state deflate_slow (deflate_state *s, int flush);
-static block_state deflate_rle (deflate_state *s, int flush);
-static block_state deflate_huff (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_slow (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_rle (deflate_state *s, int flush);
+Z_INTERNAL block_state deflate_huff (deflate_state *s, int flush);
static void lm_init (deflate_state *s);
Z_INTERNAL unsigned read_buf (PREFIX3(stream) *strm, unsigned char *buf, unsigned size);
s->match_start = 0;
}
-#ifdef ZLIB_DEBUG
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
-/* ===========================================================================
- * Check that the match at match_start is indeed a match.
- */
-void check_match(deflate_state *s, Pos start, Pos match, int length) {
- /* check that the match length is valid*/
- if (length < MIN_MATCH || length > MAX_MATCH) {
- fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
- z_error("invalid match length");
- }
- /* check that the match isn't at the same position as the start string */
- if (match == start) {
- fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
- z_error("invalid match position");
- }
- /* check that the match is indeed a match */
- if (memcmp(s->window + match, s->window + start, length) != EQUAL) {
- int32_t i = 0;
- fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
- do {
- fprintf(stderr, " %03d: match [%02x] start [%02x]\n", i++, s->window[match++], s->window[start++]);
- } while (--length != 0);
- z_error("invalid match");
- }
- if (z_verbose > 1) {
- fprintf(stderr, "\\[%u,%d]", start-match, length);
- do {
- putc(s->window[start++], stderr);
- } while (--length != 0);
- }
-}
-#else
-# define check_match(s, start, match, length)
-#endif /* ZLIB_DEBUG */
-
/* ===========================================================================
* Fill the window when the lookahead becomes insufficient.
* Updates strstart and lookahead.
"not enough room for search");
}
-/* ===========================================================================
- * Copy without compression as much as possible from the input stream, return
- * the current block state.
- *
- * In case deflateParams() is used to later switch to a non-zero compression
- * level, s->matches (otherwise unused when storing) keeps track of the number
- * of hash table slides to perform. If s->matches is 1, then one hash table
- * slide will be done when switching. If s->matches is 2, the maximum value
- * allowed here, then the hash table will be cleared, since two or more slides
- * is the same as a clear.
- *
- * deflate_stored() is written to minimize the number of times an input byte is
- * copied. It is most efficient with large input and output buffers, which
- * maximizes the opportunites to have a single copy from next_in to next_out.
- */
-static block_state deflate_stored(deflate_state *s, int flush) {
- /* Smallest worthy block size when not flushing or finishing. By default
- * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
- * large input and output buffers, the stored block size will be larger.
- */
- unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
-
- /* Copy as many min_block or larger stored blocks directly to next_out as
- * possible. If flushing, copy the remaining available input to next_out as
- * stored blocks, if there is enough space.
- */
- unsigned len, left, have, last = 0;
- unsigned used = s->strm->avail_in;
- do {
- /* Set len to the maximum size block that we can copy directly with the
- * available input data and output space. Set left to how much of that
- * would be copied from what's left in the window.
- */
- len = MAX_STORED; /* maximum deflate stored block length */
- have = (s->bi_valid + 42) >> 3; /* number of header bytes */
- if (s->strm->avail_out < have) /* need room for header */
- break;
- /* maximum stored block length that will fit in avail_out: */
- have = s->strm->avail_out - have;
- left = (int)s->strstart - s->block_start; /* bytes left in window */
- if (len > (unsigned long)left + s->strm->avail_in)
- len = left + s->strm->avail_in; /* limit len to the input */
- if (len > have)
- len = have; /* limit len to the output */
-
- /* If the stored block would be less than min_block in length, or if
- * unable to copy all of the available input when flushing, then try
- * copying to the window and the pending buffer instead. Also don't
- * write an empty block when flushing -- deflate() does that.
- */
- if (len < min_block && ((len == 0 && flush != Z_FINISH) || flush == Z_NO_FLUSH || len != left + s->strm->avail_in))
- break;
-
- /* Make a dummy stored block in pending to get the header bytes,
- * including any pending bits. This also updates the debugging counts.
- */
- last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
- zng_tr_stored_block(s, (char *)0, 0L, last);
-
- /* Replace the lengths in the dummy stored block with len. */
- s->pending -= 4;
- put_short(s, (uint16_t)len);
- put_short(s, (uint16_t)~len);
-
- /* Write the stored block header bytes. */
- flush_pending(s->strm);
-
- /* Update debugging counts for the data about to be copied. */
- cmpr_bits_add(s, len << 3);
- sent_bits_add(s, len << 3);
-
- /* Copy uncompressed bytes from the window to next_out. */
- if (left) {
- if (left > len)
- left = len;
- memcpy(s->strm->next_out, s->window + s->block_start, left);
- s->strm->next_out += left;
- s->strm->avail_out -= left;
- s->strm->total_out += left;
- s->block_start += (int)left;
- len -= left;
- }
-
- /* Copy uncompressed bytes directly from next_in to next_out, updating
- * the check value.
- */
- if (len) {
- read_buf(s->strm, s->strm->next_out, len);
- s->strm->next_out += len;
- s->strm->avail_out -= len;
- s->strm->total_out += len;
- }
- } while (last == 0);
-
- /* Update the sliding window with the last s->w_size bytes of the copied
- * data, or append all of the copied data to the existing window if less
- * than s->w_size bytes were copied. Also update the number of bytes to
- * insert in the hash tables, in the event that deflateParams() switches to
- * a non-zero compression level.
- */
- used -= s->strm->avail_in; /* number of input bytes directly copied */
- if (used) {
- /* If any input was used, then no unused input remains in the window,
- * therefore s->block_start == s->strstart.
- */
- if (used >= s->w_size) { /* supplant the previous history */
- s->matches = 2; /* clear hash */
- memcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
- s->strstart = s->w_size;
- s->insert = s->strstart;
- } else {
- if (s->window_size - s->strstart <= used) {
- /* Slide the window down. */
- s->strstart -= s->w_size;
- memcpy(s->window, s->window + s->w_size, s->strstart);
- if (s->matches < 2)
- s->matches++; /* add a pending slide_hash() */
- if (s->insert > s->strstart)
- s->insert = s->strstart;
- }
- memcpy(s->window + s->strstart, s->strm->next_in - used, used);
- s->strstart += used;
- s->insert += MIN(used, s->w_size - s->insert);
- }
- s->block_start = (int)s->strstart;
- }
- if (s->high_water < s->strstart)
- s->high_water = s->strstart;
-
- /* If the last block was written to next_out, then done. */
- if (last)
- return finish_done;
-
- /* If flushing and all input has been consumed, then done. */
- if (flush != Z_NO_FLUSH && flush != Z_FINISH && s->strm->avail_in == 0 && (int)s->strstart == s->block_start)
- return block_done;
-
- /* Fill the window with any remaining input. */
- have = s->window_size - s->strstart;
- if (s->strm->avail_in > have && s->block_start >= (int)s->w_size) {
- /* Slide the window down. */
- s->block_start -= (int)s->w_size;
- s->strstart -= s->w_size;
- memcpy(s->window, s->window + s->w_size, s->strstart);
- if (s->matches < 2)
- s->matches++; /* add a pending slide_hash() */
- have += s->w_size; /* more space now */
- if (s->insert > s->strstart)
- s->insert = s->strstart;
- }
- if (have > s->strm->avail_in)
- have = s->strm->avail_in;
- if (have) {
- read_buf(s->strm, s->window + s->strstart, have);
- s->strstart += have;
- s->insert += MIN(have, s->w_size - s->insert);
- }
- if (s->high_water < s->strstart)
- s->high_water = s->strstart;
-
- /* There was not enough avail_out to write a complete worthy or flushed
- * stored block to next_out. Write a stored block to pending instead, if we
- * have enough input for a worthy block, or if flushing and there is enough
- * room for the remaining input as a stored block in the pending buffer.
- */
- have = (s->bi_valid + 42) >> 3; /* number of header bytes */
- /* maximum stored block length that will fit in pending: */
- have = MIN(s->pending_buf_size - have, MAX_STORED);
- min_block = MIN(have, s->w_size);
- left = (int)s->strstart - s->block_start;
- if (left >= min_block || ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH && s->strm->avail_in == 0 && left <= have)) {
- len = MIN(left, have);
- last = flush == Z_FINISH && s->strm->avail_in == 0 && len == left ? 1 : 0;
- zng_tr_stored_block(s, (char *)s->window + s->block_start, len, last);
- s->block_start += (int)len;
- flush_pending(s->strm);
- }
-
- /* We've done all we can with the available input and output. */
- return last ? finish_started : need_more;
-}
-
-
-/* ===========================================================================
- * For Z_RLE, simply look for runs of bytes, generate matches only of distance
- * one. Do not maintain a hash table. (It will be regenerated if this run of
- * deflate switches away from Z_RLE.)
- */
-static block_state deflate_rle(deflate_state *s, int flush) {
- int bflush = 0; /* set if current block must be flushed */
- unsigned int prev; /* byte at distance one to match */
- unsigned char *scan, *strend; /* scan goes up to strend for length of run */
- uint32_t match_len = 0;
-
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the longest run, plus one for the unrolled loop.
- */
- if (s->lookahead <= MAX_MATCH) {
- fill_window(s);
- if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH)
- return need_more;
- if (s->lookahead == 0)
- break; /* flush the current block */
- }
-
- /* See how many times the previous byte repeats */
- if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
- scan = s->window + s->strstart - 1;
- prev = *scan;
- if (prev == *++scan && prev == *++scan && prev == *++scan) {
- strend = s->window + s->strstart + MAX_MATCH;
- do {
- } while (prev == *++scan && prev == *++scan &&
- prev == *++scan && prev == *++scan &&
- prev == *++scan && prev == *++scan &&
- prev == *++scan && prev == *++scan &&
- scan < strend);
- match_len = MAX_MATCH - (unsigned int)(strend - scan);
- if (match_len > s->lookahead)
- match_len = s->lookahead;
- }
- Assert(scan <= s->window + s->window_size - 1, "wild scan");
- }
-
- /* Emit match if have run of MIN_MATCH or longer, else emit literal */
- if (match_len >= MIN_MATCH) {
- check_match(s, s->strstart, s->strstart - 1, match_len);
- bflush = zng_tr_tally_dist(s, 1, match_len - MIN_MATCH);
-
- s->lookahead -= match_len;
- s->strstart += match_len;
- match_len = 0;
- } else {
- /* No match, output a literal byte */
- bflush = zng_tr_tally_lit(s, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- }
- if (bflush)
- FLUSH_BLOCK(s, 0);
- }
- s->insert = 0;
- if (flush == Z_FINISH) {
- FLUSH_BLOCK(s, 1);
- return finish_done;
- }
- if (s->sym_next)
- FLUSH_BLOCK(s, 0);
- return block_done;
-}
-
-/* ===========================================================================
- * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
- * (It will be regenerated if this run of deflate switches away from Huffman.)
- */
-static block_state deflate_huff(deflate_state *s, int flush) {
- int bflush = 0; /* set if current block must be flushed */
-
- for (;;) {
- /* Make sure that we have a literal to write. */
- if (s->lookahead == 0) {
- fill_window(s);
- if (s->lookahead == 0) {
- if (flush == Z_NO_FLUSH)
- return need_more;
- break; /* flush the current block */
- }
- }
-
- /* Output a literal byte */
- bflush = zng_tr_tally_lit(s, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- if (bflush)
- FLUSH_BLOCK(s, 0);
- }
- s->insert = 0;
- if (flush == Z_FINISH) {
- FLUSH_BLOCK(s, 1);
- return finish_done;
- }
- if (s->sym_next)
- FLUSH_BLOCK(s, 0);
- return block_done;
-}
#ifndef ZLIB_COMPAT
/* =========================================================================
--- /dev/null
+/* deflate_huff.c -- compress data using huffman encoding only strategy
+ *
+ * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zbuild.h"
+#include "deflate.h"
+#include "deflate_p.h"
+#include "functable.h"
+
+/* ===========================================================================
+ * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
+ * (It will be regenerated if this run of deflate switches away from Huffman.)
+ */
+Z_INTERNAL block_state deflate_huff(deflate_state *s, int flush) {
+ int bflush = 0; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we have a literal to write. */
+ if (s->lookahead == 0) {
+ fill_window(s);
+ if (s->lookahead == 0) {
+ if (flush == Z_NO_FLUSH)
+ return need_more;
+ break; /* flush the current block */
+ }
+ }
+
+ /* Output a literal byte */
+ bflush = zng_tr_tally_lit(s, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ if (bflush)
+ FLUSH_BLOCK(s, 0);
+ }
+ s->insert = 0;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+}
/* Forward declare common non-inlined functions declared in deflate.c */
#ifdef ZLIB_DEBUG
-void check_match(deflate_state *s, Pos start, Pos match, int length);
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+static inline void check_match(deflate_state *s, Pos start, Pos match, int length) {
+    /* check that the match length is valid */
+ if (length < MIN_MATCH || length > MAX_MATCH) {
+ fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
+ z_error("invalid match length");
+ }
+ /* check that the match isn't at the same position as the start string */
+ if (match == start) {
+ fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
+ z_error("invalid match position");
+ }
+ /* check that the match is indeed a match */
+ if (memcmp(s->window + match, s->window + start, length) != 0) {
+ int32_t i = 0;
+ fprintf(stderr, " start %u, match %u, length %d\n", start, match, length);
+ do {
+ fprintf(stderr, " %03d: match [%02x] start [%02x]\n", i++,
+ s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr, "\\[%u,%d]", start-match, length);
+ do {
+ putc(s->window[start++], stderr);
+ } while (--length != 0);
+ }
+}
#else
#define check_match(s, start, match, length)
#endif
-void flush_pending(PREFIX3(stream) *strm);
+
+Z_INTERNAL void flush_pending(PREFIX3(stream) *strm);
+Z_INTERNAL unsigned read_buf(PREFIX3(stream) *strm, unsigned char *buf, unsigned size);
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
s->sym_buf[s->sym_next++] = (uint8_t)len;
s->matches++;
dist--;
- Assert(dist < MAX_DIST(s) && (uint16_t)d_code(dist) < (uint16_t)D_CODES,
+ Assert(dist < MAX_DIST(s) && (uint16_t)d_code(dist) < (uint16_t)D_CODES,
"zng_tr_tally: bad match");
s->dyn_ltree[zng_length_code[len]+LITERALS+1].Freq++;
--- /dev/null
+/* deflate_rle.c -- compress data using RLE strategy of deflation algorithm
+ *
+ * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zbuild.h"
+#include "deflate.h"
+#include "deflate_p.h"
+#include "functable.h"
+
+/* ===========================================================================
+ * For Z_RLE, simply look for runs of bytes, generate matches only of distance
+ * one. Do not maintain a hash table. (It will be regenerated if this run of
+ * deflate switches away from Z_RLE.)
+ */
+Z_INTERNAL block_state deflate_rle(deflate_state *s, int flush) {
+ int bflush = 0; /* set if current block must be flushed */
+ unsigned int prev; /* byte at distance one to match */
+ unsigned char *scan, *strend; /* scan goes up to strend for length of run */
+ uint32_t match_len = 0;
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the longest run, plus one for the unrolled loop.
+ */
+ if (s->lookahead <= MAX_MATCH) {
+ fill_window(s);
+ if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH)
+ return need_more;
+ if (s->lookahead == 0)
+ break; /* flush the current block */
+ }
+
+ /* See how many times the previous byte repeats */
+ if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
+ scan = s->window + s->strstart - 1;
+ prev = *scan;
+ if (prev == *++scan && prev == *++scan && prev == *++scan) {
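+                /* limit the run scan to MAX_MATCH, the longest match deflate can encode */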
+ strend = s->window + s->strstart + MAX_MATCH;
+ do {
+ } while (prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ scan < strend);
+ match_len = MAX_MATCH - (unsigned int)(strend - scan);
+ if (match_len > s->lookahead)
+ match_len = s->lookahead;
+ }
+ Assert(scan <= s->window + s->window_size - 1, "wild scan");
+ }
+
+ /* Emit match if have run of MIN_MATCH or longer, else emit literal */
+ if (match_len >= MIN_MATCH) {
+ check_match(s, s->strstart, s->strstart - 1, match_len);
+
+ bflush = zng_tr_tally_dist(s, 1, match_len - MIN_MATCH);
+
+ s->lookahead -= match_len;
+ s->strstart += match_len;
+ match_len = 0;
+ } else {
+ /* No match, output a literal byte */
+ bflush = zng_tr_tally_lit(s, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush)
+ FLUSH_BLOCK(s, 0);
+ }
+ s->insert = 0;
+ if (flush == Z_FINISH) {
+ FLUSH_BLOCK(s, 1);
+ return finish_done;
+ }
+ if (s->sym_next)
+ FLUSH_BLOCK(s, 0);
+ return block_done;
+}
--- /dev/null
+/* deflate_stored.c -- store data without compression using deflation algorithm
+ *
+ * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zbuild.h"
+#include "deflate.h"
+#include "deflate_p.h"
+#include "functable.h"
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ *
+ * In case deflateParams() is used to later switch to a non-zero compression
+ * level, s->matches (otherwise unused when storing) keeps track of the number
+ * of hash table slides to perform. If s->matches is 1, then one hash table
+ * slide will be done when switching. If s->matches is 2, the maximum value
+ * allowed here, then the hash table will be cleared, since two or more slides
+ * is the same as a clear.
+ *
+ * deflate_stored() is written to minimize the number of times an input byte is
+ * copied. It is most efficient with large input and output buffers, which
+ * maximizes the opportunities to have a single copy from next_in to next_out.
+ */
+Z_INTERNAL block_state deflate_stored(deflate_state *s, int flush) {
+ /* Smallest worthy block size when not flushing or finishing. By default
+ * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
+ * large input and output buffers, the stored block size will be larger.
+ */
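+    /* The 5 is the stored block overhead: a byte of header bits plus 4 bytes of LEN/NLEN. */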
+ unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
+
+ /* Copy as many min_block or larger stored blocks directly to next_out as
+ * possible. If flushing, copy the remaining available input to next_out as
+ * stored blocks, if there is enough space.
+ */
+ unsigned len, left, have, last = 0;
+ unsigned used = s->strm->avail_in;
+ do {
+ /* Set len to the maximum size block that we can copy directly with the
+ * available input data and output space. Set left to how much of that
+ * would be copied from what's left in the window.
+ */
+ len = MAX_STORED; /* maximum deflate stored block length */
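+        /* 42 = 3 block-header bits + 32 bits of LEN/NLEN + 7 to round the
+         * total (including the bi_valid pending bits) up to whole bytes.
+         */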
+ have = (s->bi_valid + 42) >> 3; /* number of header bytes */
+ if (s->strm->avail_out < have) /* need room for header */
+ break;
+ /* maximum stored block length that will fit in avail_out: */
+ have = s->strm->avail_out - have;
+ left = (int)s->strstart - s->block_start; /* bytes left in window */
+ if (len > (unsigned long)left + s->strm->avail_in)
+ len = left + s->strm->avail_in; /* limit len to the input */
+ if (len > have)
+ len = have; /* limit len to the output */
+
+ /* If the stored block would be less than min_block in length, or if
+ * unable to copy all of the available input when flushing, then try
+ * copying to the window and the pending buffer instead. Also don't
+ * write an empty block when flushing -- deflate() does that.
+ */
+ if (len < min_block && ((len == 0 && flush != Z_FINISH) || flush == Z_NO_FLUSH || len != left + s->strm->avail_in))
+ break;
+
+ /* Make a dummy stored block in pending to get the header bytes,
+ * including any pending bits. This also updates the debugging counts.
+ */
+ last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
+ zng_tr_stored_block(s, (char *)0, 0L, last);
+
+ /* Replace the lengths in the dummy stored block with len. */
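+        /* A stored block records LEN followed by NLEN, its one's complement. */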
+ s->pending -= 4;
+ put_short(s, (uint16_t)len);
+ put_short(s, (uint16_t)~len);
+
+ /* Write the stored block header bytes. */
+ flush_pending(s->strm);
+
+ /* Update debugging counts for the data about to be copied. */
+ cmpr_bits_add(s, len << 3);
+ sent_bits_add(s, len << 3);
+
+ /* Copy uncompressed bytes from the window to next_out. */
+ if (left) {
+ if (left > len)
+ left = len;
+ memcpy(s->strm->next_out, s->window + s->block_start, left);
+ s->strm->next_out += left;
+ s->strm->avail_out -= left;
+ s->strm->total_out += left;
+ s->block_start += (int)left;
+ len -= left;
+ }
+
+ /* Copy uncompressed bytes directly from next_in to next_out, updating
+ * the check value.
+ */
+ if (len) {
+ read_buf(s->strm, s->strm->next_out, len);
+ s->strm->next_out += len;
+ s->strm->avail_out -= len;
+ s->strm->total_out += len;
+ }
+ } while (last == 0);
+
+ /* Update the sliding window with the last s->w_size bytes of the copied
+ * data, or append all of the copied data to the existing window if less
+ * than s->w_size bytes were copied. Also update the number of bytes to
+ * insert in the hash tables, in the event that deflateParams() switches to
+ * a non-zero compression level.
+ */
+ used -= s->strm->avail_in; /* number of input bytes directly copied */
+ if (used) {
+ /* If any input was used, then no unused input remains in the window,
+ * therefore s->block_start == s->strstart.
+ */
+ if (used >= s->w_size) { /* supplant the previous history */
+ s->matches = 2; /* clear hash */
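+            /* read_buf() has already advanced next_in past every byte copied
+             * directly to next_out, so the last w_size of them end at next_in.
+             */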
+ memcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
+ s->strstart = s->w_size;
+ s->insert = s->strstart;
+ } else {
+ if (s->window_size - s->strstart <= used) {
+ /* Slide the window down. */
+ s->strstart -= s->w_size;
+ memcpy(s->window, s->window + s->w_size, s->strstart);
+ if (s->matches < 2)
+ s->matches++; /* add a pending slide_hash() */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
+ }
+ memcpy(s->window + s->strstart, s->strm->next_in - used, used);
+ s->strstart += used;
+ s->insert += MIN(used, s->w_size - s->insert);
+ }
+ s->block_start = (int)s->strstart;
+ }
+ if (s->high_water < s->strstart)
+ s->high_water = s->strstart;
+
+ /* If the last block was written to next_out, then done. */
+ if (last)
+ return finish_done;
+
+ /* If flushing and all input has been consumed, then done. */
+ if (flush != Z_NO_FLUSH && flush != Z_FINISH && s->strm->avail_in == 0 && (int)s->strstart == s->block_start)
+ return block_done;
+
+ /* Fill the window with any remaining input. */
+ have = s->window_size - s->strstart;
+ if (s->strm->avail_in > have && s->block_start >= (int)s->w_size) {
+ /* Slide the window down. */
+ s->block_start -= (int)s->w_size;
+ s->strstart -= s->w_size;
+ memcpy(s->window, s->window + s->w_size, s->strstart);
+ if (s->matches < 2)
+ s->matches++; /* add a pending slide_hash() */
+ have += s->w_size; /* more space now */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
+ }
+ if (have > s->strm->avail_in)
+ have = s->strm->avail_in;
+ if (have) {
+ read_buf(s->strm, s->window + s->strstart, have);
+ s->strstart += have;
+ s->insert += MIN(have, s->w_size - s->insert);
+ }
+ if (s->high_water < s->strstart)
+ s->high_water = s->strstart;
+
+ /* There was not enough avail_out to write a complete worthy or flushed
+ * stored block to next_out. Write a stored block to pending instead, if we
+ * have enough input for a worthy block, or if flushing and there is enough
+ * room for the remaining input as a stored block in the pending buffer.
+ */
+ have = (s->bi_valid + 42) >> 3; /* number of header bytes */
+ /* maximum stored block length that will fit in pending: */
+ have = MIN(s->pending_buf_size - have, MAX_STORED);
+ min_block = MIN(have, s->w_size);
+ left = (int)s->strstart - s->block_start;
+ if (left >= min_block || ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH && s->strm->avail_in == 0 && left <= have)) {
+ len = MIN(left, have);
+ last = flush == Z_FINISH && s->strm->avail_in == 0 && len == left ? 1 : 0;
+ zng_tr_stored_block(s, (char *)s->window + s->block_start, len, last);
+ s->block_start += (int)len;
+ flush_pending(s->strm);
+ }
+
+ /* We've done all we can with the available input and output. */
+ return last ? finish_started : need_more;
+}
crc32_comb.obj \
deflate.obj \
deflate_fast.obj \
- deflate_slow.obj \
+ deflate_huff.obj \
deflate_quick.obj \
deflate_medium.obj \
+ deflate_rle.obj \
+ deflate_slow.obj \
+ deflate_stored.obj \
functable.obj \
infback.obj \
inflate.obj \
crc32.obj: $(SRCDIR)/crc32.c $(SRCDIR)/zbuild.h $(SRCDIR)/zendian.h $(SRCDIR)/deflate.h $(SRCDIR)/functable.h $(SRCDIR)/crc32_tbl.h
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
-deflate_quick.obj: $(SRCDIR)/deflate_quick.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h $(SRCDIR)/trees_emit.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_quick.obj: $(SRCDIR)/deflate_quick.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h $(SRCDIR)/trees_emit.h
deflate_medium.obj: $(SRCDIR)/deflate_medium.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_rle.obj: $(SRCDIR)/deflate_rle.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_slow.obj: $(SRCDIR)/deflate_slow.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_stored.obj: $(SRCDIR)/deflate_stored.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
infback.obj: $(SRCDIR)/infback.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h
inffast.obj: $(SRCDIR)/inffast.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h
inflate.obj: $(SRCDIR)/inflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h $(SRCDIR)/functable.h
crc32_comb.obj \
deflate.obj \
deflate_fast.obj \
- deflate_slow.obj \
- deflate_quick.obj \
+ deflate_huff.obj \
deflate_medium.obj \
+ deflate_quick.obj \
+ deflate_rle.obj \
+ deflate_slow.obj \
+ deflate_stored.obj \
functable.obj \
infback.obj \
inflate.obj \
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_medium.obj: $(SRCDIR)/deflate_medium.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_quick.obj: $(SRCDIR)/deflate_quick.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h $(SRCDIR)/trees_emit.h
+deflate_rle.obj: $(SRCDIR)/deflate_rle.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_slow.obj: $(SRCDIR)/deflate_slow.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_stored.obj: $(SRCDIR)/deflate_stored.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
infback.obj: $(SRCDIR)/infback.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h
inffast.obj: $(SRCDIR)/inffast.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h
inflate.obj: $(SRCDIR)/inflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h $(SRCDIR)/functable.h
crc_folding.obj \
deflate.obj \
deflate_fast.obj \
+ deflate_huff.obj \
+ deflate_medium.obj \
deflate_quick.obj \
+ deflate_rle.obj \
deflate_slow.obj \
- deflate_medium.obj \
+ deflate_stored.obj \
functable.obj \
infback.obj \
inflate.obj \
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_medium.obj: $(SRCDIR)/deflate_medium.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_quick.obj: $(SRCDIR)/deflate_quick.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h $(SRCDIR)/trees_emit.h
+deflate_rle.obj: $(SRCDIR)/deflate_rle.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_slow.obj: $(SRCDIR)/deflate_slow.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
+deflate_stored.obj: $(SRCDIR)/deflate_stored.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
infback.obj: $(SRCDIR)/infback.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h
inffast.obj: $(SRCDIR)/inffast.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h
inflate.obj: $(SRCDIR)/inflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/inftrees.h $(SRCDIR)/inflate.h $(SRCDIR)/inffast.h $(SRCDIR)/functable.h $(SRCDIR)/functable.h