check_pclmulqdq_intrinsics()
if(HAVE_PCLMULQDQ_INTRIN AND HAVE_SSSE3_INTRIN)
add_definitions(-DX86_PCLMULQDQ_CRC)
- set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
+ set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_fold_pclmulqdq.c)
add_feature_info(PCLMUL_CRC 1 "Support CRC hash generation using PCLMULQDQ, using \"${SSSE3FLAG} ${SSE4FLAG} ${PCLMULFLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
set_property(SOURCE ${PCLMULQDQ_SRCS} PROPERTY COMPILE_FLAGS "${SSSE3FLAG} ${SSE4FLAG} ${PCLMULFLAG} ${NOLTOFLAG}")
crc32_p.h
crc32_tbl.h
crc32_comb_tbl.h
+ crc32_fold.h
deflate.h
deflate_p.h
functable.h
compress.c
crc32.c
crc32_comb.c
+ crc32_fold.c
deflate.c
deflate_fast.c
deflate_huff.c
compress.o \
crc32.o \
crc32_comb.o \
+ crc32_fold.o \
deflate.o \
deflate_fast.o \
deflate_huff.o \
compress.lo \
crc32.lo \
crc32_comb.lo \
+ crc32_fold.lo \
deflate.lo \
deflate_fast.lo \
deflate_huff.lo \
|Name|Description|
|:-|:-|
|deflate_quick.c|SSE4 optimized deflate strategy for use as level 1|
-|crc_folding.c|SSE4 + PCLMULQDQ optimized CRC folding implementation|
+|crc32_fold_pclmulqdq.c|SSE4 + PCLMULQDQ optimized CRC folding implementation|
|slide_hash_sse2.c|SSE2 optimized slide_hash|
compare258_avx.o compare258_avx.lo \
compare258_sse.o compare258_sse.lo \
insert_string_sse.o insert_string_sse.lo \
- crc_folding.o crc_folding.lo \
+ crc32_fold_pclmulqdq.o crc32_fold_pclmulqdq.lo \
slide_hash_avx.o slide_hash_avx.lo \
slide_hash_sse.o slide_hash_sse.lo
insert_string_sse.lo:
$(CC) $(SFLAGS) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/insert_string_sse.c
-crc_folding.o:
- $(CC) $(CFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/crc_folding.c
+crc32_fold_pclmulqdq.o:
+ $(CC) $(CFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
-crc_folding.lo:
- $(CC) $(SFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/crc_folding.c
+crc32_fold_pclmulqdq.lo:
+ $(CC) $(SFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
slide_hash_avx.o:
$(CC) $(CFLAGS) $(AVX2FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/slide_hash_avx.c
*/
#ifdef X86_PCLMULQDQ_CRC
+#include "../../zutil.h"
#include <inttypes.h>
#include <immintrin.h>
#include <wmmintrin.h>
-#include "crc_folding.h"
+#include "../../crc32_fold.h"
-Z_INTERNAL void crc_fold_init(unsigned int crc0[4 * 5]) {
+/* Reset the PCLMULQDQ folding state held in crc->fold: seed the first
+ * 128-bit lane with the constant 0x9db42487 (presumably the folded
+ * representation of the initial CRC — TODO confirm against the k-constants)
+ * and zero the remaining accumulator lanes.
+ * Returns 0 so the caller can store CRC32_INITIAL_VALUE in strm->adler. */
+Z_INTERNAL uint32_t crc32_fold_reset_pclmulqdq(crc32_fold *crc) {
    /* CRC_SAVE */
-    _mm_storeu_si128((__m128i *)crc0 + 0, _mm_cvtsi32_si128(0x9db42487));
-    _mm_storeu_si128((__m128i *)crc0 + 1, _mm_setzero_si128());
-    _mm_storeu_si128((__m128i *)crc0 + 2, _mm_setzero_si128());
-    _mm_storeu_si128((__m128i *)crc0 + 3, _mm_setzero_si128());
+    _mm_storeu_si128((__m128i *)crc->fold + 0, _mm_cvtsi32_si128(0x9db42487));
+    _mm_storeu_si128((__m128i *)crc->fold + 1, _mm_setzero_si128());
+    _mm_storeu_si128((__m128i *)crc->fold + 2, _mm_setzero_si128());
+    _mm_storeu_si128((__m128i *)crc->fold + 3, _mm_setzero_si128());
+
+    return 0;
}
static void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
*xmm_crc3 = _mm_castps_si128(ps_res);
}
-Z_INTERNAL void crc_fold_copy(unsigned int crc0[4 * 5], unsigned char *dst, const unsigned char *src, long len) {
+Z_INTERNAL void crc32_fold_copy_pclmulqdq(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
unsigned long algn_diff;
__m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;
char ALIGNED_(16) partial_buf[16] = { 0 };
/* CRC_LOAD */
- __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)crc0 + 0);
- __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)crc0 + 1);
- __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)crc0 + 2);
- __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)crc0 + 3);
+ __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)crc->fold + 0);
+ __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)crc->fold + 1);
+ __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)crc->fold + 2);
+ __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)crc->fold + 3);
__m128i xmm_crc_part;
if (len < 16) {
xmm_crc_part = _mm_setzero_si128();
}
- while ((len -= 64) >= 0) {
+ while (len >= 64) {
/* CRC_LOAD */
xmm_t0 = _mm_load_si128((__m128i *)src);
xmm_t1 = _mm_load_si128((__m128i *)src + 1);
src += 64;
dst += 64;
+ len -= 64;
}
-/*
- * len = num bytes left - 64
- */
+/*
+ * len = num bytes left (< 64 after the loop above)
+ */
- if (len + 16 >= 0) {
- len += 16;
-
+ if (len >= 48) {
xmm_t0 = _mm_load_si128((__m128i *)src);
xmm_t1 = _mm_load_si128((__m128i *)src + 1);
xmm_t2 = _mm_load_si128((__m128i *)src + 2);
xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);
-
+ len -= 48;
if (len == 0)
goto done;
dst += 48;
memcpy(&xmm_crc_part, (__m128i *)src + 3, len);
- } else if (len + 32 >= 0) {
- len += 32;
-
+ } else if (len >= 32) {
xmm_t0 = _mm_load_si128((__m128i *)src);
xmm_t1 = _mm_load_si128((__m128i *)src + 1);
xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);
+ len -= 32;
if (len == 0)
goto done;
dst += 32;
memcpy(&xmm_crc_part, (__m128i *)src + 2, len);
- } else if (len + 48 >= 0) {
- len += 48;
-
+ } else if (len >= 16) {
xmm_t0 = _mm_load_si128((__m128i *)src);
fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
+ len -= 16;
if (len == 0)
goto done;
dst += 16;
memcpy(&xmm_crc_part, (__m128i *)src + 1, len);
} else {
- len += 64;
if (len == 0)
goto done;
memcpy(&xmm_crc_part, src, len);
partial_fold((size_t)len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part);
done:
/* CRC_SAVE */
- _mm_storeu_si128((__m128i *)crc0 + 0, xmm_crc0);
- _mm_storeu_si128((__m128i *)crc0 + 1, xmm_crc1);
- _mm_storeu_si128((__m128i *)crc0 + 2, xmm_crc2);
- _mm_storeu_si128((__m128i *)crc0 + 3, xmm_crc3);
- _mm_storeu_si128((__m128i *)crc0 + 4, xmm_crc_part);
+ _mm_storeu_si128((__m128i *)crc->fold + 0, xmm_crc0);
+ _mm_storeu_si128((__m128i *)crc->fold + 1, xmm_crc1);
+ _mm_storeu_si128((__m128i *)crc->fold + 2, xmm_crc2);
+ _mm_storeu_si128((__m128i *)crc->fold + 3, xmm_crc3);
+ _mm_storeu_si128((__m128i *)crc->fold + 4, xmm_crc_part);
}
static const unsigned ALIGNED_(16) crc_k[] = {
0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};
-uint32_t Z_INTERNAL crc_fold_512to32(unsigned int crc0[4 * 5]) {
+Z_INTERNAL uint32_t crc32_fold_final_pclmulqdq(crc32_fold *crc) {
const __m128i xmm_mask = _mm_load_si128((__m128i *)crc_mask);
const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);
- uint32_t crc;
__m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;
/* CRC_LOAD */
- __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)crc0 + 0);
- __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)crc0 + 1);
- __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)crc0 + 2);
- __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)crc0 + 3);
+ __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)crc->fold + 0);
+ __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)crc->fold + 1);
+ __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)crc->fold + 2);
+ __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)crc->fold + 3);
/*
* k1
xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);
- crc = (uint32_t)_mm_extract_epi32(xmm_crc3, 2);
- return ~crc;
+ crc->value = ~((uint32_t)_mm_extract_epi32(xmm_crc3, 2));
+
+ return crc->value;
}
#endif
+++ /dev/null
-/* crc_folding.h
- *
- * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
- * instruction.
- *
- * Copyright (C) 2013 Intel Corporation Jim Kukunas
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#ifndef CRC_FOLDING_H_
-#define CRC_FOLDING_H_
-
-#include "../../zutil.h"
-
-Z_INTERNAL void crc_fold_init(unsigned int crc0[4 * 5]);
-Z_INTERNAL uint32_t crc_fold_512to32(unsigned int crc0[4 * 5]);
-Z_INTERNAL void crc_fold_copy(unsigned int crc0[4 * 5], unsigned char *, const unsigned char *, long);
-
-#endif
if test ${HAVE_PCLMULQDQ_INTRIN} -eq 1; then
CFLAGS="${CFLAGS} -DX86_PCLMULQDQ_CRC"
SFLAGS="${SFLAGS} -DX86_PCLMULQDQ_CRC"
- ARCH_STATIC_OBJS="${ARCH_STATIC_OBJS} crc_folding.o"
- ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} crc_folding.lo"
+ ARCH_STATIC_OBJS="${ARCH_STATIC_OBJS} crc32_fold_pclmulqdq.o"
+ ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} crc32_fold_pclmulqdq.lo"
fi
fi
;;
return ZSWAP32(c);
}
#endif /* BYTE_ORDER == BIG_ENDIAN */
-
-#ifdef X86_PCLMULQDQ_CRC
-#include "arch/x86/x86.h"
-#include "arch/x86/crc_folding.h"
-
-Z_INTERNAL void crc_finalize(deflate_state *const s) {
- if (x86_cpu_has_pclmulqdq)
- s->strm->adler = crc_fold_512to32(s->crc0);
-}
-#endif
-
-Z_INTERNAL void crc_reset(deflate_state *const s) {
-#ifdef X86_PCLMULQDQ_CRC
- x86_check_features();
- if (x86_cpu_has_pclmulqdq) {
- crc_fold_init(s->crc0);
- }
-#endif
- s->strm->adler = CRC32_INITIAL_VALUE;
-}
-
-Z_INTERNAL void copy_with_crc(PREFIX3(stream) *strm, unsigned char *dst, unsigned long size) {
-#ifdef X86_PCLMULQDQ_CRC
- if (x86_cpu_has_pclmulqdq) {
- crc_fold_copy(strm->state->crc0, dst, strm->next_in, size);
- return;
- }
-#endif
- memcpy(dst, strm->next_in, size);
- strm->adler = PREFIX(crc32)(strm->adler, dst, size);
-}
--- /dev/null
+/* crc32_fold.c -- crc32 folding interface
+ * Copyright (C) 2021 Nathan Moinvaziri
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#include "zbuild.h"
+#include "zutil.h"
+#include "functable.h"
+
+#include "crc32_fold.h"
+
+/* Reset the generic folding state to the CRC-32 seed and return it,
+ * so the caller can store it in strm->adler. */
+Z_INTERNAL uint32_t crc32_fold_reset_c(crc32_fold *crc) {
+    crc->value = CRC32_INITIAL_VALUE;
+    return crc->value;
+}
+
+/* Generic fold+copy: advance the running CRC over src[0..len) using the
+ * functable's best crc32 implementation, then copy the same bytes to dst. */
+Z_INTERNAL void crc32_fold_copy_c(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
+    crc->value = functable.crc32(crc->value, src, len);
+    memcpy(dst, src, len);
+}
+
+/* Finalize: the generic implementation keeps crc->value up to date on every
+ * copy, so there is nothing left to fold — just return it. */
+Z_INTERNAL uint32_t crc32_fold_final_c(crc32_fold *crc) {
+    return crc->value;
+}
--- /dev/null
+/* crc32_fold.h -- crc32 folding interface
+ * Copyright (C) 2021 Nathan Moinvaziri
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+#ifndef CRC32_FOLD_H_
+#define CRC32_FOLD_H_
+
+typedef struct crc32_fold_s {
+    /* Folding state for SIMD implementations: five 128-bit lanes
+     * (four accumulators plus one partial-block lane), loaded and
+     * stored as __m128i by the PCLMULQDQ code. */
+    uint32_t ALIGNED_(16) fold[4 * 5];
+    /* Running/final CRC value used by the generic implementation. */
+    uint32_t value;
+} crc32_fold;
+
+Z_INTERNAL uint32_t crc32_fold_reset_c(crc32_fold *crc);
+Z_INTERNAL void crc32_fold_copy_c(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len);
+Z_INTERNAL uint32_t crc32_fold_final_c(crc32_fold *crc);
+
+#endif
static void lm_init (deflate_state *s);
Z_INTERNAL unsigned read_buf (PREFIX3(stream) *strm, unsigned char *buf, unsigned size);
-extern void crc_reset(deflate_state *const s);
-#ifdef X86_PCLMULQDQ_CRC
-extern void crc_finalize(deflate_state *const s);
-#endif
-extern void copy_with_crc(PREFIX3(stream) *strm, unsigned char *dst, unsigned long size);
-
extern uint32_t update_hash_roll (deflate_state *const s, uint32_t h, uint32_t val);
extern void insert_string_roll (deflate_state *const s, uint32_t str, uint32_t count);
extern Pos quick_insert_string_roll(deflate_state *const s, uint32_t str);
#ifdef GZIP
if (s->wrap == 2)
- crc_reset(s);
+ strm->adler = functable.crc32_fold_reset(&s->crc_fold);
else
#endif
strm->adler = ADLER32_INITIAL_VALUE;
#ifdef GZIP
if (s->status == GZIP_STATE) {
/* gzip header */
- crc_reset(s);
+ functable.crc32_fold_reset(&s->crc_fold);
put_byte(s, 31);
put_byte(s, 139);
put_byte(s, 8);
}
}
put_short(s, (uint16_t)strm->adler);
- crc_reset(s);
+ functable.crc32_fold_reset(&s->crc_fold);
}
s->status = BUSY_STATE;
/* Write the trailer */
#ifdef GZIP
if (s->wrap == 2) {
-# ifdef X86_PCLMULQDQ_CRC
- crc_finalize(s);
-# endif
+ strm->adler = functable.crc32_fold_final(&s->crc_fold);
+
put_uint32(s, strm->adler);
put_uint32(s, (uint32_t)strm->total_in);
} else
memcpy(buf, strm->next_in, len);
#ifdef GZIP
} else if (strm->state->wrap == 2) {
- copy_with_crc(strm, buf, len);
+ functable.crc32_fold_copy(&strm->state->crc_fold, buf, strm->next_in, len);
#endif
} else {
memcpy(buf, strm->next_in, len);
#include "zutil.h"
#include "zendian.h"
+#include "crc32_fold.h"
/* define NO_GZIP when compiling if you want to disable gzip header and
trailer creation by deflate(). NO_GZIP would be used to avoid linking in
int nice_match; /* Stop searching when current match exceeds this */
-#if defined(__x86_64__) || defined(_M_X64) || defined(__i386) || defined(_M_IX86)
- /* Only used if X86_PCLMULQDQ_CRC is defined */
- unsigned crc0[4 * 5];
-#endif
+ crc32_fold ALIGNED_(16) crc_fold;
/* used by trees.c: */
/* Didn't use ct_data typedef below to suppress compiler warning */
extern uint32_t adler32_power8(uint32_t adler, const unsigned char* buf, size_t len);
#endif
+/* CRC32 folding */
+extern uint32_t crc32_fold_reset_c(crc32_fold *crc);
+extern void crc32_fold_copy_c(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len);
+extern uint32_t crc32_fold_final_c(crc32_fold *crc);
+
+#ifdef X86_PCLMULQDQ_CRC
+extern uint32_t crc32_fold_reset_pclmulqdq(crc32_fold *crc);
+extern void crc32_fold_copy_pclmulqdq(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len);
+extern uint32_t crc32_fold_final_pclmulqdq(crc32_fold *crc);
+#endif
+
/* memory chunking */
extern uint32_t chunksize_c(void);
extern uint8_t* chunkcopy_c(uint8_t *out, uint8_t const *from, unsigned len);
return functable.adler32(adler, buf, len);
}
+/* Lazy-dispatch stub: on first call, populate the CPU feature flags, pick the
+ * best crc32_fold_reset implementation, install it in the functable, and
+ * tail-call it. Later calls go straight through the functable pointer. */
+Z_INTERNAL uint32_t crc32_fold_reset_stub(crc32_fold *crc) {
+    functable.crc32_fold_reset = crc32_fold_reset_c;
+    cpu_check_features();
+#ifdef X86_PCLMULQDQ_CRC
+    if (x86_cpu_has_pclmulqdq)
+        functable.crc32_fold_reset = crc32_fold_reset_pclmulqdq;
+#endif
+    return functable.crc32_fold_reset(crc);
+}
+
+/* Lazy-dispatch stub: on first call, populate the CPU feature flags, install
+ * the best crc32_fold_copy implementation in the functable, then forward the
+ * call. Later calls go straight through the functable pointer. */
+Z_INTERNAL void crc32_fold_copy_stub(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
+    functable.crc32_fold_copy = crc32_fold_copy_c;
+    cpu_check_features();
+#ifdef X86_PCLMULQDQ_CRC
+    if (x86_cpu_has_pclmulqdq)
+        functable.crc32_fold_copy = crc32_fold_copy_pclmulqdq;
+#endif
+    functable.crc32_fold_copy(crc, dst, src, len);
+}
+
+/* Lazy-dispatch stub: on first call, populate the CPU feature flags, install
+ * the best crc32_fold_final implementation in the functable, and tail-call
+ * it. Later calls go straight through the functable pointer. */
+Z_INTERNAL uint32_t crc32_fold_final_stub(crc32_fold *crc) {
+    functable.crc32_fold_final = crc32_fold_final_c;
+    cpu_check_features();
+#ifdef X86_PCLMULQDQ_CRC
+    if (x86_cpu_has_pclmulqdq)
+        functable.crc32_fold_final = crc32_fold_final_pclmulqdq;
+#endif
+    return functable.crc32_fold_final(crc);
+}
+
Z_INTERNAL uint32_t chunksize_stub(void) {
// Initialize default
functable.chunksize = &chunksize_c;
quick_insert_string_stub,
adler32_stub,
crc32_stub,
+ crc32_fold_reset_stub,
+ crc32_fold_copy_stub,
+ crc32_fold_final_stub,
slide_hash_stub,
compare258_stub,
longest_match_stub,
#define FUNCTABLE_H_
#include "deflate.h"
+#include "crc32_fold.h"
struct functable_s {
uint32_t (* update_hash) (deflate_state *const s, uint32_t h, uint32_t val);
Pos (* quick_insert_string)(deflate_state *const s, uint32_t str);
uint32_t (* adler32) (uint32_t adler, const unsigned char *buf, size_t len);
uint32_t (* crc32) (uint32_t crc, const unsigned char *buf, uint64_t len);
+ uint32_t (* crc32_fold_reset) (crc32_fold *crc);
+ void (* crc32_fold_copy) (crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len);
+ uint32_t (* crc32_fold_final) (crc32_fold *crc);
void (* slide_hash) (deflate_state *s);
uint32_t (* compare258) (const unsigned char *src0, const unsigned char *src1);
uint32_t (* longest_match) (deflate_state *const s, Pos cur_match);
compress.obj \
crc32.obj \
crc32_comb.obj \
+ crc32_fold.obj \
deflate.obj \
deflate_fast.obj \
deflate_huff.obj \
uncompr.obj: $(SRCDIR)/uncompr.c $(SRCDIR)/zbuild.h $(SRCDIR)/zlib$(SUFFIX).h
crc32.obj: $(SRCDIR)/crc32.c $(SRCDIR)/zbuild.h $(SRCDIR)/zendian.h $(SRCDIR)/deflate.h $(SRCDIR)/functable.h $(SRCDIR)/crc32_tbl.h
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
+crc32_fold.obj: $(SRCDIR)/crc32_fold.c $(SRCDIR)/zbuild.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
compress.obj \
crc32.obj \
crc32_comb.obj \
+ crc32_fold.obj \
deflate.obj \
deflate_fast.obj \
deflate_huff.obj \
chunkset.obj: $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h
crc32.obj: $(SRCDIR)/crc32.c $(SRCDIR)/zbuild.h $(SRCDIR)/zendian.h $(SRCDIR)/deflate.h $(SRCDIR)/functable.h $(SRCDIR)/crc32_tbl.h
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
+crc32_fold.obj: $(SRCDIR)/crc32_fold.c $(SRCDIR)/zbuild.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
compress.obj \
crc32.obj \
crc32_comb.obj \
- crc_folding.obj \
+ crc32_fold.obj \
+ crc32_fold_pclmulqdq.obj \
deflate.obj \
deflate_fast.obj \
deflate_huff.obj \
chunkset_sse.obj: $(SRCDIR)/arch/x86/chunkset_sse.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h
crc32.obj: $(SRCDIR)/crc32.c $(SRCDIR)/zbuild.h $(SRCDIR)/zendian.h $(SRCDIR)/deflate.h $(SRCDIR)/functable.h $(SRCDIR)/crc32_tbl.h
crc32_comb.obj: $(SRCDIR)/crc32_comb.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/crc32_comb_tbl.h
+# Match the sources' actual #include lists so header edits trigger rebuilds:
+# crc32_fold.c includes zbuild.h, zutil.h, functable.h, crc32_fold.h;
+# crc32_fold_pclmulqdq.c includes ../../zutil.h and ../../crc32_fold.h.
+crc32_fold.obj: $(SRCDIR)/crc32_fold.c $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h $(SRCDIR)/functable.h $(SRCDIR)/crc32_fold.h
+crc32_fold_pclmulqdq.obj: $(SRCDIR)/arch/x86/crc32_fold_pclmulqdq.c $(SRCDIR)/crc32_fold.h $(SRCDIR)/zbuild.h $(SRCDIR)/zutil.h
deflate.obj: $(SRCDIR)/deflate.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_fast.obj: $(SRCDIR)/deflate_fast.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
deflate_huff.obj: $(SRCDIR)/deflate_huff.c $(SRCDIR)/zbuild.h $(SRCDIR)/deflate.h $(SRCDIR)/deflate_p.h $(SRCDIR)/functable.h
#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
#define ADLER32_INITIAL_VALUE 1 /* initial adler-32 hash value */
-#define CRC32_INITIAL_VALUE 0 /* initial crc-32 hash value */
+#define CRC32_INITIAL_VALUE 0 /* initial crc-32 hash value */
/* target dependencies */