add PCLMULQDQ optimized CRC folding
author    Jim Kukunas <james.t.kukunas@linux.intel.com>
          Thu, 11 Jul 2013 20:49:05 +0000 (13:49 -0700)
committer Jim Kukunas <james.t.kukunas@linux.intel.com>
          Tue, 3 Jun 2014 22:37:52 +0000 (15:37 -0700)
Rather than copying the input data from strm->next_in into the window and
then computing the CRC, this patch combines the two steps into one. It
performs an SSE memory copy while folding the data down in the SSE
registers. A final step is added, when we write the gzip trailer, to
reduce the four SSE registers to a 32-bit CRC.

Adds some extra padding bytes to the window to allow for partial SSE
writes.
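
For context, the core of the technique is one carry-less-multiply fold step
per 16-byte block, as described in the Intel white paper cited in
crc_folding.c. The sketch below is illustrative only: it keeps a single
128-bit state and a hypothetical constant pair k, whereas the patch keeps
four states in flight (fold_1 through fold_4) with its xmm_fold4 constants.

    #include <immintrin.h>  /* PCLMULQDQ/SSE intrinsics; build with -mpclmul -msse4 */

    /* One fold step: multiply each 64-bit half of the running state by a
     * distance-matched constant, then XOR both products with the next block. */
    static __m128i fold_step(__m128i crc, __m128i data, __m128i k)
    {
        __m128i h = _mm_clmulepi64_si128(crc, k, 0x01);  /* crc.hi64 x k.lo64 */
        __m128i l = _mm_clmulepi64_si128(crc, k, 0x10);  /* crc.lo64 x k.hi64 */
        return _mm_xor_si128(_mm_xor_si128(h, l), data); /* fold in 16 bytes  */
    }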

Makefile.in
configure
crc32.c
crc_folding.c [new file with mode: 0644]
deflate.c
deflate.h

diff --git a/Makefile.in b/Makefile.in
index 4774810f222ff5b90cd3126918390ffc0bfe83fa..d3c3efd05bd3c7201aeb202bf35229afd04dfb99 100644 (file)
--- a/Makefile.in
+++ b/Makefile.in
@@ -47,6 +47,8 @@ EXE=
 
 FILL_WINDOW_SSE_o= 
 FILL_WINDOW_SSE_lo=
+CRC_FOLDING_o=
+CRC_FOLDING_lo=
 
 prefix = /usr/local
 exec_prefix = ${prefix}
@@ -57,11 +59,11 @@ mandir = ${prefix}/share/man
 man3dir = ${mandir}/man3
 pkgconfigdir = ${libdir}/pkgconfig
 
-OBJZ = adler32.o crc32.o ${FILL_WINDOW_SSE_o} deflate.o infback.o inffast.o inflate.o inftrees.o trees.o zutil.o
+OBJZ = adler32.o ${CRC_FOLDING_o} crc32.o ${FILL_WINDOW_SSE_o} deflate.o infback.o inffast.o inflate.o inftrees.o trees.o zutil.o
 OBJG = compress.o uncompr.o gzclose.o gzlib.o gzread.o gzwrite.o
 OBJC = $(OBJZ) $(OBJG)
 
-PIC_OBJZ = adler32.lo crc32.lo ${FILL_WINDOW_SSE_lo} deflate.lo infback.lo inffast.lo inflate.lo inftrees.lo trees.lo zutil.lo
+PIC_OBJZ = adler32.lo ${CRC_FOLDING_lo} crc32.lo ${FILL_WINDOW_SSE_lo} deflate.lo infback.lo inffast.lo inflate.lo inftrees.lo trees.lo zutil.lo
 PIC_OBJG = compress.lo uncompr.lo gzclose.lo gzlib.lo gzread.lo gzwrite.lo
 PIC_OBJC = $(PIC_OBJZ) $(PIC_OBJG)
 
@@ -124,6 +126,14 @@ fill_window_sse.lo: fill_window_sse.c
 fill_window_sse.o: fill_window_sse.c
        ${CC} ${CFLAGS} -msse2 -I. -c -o $@ fill_window_sse.c
 
+crc_folding.lo: crc_folding.c
+       -@mkdir objs 2>/dev/null || test -d objs
+       $(CC) $(SFLAGS) -mpclmul -msse4 -DPIC -c -o objs/$*.o $<
+       -@mv objs/$*.o $@
+
+crc_folding.o: crc_folding.c
+       ${CC} ${CFLAGS} -mpclmul -msse4 -I. -c -o $@ crc_folding.c
+
 infcover.o: test/infcover.c zlib.h zconf.h
        $(CC) $(CFLAGS) -I. -c -o $@ test/infcover.c
 
diff --git a/configure b/configure
index bf88e45419e013af874557d6fdb28b3d87a959ef..ab4e06482d35576caf364a907cabcb37884cc09b 100755 (executable)
--- a/configure
+++ b/configure
@@ -777,6 +777,25 @@ else
     HAVE_SSE2_INTRIN=0
 fi
 
+# Check for PCLMULQDQ intrinsics
+cat > $test.c << EOF
+#include <immintrin.h>
+int main(void)
+{
+    __m128i a = _mm_setzero_si128();
+    __m128i b = _mm_setzero_si128();
+    __m128i c = _mm_clmulepi64_si128(a, b, 0x10);
+    return 0;
+}
+EOF
+if try ${CC} ${CFLAGS} -mpclmul $test.c; then
+    echo "Checking for PCLMULQDQ intrinsics ... Yes." | tee -a configure.log
+    HAVE_PCLMULQDQ_INTRIN=1
+else
+    echo "Checking for PCLMULQDQ intrinsics ... No." | tee -a configure.log
+    HAVE_PCLMULQDQ_INTRIN=0
+fi
+
 # Set ARCH specific FLAGS
 case "${ARCH}" in
     x86_64)
@@ -806,6 +825,18 @@ case "${ARCH}" in
 
         CFLAGS="${CFLAGS} -DUSE_SSE4_2_CRC_HASH"
         SFLAGS="${SFLAGS} -DUSE_SSE4_2_CRC_HASH"
+
+        if test ${HAVE_PCLMULQDQ_INTRIN} -eq 1; then
+            CFLAGS="${CFLAGS} -DHAVE_PCLMULQDQ"
+            SFLAGS="${SFLAGS} -DHAVE_PCLMULQDQ"
+            CRC_FOLDING_o="crc_folding.o"
+            CRC_FOLDING_lo="crc_folding.lo"
+            OBJS="${OBJS} ${CRC_FOLDING_o}"
+            PIC_OBJS="${PIC_OBJS} ${CRC_FOLDING_lo}"
+        else
+            CRC_FOLDING_o=""
+            CRC_FOLDING_lo=""
+        fi
     ;;
     i386 | i486 | i586 | i686)
         OBJC="${OBJC} x86.o"
@@ -834,6 +865,18 @@ case "${ARCH}" in
 
         CFLAGS="${CFLAGS} -DUSE_SSE4_2_CRC_HASH"
         SFLAGS="${SFLAGS} -DUSE_SSE4_2_CRC_HASH"
+
+        if test ${HAVE_PCLMULQDQ_INTRIN} -eq 1; then
+            CFLAGS="${CFLAGS} -DHAVE_PCLMULQDQ"
+            SFLAGS="${SFLAGS} -DHAVE_PCLMULQDQ"
+            CRC_FOLDING_o="crc_folding.o"
+            CRC_FOLDING_lo="crc_folding.lo"
+            OBJS="${OBJS} ${CRC_FOLDING_o}"
+            PIC_OBJS="${PIC_OBJS} ${CRC_FOLDING_lo}"
+        else
+            CRC_FOLDING_o=""
+            CRC_FOLDING_lo=""
+        fi
     ;;
 esac
 
@@ -870,6 +913,8 @@ echo sharedlibdir = $sharedlibdir >> configure.log
 echo uname = $uname >> configure.log
 echo FILL_WINDOW_SSE_o = ${FILL_WINDOW_SSE_o} >> configure.log
 echo FILL_WINDOW_SSE_lo= ${FILL_WINDOW_SSE_lo} >> configure.log
+echo CRC_FOLDING_o = ${CRC_FOLDING_o} >> configure.log
+echo CRC_FOLDING_lo= ${CRC_FOLDING_lo} >> configure.log
 
 # update Makefile with the configure results
 sed < Makefile.in "
@@ -901,6 +946,8 @@ sed < Makefile.in "
 /^test: */s#:.*#: $TEST#
 /^FILL_WINDOW_SSE_o *=/s#=.*#=$FILL_WINDOW_SSE_o#
 /^FILL_WINDOW_SSE_lo *=/s#=.*#=$FILL_WINDOW_SSE_lo#
+/^CRC_FOLDING_o *=/s#=.*#=$CRC_FOLDING_o#
+/^CRC_FOLDING_lo *=/s#=.*#=$CRC_FOLDING_lo#
 " > Makefile
 
 # create zlib.pc with the configure results
diff --git a/crc32.c b/crc32.c
index 15b8d2e4beec411b84e64d1bdad53fecdf86caa7..524ace6e73ead3c712badaf3c8d5211dd4d14250 100644 (file)
--- a/crc32.c
+++ b/crc32.c
@@ -437,3 +437,45 @@ uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
 {
     return crc32_combine_(crc1, crc2, len2);
 }
+
+#include "deflate.h"
+
+#ifdef HAVE_PCLMULQDQ
+#include "x86.h"
+extern void ZLIB_INTERNAL crc_fold_init(deflate_state *z_const s);
+extern void ZLIB_INTERNAL crc_fold_copy(deflate_state *z_const s,
+        unsigned char *dst, z_const unsigned char *src, long len);
+extern unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *z_const s);
+#endif
+
+ZLIB_INTERNAL void crc_reset(deflate_state *const s)
+{
+#ifdef HAVE_PCLMULQDQ
+    if (x86_cpu_has_pclmulqdq) {
+        crc_fold_init(s);
+        return;
+    }
+#endif
+    s->strm->adler = crc32(0L, Z_NULL, 0);
+}
+
+ZLIB_INTERNAL void crc_finalize(deflate_state *const s)
+{
+#ifdef HAVE_PCLMULQDQ
+    if (x86_cpu_has_pclmulqdq)
+        s->strm->adler = crc_fold_512to32(s);
+#endif
+}
+
+ZLIB_INTERNAL void copy_with_crc(z_streamp strm, Bytef *dst, long size)
+{
+#ifdef HAVE_PCLMULQDQ
+    if (x86_cpu_has_pclmulqdq) {
+        crc_fold_copy(strm->state, dst, strm->next_in, size);
+        return;
+    }
+#endif
+    zmemcpy(dst, strm->next_in, size);
+    strm->adler = crc32(strm->adler, dst, size);
+}
+
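
The three wrappers above dispatch on x86_cpu_has_pclmulqdq, which comes from
x86.h and is not defined in this patch. A hedged sketch of how such a flag is
typically derived on GCC/Clang (hypothetical helper; the real detection lives
in zlib-ng's x86.c):

    #include <cpuid.h>

    /* Hypothetical CPUID probe for PCLMULQDQ support. */
    static int detect_pclmulqdq(void)
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        return (ecx >> 1) & 1;  /* CPUID.01H:ECX bit 1 = PCLMULQDQ */
    }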
diff --git a/crc_folding.c b/crc_folding.c
new file mode 100644 (file)
index 0000000..28ddcfa
--- /dev/null
+++ b/crc_folding.c
@@ -0,0 +1,482 @@
+/*
+ * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ 
+ * instruction.
+ *
+ * A white paper describing this algorithm can be found at:
+ * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+ *
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
+ * Authors:
+ *     Wajdi Feghali   <wajdi.k.feghali@intel.com>
+ *     Jim Guilford    <james.guilford@intel.com>
+ *     Vinodh Gopal    <vinodh.gopal@intel.com>
+ *     Erdinc Ozturk   <erdinc.ozturk@intel.com>
+ *     Jim Kukunas     <james.t.kukunas@linux.intel.com>
+ *
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifdef HAVE_PCLMULQDQ
+
+#include "deflate.h"
+
+#include <inttypes.h>
+#include <immintrin.h>
+
+#define CRC_LOAD(s) \
+    do { \
+        __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
+        __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);\
+        __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);\
+        __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);\
+        __m128i xmm_crc_part = _mm_loadu_si128((__m128i *)s->crc0 + 4);
+
+#define CRC_SAVE(s) \
+        _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
+        _mm_storeu_si128((__m128i *)s->crc0 + 1, xmm_crc1);\
+        _mm_storeu_si128((__m128i *)s->crc0 + 2, xmm_crc2);\
+        _mm_storeu_si128((__m128i *)s->crc0 + 3, xmm_crc3);\
+        _mm_storeu_si128((__m128i *)s->crc0 + 4, xmm_crc_part);\
+    } while (0);
+
+ZLIB_INTERNAL void crc_fold_init(deflate_state *z_const s)
+{
+    CRC_LOAD(s)
+
+    xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
+    xmm_crc1 = _mm_setzero_si128();
+    xmm_crc2 = _mm_setzero_si128();
+    xmm_crc3 = _mm_setzero_si128();
+
+    CRC_SAVE(s)
+
+    s->strm->adler = 0;
+}
+
+local void fold_1(deflate_state *z_const s,
+        __m128i *xmm_crc0, __m128i *xmm_crc1,
+        __m128i *xmm_crc2, __m128i *xmm_crc3)
+{
+    z_const __m128i xmm_fold4 = _mm_set_epi32(
+            0x00000001, 0x54442bd4,
+            0x00000001, 0xc6e41596);
+    
+    __m128i x_tmp3;
+    __m128 ps_crc0, ps_crc3, ps_res;
+
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc3 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);
+
+    *xmm_crc0 = *xmm_crc1;
+    *xmm_crc1 = *xmm_crc2;
+    *xmm_crc2 = x_tmp3;
+    *xmm_crc3 = _mm_castps_si128(ps_res);
+}
+
+local void fold_2(deflate_state *z_const s,
+        __m128i *xmm_crc0, __m128i *xmm_crc1,
+        __m128i *xmm_crc2, __m128i *xmm_crc3)
+{
+    z_const __m128i xmm_fold4 = _mm_set_epi32(
+            0x00000001, 0x54442bd4,
+            0x00000001, 0xc6e41596);
+
+    __m128i x_tmp3, x_tmp2;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;
+
+    x_tmp3 = *xmm_crc3;
+    x_tmp2 = *xmm_crc2;
+
+    *xmm_crc3 = *xmm_crc1;
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_res31= _mm_xor_ps(ps_crc3, ps_crc1);
+
+    *xmm_crc2 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_res20= _mm_xor_ps(ps_crc0, ps_crc2);
+
+    *xmm_crc0 = x_tmp2;
+    *xmm_crc1 = x_tmp3;
+    *xmm_crc2 = _mm_castps_si128(ps_res20);
+    *xmm_crc3 = _mm_castps_si128(ps_res31);
+}
+
+local void fold_3(deflate_state *z_const s,
+        __m128i *xmm_crc0, __m128i *xmm_crc1,
+        __m128i *xmm_crc2, __m128i *xmm_crc3)
+{
+    z_const __m128i xmm_fold4 = _mm_set_epi32(
+            0x00000001, 0x54442bd4,
+            0x00000001, 0xc6e41596);
+
+    __m128i x_tmp3;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;
+
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc3 = *xmm_crc2;
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);
+
+    *xmm_crc2 = *xmm_crc1;
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_res21= _mm_xor_ps(ps_crc1, ps_crc2);
+
+    *xmm_crc1 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_res10= _mm_xor_ps(ps_crc0, ps_crc1);
+
+    *xmm_crc0 = x_tmp3;
+    *xmm_crc1 = _mm_castps_si128(ps_res10);
+    *xmm_crc2 = _mm_castps_si128(ps_res21);
+    *xmm_crc3 = _mm_castps_si128(ps_res32);
+}
+
+local void fold_4(deflate_state *z_const s,
+        __m128i *xmm_crc0, __m128i *xmm_crc1,
+        __m128i *xmm_crc2, __m128i *xmm_crc3)
+{
+    z_const __m128i xmm_fold4 = _mm_set_epi32(
+            0x00000001, 0x54442bd4,
+            0x00000001, 0xc6e41596);
+
+    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
+    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
+    __m128 ps_res0, ps_res1, ps_res2, ps_res3;
+
+    x_tmp0 = *xmm_crc0;
+    x_tmp1 = *xmm_crc1;
+    x_tmp2 = *xmm_crc2;
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_t0 = _mm_castsi128_ps(x_tmp0);
+    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);
+
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_t1 = _mm_castsi128_ps(x_tmp1);
+    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);
+
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
+    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_t2 = _mm_castsi128_ps(x_tmp2);
+    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);
+
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
+    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_t3 = _mm_castsi128_ps(x_tmp3);
+    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);
+
+    *xmm_crc0 = _mm_castps_si128(ps_res0);
+    *xmm_crc1 = _mm_castps_si128(ps_res1);
+    *xmm_crc2 = _mm_castps_si128(ps_res2);
+    *xmm_crc3 = _mm_castps_si128(ps_res3);
+}
+
+local z_const unsigned __attribute__((aligned(32))) pshufb_shf_table[60] = {
+       0x84838281,0x88878685,0x8c8b8a89,0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
+       0x85848382,0x89888786,0x8d8c8b8a,0x01008f8e, /* shl 14 (16 - 2)/shr2 */
+       0x86858483,0x8a898887,0x8e8d8c8b,0x0201008f, /* shl 13 (16 - 3)/shr3 */
+       0x87868584,0x8b8a8988,0x8f8e8d8c,0x03020100, /* shl 12 (16 - 4)/shr4 */
+       0x88878685,0x8c8b8a89,0x008f8e8d,0x04030201, /* shl 11 (16 - 5)/shr5 */
+       0x89888786,0x8d8c8b8a,0x01008f8e,0x05040302, /* shl 10 (16 - 6)/shr6 */
+       0x8a898887,0x8e8d8c8b,0x0201008f,0x06050403, /* shl  9 (16 - 7)/shr7 */
+       0x8b8a8988,0x8f8e8d8c,0x03020100,0x07060504, /* shl  8 (16 - 8)/shr8 */
+       0x8c8b8a89,0x008f8e8d,0x04030201,0x08070605, /* shl  7 (16 - 9)/shr9 */
+       0x8d8c8b8a,0x01008f8e,0x05040302,0x09080706, /* shl  6 (16 -10)/shr10*/
+       0x8e8d8c8b,0x0201008f,0x06050403,0x0a090807, /* shl  5 (16 -11)/shr11*/
+       0x8f8e8d8c,0x03020100,0x07060504,0x0b0a0908, /* shl  4 (16 -12)/shr12*/
+       0x008f8e8d,0x04030201,0x08070605,0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
+       0x01008f8e,0x05040302,0x09080706,0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
+       0x0201008f,0x06050403,0x0a090807,0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
+};
+
+local void partial_fold(deflate_state *z_const s, z_const size_t len,
+        __m128i *xmm_crc0, __m128i *xmm_crc1,
+        __m128i *xmm_crc2, __m128i *xmm_crc3,
+        __m128i *xmm_crc_part)
+{
+
+    z_const __m128i xmm_fold4 = _mm_set_epi32(
+            0x00000001, 0x54442bd4,
+            0x00000001, 0xc6e41596);
+    z_const __m128i xmm_mask3 = _mm_set1_epi32(0x80808080);
+    
+    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
+    __m128i xmm_a0_0, xmm_a0_1;
+    __m128 ps_crc3, psa0_0, psa0_1, ps_res;
+
+    xmm_shl = _mm_load_si128((__m128i *)pshufb_shf_table + (len - 1));
+    xmm_shr = xmm_shl;
+    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);
+
+    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);
+
+    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
+    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
+    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);
+
+    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
+    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
+    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);
+
+    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
+    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
+    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);
+
+    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
+    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
+    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);
+
+    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
+    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);
+
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
+    psa0_1 = _mm_castsi128_ps(xmm_a0_1);
+
+    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
+    ps_res = _mm_xor_ps(ps_res, psa0_1);
+
+    *xmm_crc3 = _mm_castps_si128(ps_res);
+}
+
+ZLIB_INTERNAL void crc_fold_copy(deflate_state *z_const s,
+        unsigned char *dst, z_const unsigned char *src, long len)
+{
+    unsigned long algn_diff;
+    __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;
+
+    CRC_LOAD(s)
+
+    if (len < 16) {
+        if (len == 0)
+            return;
+        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
+        goto partial;
+    }
+
+    algn_diff = (0 - (unsigned long)src) & 0xF;
+    if (algn_diff) {
+        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
+        _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
+
+        dst += algn_diff;
+        src += algn_diff;
+        len -= algn_diff;
+
+        partial_fold(s, algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
+            &xmm_crc_part);
+    }
+
+    while ((len -= 64) >= 0) {
+        xmm_t0 = _mm_load_si128((__m128i *)src);
+        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
+        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
+        xmm_t3 = _mm_load_si128((__m128i *)src + 3);
+
+        fold_4(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+
+        _mm_storeu_si128((__m128i *)dst, xmm_t0);
+        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+
+        xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
+        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
+        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2);
+        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3);
+        
+        src += 64;
+        dst += 64;
+    }
+
+    /*
+     * len = num bytes left - 64
+     */
+    if (len + 16 >= 0) {
+        len += 16;
+
+        xmm_t0 = _mm_load_si128((__m128i *)src);
+        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
+        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
+
+        fold_3(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+
+        _mm_storeu_si128((__m128i *)dst, xmm_t0);
+        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+
+        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
+        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
+        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);
+
+        if (len == 0)
+            goto done;
+
+        dst += 48;
+        xmm_crc_part = _mm_load_si128((__m128i *)src + 3);
+    } else if (len + 32 >= 0) {
+        len += 32;
+
+        xmm_t0 = _mm_load_si128((__m128i *)src);
+        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
+
+        fold_2(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+
+        _mm_storeu_si128((__m128i *)dst, xmm_t0);
+        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+
+        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
+        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);
+
+        if (len == 0)
+            goto done;
+
+        dst += 32;
+        xmm_crc_part = _mm_load_si128((__m128i *)src + 2);
+    } else if (len + 48 >= 0) {
+        len += 48;
+
+        xmm_t0 = _mm_load_si128((__m128i *)src);
+
+        fold_1(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+
+        _mm_storeu_si128((__m128i *)dst, xmm_t0);
+
+        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
+
+        if (len == 0)
+            goto done;
+        
+        dst += 16;
+        xmm_crc_part = _mm_load_si128((__m128i *)src + 1);
+    } else {
+        len += 64;
+        if (len == 0)
+            goto done;
+        xmm_crc_part = _mm_load_si128((__m128i *)src);
+    }
+
+partial:
+    _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
+    partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
+        &xmm_crc_part);
+done:
+    CRC_SAVE(s)
+}
+
+local z_const unsigned __attribute__((aligned(16))) crc_k[] = {
+    0xccaa009e, 0x00000000, /* rk1 */
+    0x751997d0, 0x00000001, /* rk2 */
+    0xccaa009e, 0x00000000, /* rk5 */
+    0x63cd6124, 0x00000001, /* rk6 */
+    0xf7011640, 0x00000001, /* rk7 */
+    0xdb710640, 0x00000001  /* rk8 */
+};
+
+local z_const unsigned __attribute__((aligned(16))) crc_mask[4] = {
+    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
+};
+
+local z_const unsigned __attribute__((aligned(16))) crc_mask2[4] = {
+    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
+};
+
+unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *z_const s)
+{
+    z_const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
+    z_const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);
+
+    unsigned crc;
+    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3, crc_fold;
+
+    CRC_LOAD(s)
+
+    /*
+     * k1
+     */
+    crc_fold = _mm_load_si128((__m128i *)crc_k);
+
+    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
+    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
+    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
+    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);
+
+    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
+    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
+    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
+    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);
+
+    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
+    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+
+    /*
+     * k5
+     */
+    crc_fold = _mm_load_si128((__m128i *)crc_k + 1);
+
+    xmm_crc0 = xmm_crc3;
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
+    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
+
+    xmm_crc0 = xmm_crc3;
+    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
+    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);
+
+    /*
+     * k7
+     */
+    xmm_crc1 = xmm_crc3;
+    xmm_crc2 = xmm_crc3;
+    crc_fold = _mm_load_si128((__m128i *)crc_k + 2);
+
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);
+
+    xmm_crc2 = xmm_crc3;
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);
+
+    crc = _mm_extract_epi32(xmm_crc3, 2);
+    return ~crc;
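+    /* not reached at run time: CRC_SAVE(s) below is kept only to close the
+     * do { ... } while (0) scope that CRC_LOAD(s) opened */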
+    CRC_SAVE(s)
+}
+
+#endif
+
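
A note on crc_fold_copy's prologue: one unaligned 16-byte copy plus a
partial_fold advances src to a 16-byte boundary, so the main loop can use
aligned loads while dst keeps unaligned stores throughout. The window padding
added in deflate.c below is what makes the unconditional 16-byte store at the
partial label safe when fewer than 16 bytes remain. A minimal sketch of the
prologue arithmetic, assuming (as the patch does) that pointers fit in an
unsigned integer type:

    #include <stdint.h>
    #include <stddef.h>

    /* Bytes to copy before src reaches a 16-byte boundary (0 if aligned). */
    static size_t bytes_to_alignment(const unsigned char *src)
    {
        return (size_t)((0 - (uintptr_t)src) & 0xF);
    }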
diff --git a/deflate.c b/deflate.c
index 6f861472dd5db0d6ca81f757feb9820c926c39ac..592cfb9ffc35a8195f03a1245e1bdbf44c9df86b 100644 (file)
--- a/deflate.c
+++ b/deflate.c
@@ -256,6 +256,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
     const char *version;
     int stream_size;
 {
+    unsigned window_padding = 0;
     deflate_state *s;
     int wrap = 1;
     static const char my_version[] = ZLIB_VERSION;
@@ -335,7 +336,11 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
     s->hash_mask = s->hash_size - 1;
     s->hash_shift =  ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
 
-    s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+#ifdef HAVE_PCLMULQDQ
+    window_padding = 8;
+#endif
+
+    s->window = (Bytef *) ZALLOC(strm, s->w_size + window_padding, 2*sizeof(Byte));
     s->prev   = (Posf *)  ZALLOC(strm, s->w_size, sizeof(Pos));
     s->head   = (Posf *)  ZALLOC(strm, s->hash_size, sizeof(Pos));
 
@@ -735,7 +740,7 @@ int ZEXPORT deflate (strm, flush)
     if (s->status == INIT_STATE) {
 #ifdef GZIP
         if (s->wrap == 2) {
-            strm->adler = crc32(0L, Z_NULL, 0);
+            crc_reset(s);
             put_byte(s, 31);
             put_byte(s, 139);
             put_byte(s, 8);
@@ -997,6 +1002,7 @@ int ZEXPORT deflate (strm, flush)
     /* Write the trailer */
 #ifdef GZIP
     if (s->wrap == 2) {
+        crc_finalize(s);
         put_byte(s, (Byte)(strm->adler & 0xff));
         put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
         put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
@@ -1130,15 +1136,16 @@ ZLIB_INTERNAL int read_buf(strm, buf, size)
 
     strm->avail_in  -= len;
 
-    zmemcpy(buf, strm->next_in, len);
-    if (strm->state->wrap == 1) {
-        strm->adler = adler32(strm->adler, buf, len);
-    }
 #ifdef GZIP
-    else if (strm->state->wrap == 2) {
-        strm->adler = crc32(strm->adler, buf, len);
-    }
+    if (strm->state->wrap == 2)
+        copy_with_crc(strm, buf, len);
+    else 
 #endif
+    {
+        zmemcpy(buf, strm->next_in, len);
+        if (strm->state->wrap == 1)
+            strm->adler = adler32(strm->adler, buf, len);
+    }
     strm->next_in  += len;
     strm->total_in += len;
 
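
The net effect on the gzip path (wrap == 2) is a three-phase lifecycle. A
hedged sketch of the call order, using the identifiers this patch adds (not
literal zlib-ng code):

    #include "deflate.h"  /* deflate_state, crc_reset(), copy_with_crc(), ... */

    /* crc_reset() at header time, copy_with_crc() per chunk via read_buf(),
     * crc_finalize() just before the trailer bytes are emitted. */
    static void gzip_crc_lifecycle(z_streamp strm, deflate_state *s,
                                   Bytef *buf, long len)
    {
        crc_reset(s);                  /* seeds folded state or strm->adler */
        copy_with_crc(strm, buf, len); /* SSE copy into window + CRC fold   */
        crc_finalize(s);               /* reduces state into strm->adler    */
    }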
diff --git a/deflate.h b/deflate.h
index fd9c80fb13887d616932a8e5e4ad20d7110f8b26..b9f92beca333c3b8e90f4f02e9ab85168d735ae2 100644 (file)
--- a/deflate.h
+++ b/deflate.h
@@ -107,6 +107,10 @@ typedef struct internal_state {
     Byte  method;        /* can only be DEFLATED */
     int   last_flush;    /* value of flush param for previous deflate call */
 
+#ifdef HAVE_PCLMULQDQ
+    unsigned __attribute__((aligned(16))) crc0[4 * 5];
+#endif
+
                 /* used by deflate.c: */
 
     uInt  w_size;        /* LZ77 window size (32K by default) */
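
The new crc0 member reserves five 16-byte slots: four for the folded state
xmm_crc0..xmm_crc3 and one for xmm_crc_part, matching the five loads and
stores in CRC_LOAD/CRC_SAVE. A quick sanity sketch (C11, assuming the 4-byte
unsigned of the targeted x86 ABIs):

    _Static_assert(sizeof(unsigned[4 * 5]) == 5 * 16,
                   "crc0 holds five 16-byte SSE register slots");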