git.ipfire.org Git - thirdparty/zlib-ng.git/commitdiff
Remove COPY ifdef from crc32 (v)pclmulqdq.
author Nathan Moinvaziri <nathan@nathanm.com>
Wed, 3 Dec 2025 03:36:54 +0000 (19:36 -0800)
committer Hans Kristian Rosbach <hk-github@circlestorm.org>
Tue, 9 Dec 2025 14:23:53 +0000 (15:23 +0100)
arch/x86/crc32_fold_pclmulqdq_tpl.h
arch/x86/crc32_fold_vpclmulqdq_tpl.h [deleted file]
arch/x86/crc32_pclmulqdq.c
arch/x86/crc32_pclmulqdq_tpl.h [deleted file]
arch/x86/crc32_vpclmulqdq.c
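This commit replaces the COPY template mechanism, in which the _tpl.h headers were included twice (once with and once without #define COPY) to generate two nearly identical function bodies, with a single shared body that takes a const int COPY argument. The Z_INTERNAL wrappers added below pass a literal 0 or 1, so after inlining the compiler folds the if (COPY) tests away and each specialization keeps its old branch-free shape. A minimal stand-alone sketch of the pattern, using hypothetical names rather than the real zlib-ng functions:

    #include <stddef.h>
    #include <stdint.h>

    /* One shared body: every use of dst sits behind a compile-time-constant COPY test. */
    static inline uint32_t fold_body(uint8_t *dst, const uint8_t *src, size_t len, const int COPY) {
        uint32_t acc = 0;
        for (size_t i = 0; i < len; i++) {
            acc += src[i];
            if (COPY)               /* dead code when the caller passes a literal 0 */
                dst[i] = src[i];
        }
        return acc;
    }

    /* Thin wrappers, analogous to crc32_fold_pclmulqdq() and crc32_fold_pclmulqdq_copy(). */
    uint32_t fold_only(const uint8_t *src, size_t len)                   { return fold_body(NULL, src, len, 0); }
    uint32_t fold_and_copy(uint8_t *dst, const uint8_t *src, size_t len) { return fold_body(dst, src, len, 1); }

Passing dst as NULL in the non-copy wrapper is safe for the same reason it is safe in the crc32_fold_pclmulqdq() wrapper further down: the stores guarded by if (COPY) are compiled out, so the pointer is never dereferenced.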

index 0a22a4abe5c553373534f1e50d337db506b8a8f6..f4f06302c40abd4209275ae1df574b420bbc624b 100644 (file)
@@ -1,10 +1,10 @@
-/*
- * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
- * instruction.
+/* crc32_fold_pclmulqdq_tpl.h -- Compute the CRC32 using a parallelized folding
+ * approach with the PCLMULQDQ and VPCLMULQDQ instructions.
  *
  * A white paper describing this algorithm can be found at:
  *     doc/crc-pclmulqdq.pdf
  *
+ * Copyright (C) 2020 Wangyang Guo (wangyang.guo@intel.com) (VPCLMULQDQ support)
  * Copyright (C) 2013 Intel Corporation. All rights reserved.
  * Copyright (C) 2016 Marian Beermann (support for initial value)
  * Authors:
  * For conditions of distribution and use, see copyright notice in zlib.h
  */
 
-#ifdef COPY
-Z_INTERNAL void CRC32_FOLD_COPY(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
-#else
-Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint32_t init_crc) {
+#include "zbuild.h"
+
+#include <immintrin.h>
+#include <wmmintrin.h>
+#include <smmintrin.h> // _mm_extract_epi32
+
+#include "crc32.h"
+#include "crc32_braid_p.h"
+#include "crc32_braid_tbl.h"
+#include "x86_intrins.h"
+
+static const unsigned ALIGNED_(16) crc_k[] = {
+    0xccaa009e, 0x00000000, /* rk1 */
+    0x751997d0, 0x00000001, /* rk2 */
+    0xccaa009e, 0x00000000, /* rk5 */
+    0x63cd6124, 0x00000001, /* rk6 */
+    0xf7011640, 0x00000001, /* rk7 */
+    0xdb710640, 0x00000001  /* rk8 */
+};
+
+static const unsigned ALIGNED_(16) crc_mask[4] = {
+    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
+};
+
+static const unsigned ALIGNED_(16) crc_mask2[4] = {
+    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
+};
+
+#define ONCE(op)                if (first) { first = 0; op; }
+#define XOR_INITIAL128(where)   ONCE(where = _mm_xor_si128(where, xmm_initial))
+
+static void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
+    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
+                                             0x00000001, 0xc6e41596);
+    __m128i x_tmp3;
+    __m128 ps_crc0, ps_crc3, ps_res;
+
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc3 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);
+
+    *xmm_crc0 = *xmm_crc1;
+    *xmm_crc1 = *xmm_crc2;
+    *xmm_crc2 = x_tmp3;
+    *xmm_crc3 = _mm_castps_si128(ps_res);
+}
+
+static void fold_2(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
+    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
+                                             0x00000001, 0xc6e41596);
+    __m128i x_tmp3, x_tmp2;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;
+
+    x_tmp3 = *xmm_crc3;
+    x_tmp2 = *xmm_crc2;
+
+    *xmm_crc3 = *xmm_crc1;
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);
+
+    *xmm_crc2 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);
+
+    *xmm_crc0 = x_tmp2;
+    *xmm_crc1 = x_tmp3;
+    *xmm_crc2 = _mm_castps_si128(ps_res20);
+    *xmm_crc3 = _mm_castps_si128(ps_res31);
+}
+
+static void fold_3(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
+    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
+                                             0x00000001, 0xc6e41596);
+    __m128i x_tmp3;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;
+
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc3 = *xmm_crc2;
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);
+
+    *xmm_crc2 = *xmm_crc1;
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);
+
+    *xmm_crc1 = *xmm_crc0;
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);
+
+    *xmm_crc0 = x_tmp3;
+    *xmm_crc1 = _mm_castps_si128(ps_res10);
+    *xmm_crc2 = _mm_castps_si128(ps_res21);
+    *xmm_crc3 = _mm_castps_si128(ps_res32);
+}
+
+static void fold_4(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
+    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
+                                             0x00000001, 0xc6e41596);
+    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
+    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
+    __m128 ps_res0, ps_res1, ps_res2, ps_res3;
+
+    x_tmp0 = *xmm_crc0;
+    x_tmp1 = *xmm_crc1;
+    x_tmp2 = *xmm_crc2;
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
+    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_t0 = _mm_castsi128_ps(x_tmp0);
+    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);
+
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
+    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_t1 = _mm_castsi128_ps(x_tmp1);
+    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);
+
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
+    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_t2 = _mm_castsi128_ps(x_tmp2);
+    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);
+
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
+    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_t3 = _mm_castsi128_ps(x_tmp3);
+    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);
+
+    *xmm_crc0 = _mm_castps_si128(ps_res0);
+    *xmm_crc1 = _mm_castps_si128(ps_res1);
+    *xmm_crc2 = _mm_castps_si128(ps_res2);
+    *xmm_crc3 = _mm_castps_si128(ps_res3);
+}
+
+static void fold_12(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
+    const __m128i xmm_fold12 = _mm_set_epi64x(0x596C8D81, 0xF5E48C85);
+    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
+    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
+    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
+    __m128 ps_res0, ps_res1, ps_res2, ps_res3;
+
+    x_tmp0 = *xmm_crc0;
+    x_tmp1 = *xmm_crc1;
+    x_tmp2 = *xmm_crc2;
+    x_tmp3 = *xmm_crc3;
+
+    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold12, 0x01);
+    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold12, 0x10);
+    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
+    ps_t0 = _mm_castsi128_ps(x_tmp0);
+    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);
+
+    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold12, 0x01);
+    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold12, 0x10);
+    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
+    ps_t1 = _mm_castsi128_ps(x_tmp1);
+    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);
+
+    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold12, 0x01);
+    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold12, 0x10);
+    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
+    ps_t2 = _mm_castsi128_ps(x_tmp2);
+    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);
+
+    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold12, 0x01);
+    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold12, 0x10);
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    ps_t3 = _mm_castsi128_ps(x_tmp3);
+    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);
+
+    *xmm_crc0 = _mm_castps_si128(ps_res0);
+    *xmm_crc1 = _mm_castps_si128(ps_res1);
+    *xmm_crc2 = _mm_castps_si128(ps_res2);
+    *xmm_crc3 = _mm_castps_si128(ps_res3);
+}
+
+#ifdef X86_VPCLMULQDQ
+
+#define XOR_INITIAL512(where) ONCE(where = _mm512_xor_si512(where, zmm_initial))
+
+static size_t fold_16(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3, uint8_t *dst,
+    const uint8_t *src, size_t len, __m128i init_crc, int32_t first, const int COPY) {
+    __m512i zmm_initial = _mm512_zextsi128_si512(init_crc);
+    __m512i zmm_t0, zmm_t1, zmm_t2, zmm_t3;
+    __m512i zmm_crc0, zmm_crc1, zmm_crc2, zmm_crc3;
+    __m512i z0, z1, z2, z3;
+    size_t len_tmp = len;
+    const __m512i zmm_fold4 = _mm512_set4_epi32(
+        0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
+    const __m512i zmm_fold16 = _mm512_set4_epi32(
+        0x00000001, 0x1542778a, 0x00000001, 0x322d1430);
+
+    // zmm register init
+    zmm_crc0 = _mm512_setzero_si512();
+    zmm_t0 = _mm512_loadu_si512((__m512i *)src);
+    if (!COPY) {
+        XOR_INITIAL512(zmm_t0);
+    }
+    zmm_crc1 = _mm512_loadu_si512((__m512i *)src + 1);
+    zmm_crc2 = _mm512_loadu_si512((__m512i *)src + 2);
+    zmm_crc3 = _mm512_loadu_si512((__m512i *)src + 3);
+
+    /* already have intermediate CRC in xmm registers
+     * fold4 with 4 xmm_crc to get zmm_crc0
+     */
+    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc0, 0);
+    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc1, 1);
+    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc2, 2);
+    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc3, 3);
+    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
+    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
+    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_t0, 0x96);
+
+    if (COPY) {
+        _mm512_storeu_si512((__m512i *)dst, zmm_t0);
+        _mm512_storeu_si512((__m512i *)dst + 1, zmm_crc1);
+        _mm512_storeu_si512((__m512i *)dst + 2, zmm_crc2);
+        _mm512_storeu_si512((__m512i *)dst + 3, zmm_crc3);
+        dst += 256;
+    }
+    len -= 256;
+    src += 256;
+
+    // fold-16 loops
+    while (len >= 256) {
+        zmm_t0 = _mm512_loadu_si512((__m512i *)src);
+        zmm_t1 = _mm512_loadu_si512((__m512i *)src + 1);
+        zmm_t2 = _mm512_loadu_si512((__m512i *)src + 2);
+        zmm_t3 = _mm512_loadu_si512((__m512i *)src + 3);
+
+        z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold16, 0x01);
+        z1 = _mm512_clmulepi64_epi128(zmm_crc1, zmm_fold16, 0x01);
+        z2 = _mm512_clmulepi64_epi128(zmm_crc2, zmm_fold16, 0x01);
+        z3 = _mm512_clmulepi64_epi128(zmm_crc3, zmm_fold16, 0x01);
+
+        zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold16, 0x10);
+        zmm_crc1 = _mm512_clmulepi64_epi128(zmm_crc1, zmm_fold16, 0x10);
+        zmm_crc2 = _mm512_clmulepi64_epi128(zmm_crc2, zmm_fold16, 0x10);
+        zmm_crc3 = _mm512_clmulepi64_epi128(zmm_crc3, zmm_fold16, 0x10);
+
+        zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_t0, 0x96);
+        zmm_crc1 = _mm512_ternarylogic_epi32(zmm_crc1, z1, zmm_t1, 0x96);
+        zmm_crc2 = _mm512_ternarylogic_epi32(zmm_crc2, z2, zmm_t2, 0x96);
+        zmm_crc3 = _mm512_ternarylogic_epi32(zmm_crc3, z3, zmm_t3, 0x96);
+
+        if (COPY) {
+            _mm512_storeu_si512((__m512i *)dst, zmm_t0);
+            _mm512_storeu_si512((__m512i *)dst + 1, zmm_t1);
+            _mm512_storeu_si512((__m512i *)dst + 2, zmm_t2);
+            _mm512_storeu_si512((__m512i *)dst + 3, zmm_t3);
+            dst += 256;
+        }
+        len -= 256;
+        src += 256;
+    }
+    // zmm_crc[0,1,2,3] -> zmm_crc0
+    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
+    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
+    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc1, 0x96);
+
+    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
+    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
+    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc2, 0x96);
+
+    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
+    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
+    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc3, 0x96);
+
+    // zmm_crc0 -> xmm_crc[0, 1, 2, 3]
+    *xmm_crc0 = _mm512_extracti32x4_epi32(zmm_crc0, 0);
+    *xmm_crc1 = _mm512_extracti32x4_epi32(zmm_crc0, 1);
+    *xmm_crc2 = _mm512_extracti32x4_epi32(zmm_crc0, 2);
+    *xmm_crc3 = _mm512_extracti32x4_epi32(zmm_crc0, 3);
+
+    return (len_tmp - len);  // return n bytes processed
+}
 #endif
+
+static const unsigned ALIGNED_(32) pshufb_shf_table[60] = {
+    0x84838281, 0x88878685, 0x8c8b8a89, 0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
+    0x85848382, 0x89888786, 0x8d8c8b8a, 0x01008f8e, /* shl 14 (16 - 2)/shr2 */
+    0x86858483, 0x8a898887, 0x8e8d8c8b, 0x0201008f, /* shl 13 (16 - 3)/shr3 */
+    0x87868584, 0x8b8a8988, 0x8f8e8d8c, 0x03020100, /* shl 12 (16 - 4)/shr4 */
+    0x88878685, 0x8c8b8a89, 0x008f8e8d, 0x04030201, /* shl 11 (16 - 5)/shr5 */
+    0x89888786, 0x8d8c8b8a, 0x01008f8e, 0x05040302, /* shl 10 (16 - 6)/shr6 */
+    0x8a898887, 0x8e8d8c8b, 0x0201008f, 0x06050403, /* shl  9 (16 - 7)/shr7 */
+    0x8b8a8988, 0x8f8e8d8c, 0x03020100, 0x07060504, /* shl  8 (16 - 8)/shr8 */
+    0x8c8b8a89, 0x008f8e8d, 0x04030201, 0x08070605, /* shl  7 (16 - 9)/shr9 */
+    0x8d8c8b8a, 0x01008f8e, 0x05040302, 0x09080706, /* shl  6 (16 -10)/shr10*/
+    0x8e8d8c8b, 0x0201008f, 0x06050403, 0x0a090807, /* shl  5 (16 -11)/shr11*/
+    0x8f8e8d8c, 0x03020100, 0x07060504, 0x0b0a0908, /* shl  4 (16 -12)/shr12*/
+    0x008f8e8d, 0x04030201, 0x08070605, 0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
+    0x01008f8e, 0x05040302, 0x09080706, 0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
+    0x0201008f, 0x06050403, 0x0a090807, 0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
+};
+
+static void partial_fold(const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2,
+                         __m128i *xmm_crc3, __m128i *xmm_crc_part) {
+    const __m128i xmm_fold4 = _mm_set_epi32(0x00000001, 0x54442bd4,
+                                            0x00000001, 0xc6e41596);
+    const __m128i xmm_mask3 = _mm_set1_epi32((int32_t)0x80808080);
+
+    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
+    __m128i xmm_a0_0, xmm_a0_1;
+    __m128 ps_crc3, psa0_0, psa0_1, ps_res;
+
+    xmm_shl = _mm_load_si128((__m128i *)(pshufb_shf_table + (4 * (len - 1))));
+    xmm_shr = xmm_shl;
+    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);
+
+    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);
+
+    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
+    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
+    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);
+
+    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
+    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
+    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);
+
+    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
+    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
+    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);
+
+    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
+    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
+    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);
+
+    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
+    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);
+
+    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
+    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
+    psa0_1 = _mm_castsi128_ps(xmm_a0_1);
+
+    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
+    ps_res = _mm_xor_ps(ps_res, psa0_1);
+
+    *xmm_crc3 = _mm_castps_si128(ps_res);
+}
+
+static inline uint32_t crc32_small(uint32_t crc, const uint8_t *buf, size_t len) {
+    uint32_t c = (~crc) & 0xffffffff;
+
+    while (len) {
+        len--;
+        CRC_DO1;
+    }
+
+    return c ^ 0xffffffff;
+}
+
+static inline void crc32_fold_load(__m128i *fold, __m128i *fold0, __m128i *fold1, __m128i *fold2, __m128i *fold3) {
+    *fold0 = _mm_load_si128(fold + 0);
+    *fold1 = _mm_load_si128(fold + 1);
+    *fold2 = _mm_load_si128(fold + 2);
+    *fold3 = _mm_load_si128(fold + 3);
+}
+
+static inline void crc32_fold_save(__m128i *fold, const __m128i *fold0, const __m128i *fold1,
+                                   const __m128i *fold2, const __m128i *fold3) {
+    _mm_storeu_si128(fold + 0, *fold0);
+    _mm_storeu_si128(fold + 1, *fold1);
+    _mm_storeu_si128(fold + 2, *fold2);
+    _mm_storeu_si128(fold + 3, *fold3);
+}
+
+static inline uint32_t crc32_fold_reset(crc32_fold *crc) {
+    __m128i xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
+    __m128i xmm_zero = _mm_setzero_si128();
+    crc32_fold_save((__m128i *)crc->fold, &xmm_crc0, &xmm_zero, &xmm_zero, &xmm_zero);
+    return 0;
+}
+
+static inline uint32_t crc32_fold_final(crc32_fold *crc) {
+    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
+    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);
+    __m128i xmm_crc0, xmm_crc1, xmm_crc2, xmm_crc3;
+    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;
+
+    crc32_fold_load((__m128i *)crc->fold, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+
+    /*
+     * k1
+     */
+    crc_fold = _mm_load_si128((__m128i *)crc_k);
+
+    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
+    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
+    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
+    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);
+
+    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
+    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
+    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
+    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);
+
+    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
+    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+
+    /*
+     * k5
+     */
+    crc_fold = _mm_load_si128((__m128i *)(crc_k + 4));
+
+    xmm_crc0 = xmm_crc3;
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
+    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
+
+    xmm_crc0 = xmm_crc3;
+    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
+    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);
+
+    /*
+     * k7
+     */
+    xmm_crc1 = xmm_crc3;
+    xmm_crc2 = xmm_crc3;
+    crc_fold = _mm_load_si128((__m128i *)(crc_k + 8));
+
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);
+
+    xmm_crc2 = xmm_crc3;
+    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
+    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);
+
+    crc->value = ~((uint32_t)_mm_extract_epi32(xmm_crc3, 2));
+
+    return crc->value;
+}
+
+static inline void crc32_fold_copy(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len, uint32_t init_crc, const int COPY) {
     unsigned long algn_diff;
     __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;
     __m128i xmm_crc0, xmm_crc1, xmm_crc2, xmm_crc3;
     __m128i xmm_crc_part = _mm_setzero_si128();
     char ALIGNED_(16) partial_buf[16] = { 0 };
-#ifndef COPY
     __m128i xmm_initial = _mm_cvtsi32_si128(init_crc);
     int32_t first = init_crc != 0;
 
-    /* The CRC functions don't call this for input < 16, as a minimum of 16 bytes of input is needed
-     * for the aligning load that occurs.  If there's an initial CRC, to carry it forward through
-     * the folded CRC there must be 16 - src % 16 + 16 bytes available, which by definition can be
-     * up to 15 bytes + one full vector load. */
-    assert(len >= 16 || first == 0);
-#endif
+    if (!COPY) {
+        /* The CRC functions don't call this for input < 16, as a minimum of 16 bytes of input is needed
+         * for the aligning load that occurs.  If there's an initial CRC, to carry it forward through
+         * the folded CRC there must be 16 - src % 16 + 16 bytes available, which by definition can be
+         * up to 15 bytes + one full vector load. */
+        Assert(len >= 16 || first == 0, "Insufficient data for initial CRC");
+    }
     crc32_fold_load((__m128i *)crc->fold, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
 
     if (len < 16) {
@@ -45,44 +505,44 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
 
         memcpy(partial_buf, src, len);
         xmm_crc_part = _mm_load_si128((const __m128i *)partial_buf);
-#ifdef COPY
-        memcpy(dst, partial_buf, len);
-#endif
+        if (COPY) {
+            memcpy(dst, partial_buf, len);
+        }
         goto partial;
     }
 
     algn_diff = ((uintptr_t)16 - ((uintptr_t)src & 0xF)) & 0xF;
     if (algn_diff) {
         xmm_crc_part = _mm_loadu_si128((__m128i *)src);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
-        dst += algn_diff;
-#else
-        XOR_INITIAL128(xmm_crc_part);
-
-        if (algn_diff < 4 && init_crc != 0) {
-            xmm_t0 = xmm_crc_part;
-            if (len >= 32) {
-                xmm_crc_part = _mm_loadu_si128((__m128i*)src + 1);
-                fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-                xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
-            } else {
-                memcpy(partial_buf, src + 16, len - 16);
-                xmm_crc_part = _mm_load_si128((__m128i*)partial_buf);
-                fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-                xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
+            dst += algn_diff;
+        } else {
+            XOR_INITIAL128(xmm_crc_part);
+
+            if (algn_diff < 4 && init_crc != 0) {
+                xmm_t0 = xmm_crc_part;
+                if (len >= 32) {
+                    xmm_crc_part = _mm_loadu_si128((__m128i*)src + 1);
+                    fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+                    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
+                } else {
+                    memcpy(partial_buf, src + 16, len - 16);
+                    xmm_crc_part = _mm_load_si128((__m128i*)partial_buf);
+                    fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
+                    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
+                    src += 16;
+                    len -= 16;
+                    if (COPY) {
+                        dst -= algn_diff;
+                    }
+                    goto partial;
+                }
+
                 src += 16;
                 len -= 16;
-#ifdef COPY
-                dst -= algn_diff;
-#endif
-                goto partial;
             }
-
-            src += 16;
-            len -= 16;
         }
-#endif
 
         partial_fold(algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part);
 
@@ -92,14 +552,14 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
 
 #ifdef X86_VPCLMULQDQ
     if (len >= 256) {
-#ifdef COPY
-        size_t n = fold_16_vpclmulqdq_copy(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, dst, src, len);
-        dst += n;
-#else
-        size_t n = fold_16_vpclmulqdq(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, src, len,
-            xmm_initial, first);
-        first = 0;
-#endif
+        size_t n;
+        if (COPY) {
+            n = fold_16(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, dst, src, len, xmm_initial, first, 1);
+            dst += n;
+        } else {
+            n = fold_16(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, dst, src, len, xmm_initial, first, 0);
+            first = 0;
+        }
         len -= n;
         src += n;
     }
@@ -111,7 +571,9 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
      * the stream at the following offsets: 6, 9, 10, 16, 20, 22,
      * 24, 25, 27, 28, 30, 31, 32 - this is detailed in the paper
      * as "generator_64_bits_unrolled_8" */
-#if !defined(COPY) || defined(__AVX512VL__)
+#ifndef __AVX512VL__
+    if (!COPY) {
+#endif
     while (len >= 512 + 64 + 16*8) {
         __m128i chorba8 = _mm_load_si128((__m128i *)src);
         __m128i chorba7 = _mm_load_si128((__m128i *)src + 1);
@@ -121,19 +583,19 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         __m128i chorba3 = _mm_load_si128((__m128i *)src + 5);
         __m128i chorba2 = _mm_load_si128((__m128i *)src + 6);
         __m128i chorba1 = _mm_load_si128((__m128i *)src + 7);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, chorba8);
-        _mm_storeu_si128((__m128i *)dst + 1, chorba7);
-        _mm_storeu_si128((__m128i *)dst + 2, chorba6);
-        _mm_storeu_si128((__m128i *)dst + 3, chorba5);
-        _mm_storeu_si128((__m128i *)dst + 4, chorba4);
-        _mm_storeu_si128((__m128i *)dst + 5, chorba3);
-        _mm_storeu_si128((__m128i *)dst + 6, chorba2);
-        _mm_storeu_si128((__m128i *)dst + 7, chorba1);
-        dst += 16*8;
-#else
-        XOR_INITIAL128(chorba8);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, chorba8);
+            _mm_storeu_si128((__m128i *)dst + 1, chorba7);
+            _mm_storeu_si128((__m128i *)dst + 2, chorba6);
+            _mm_storeu_si128((__m128i *)dst + 3, chorba5);
+            _mm_storeu_si128((__m128i *)dst + 4, chorba4);
+            _mm_storeu_si128((__m128i *)dst + 5, chorba3);
+            _mm_storeu_si128((__m128i *)dst + 6, chorba2);
+            _mm_storeu_si128((__m128i *)dst + 7, chorba1);
+            dst += 16*8;
+        } else {
+            XOR_INITIAL128(chorba8);
+        }
         chorba2 = _mm_xor_si128(chorba2, chorba8);
         chorba1 = _mm_xor_si128(chorba1, chorba7);
         src += 16*8;
@@ -145,13 +607,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 3);
 
         fold_12(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
         xmm_t0 = _mm_xor_si128(xmm_t0, chorba6);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(xmm_t1, chorba5), chorba8);
         xmm_t2 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t2, chorba4), chorba8), chorba7);
@@ -167,13 +629,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 7);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
 
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba2), chorba6), chorba5);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba1), chorba4), chorba5);
@@ -190,13 +652,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 11);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
 
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba1), chorba2), chorba8);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(xmm_t1, chorba1), chorba7);
@@ -213,13 +675,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 15);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
 
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(xmm_t0, chorba4), chorba8);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba3), chorba8), chorba7);
@@ -236,13 +698,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 19);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
 
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba4), chorba8), chorba6), chorba5);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba3), chorba4), chorba8), chorba7), chorba5);
@@ -259,13 +721,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 23);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
 
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba1), chorba2), chorba4), chorba8), chorba7), chorba5);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba1), chorba3), chorba4), chorba7), chorba6);
@@ -282,13 +744,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 27);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba1), chorba3), chorba4), chorba8), chorba7), chorba6);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba2), chorba3), chorba7), chorba6), chorba5);
         xmm_t2 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t2, chorba1), chorba2), chorba4), chorba6), chorba5);
@@ -304,13 +766,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t3 = _mm_load_si128((__m128i *)src + 31);
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        }
         xmm_t0 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t0, chorba2), chorba3), chorba4);
         xmm_t1 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(xmm_t1, chorba1), chorba2), chorba3);
         xmm_t2 = _mm_xor_si128(_mm_xor_si128(xmm_t2, chorba1), chorba2);
@@ -323,6 +785,8 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         len -= 512;
         src += 512;
     }
+#ifndef __AVX512VL__
+    }
 #endif
 
     while (len >= 64) {
@@ -334,15 +798,15 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         src += 64;
 
         fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
-        dst += 64;
-#else
-        XOR_INITIAL128(xmm_t0);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);
+            dst += 64;
+        } else {
+            XOR_INITIAL128(xmm_t0);
+        }
 
         xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
         xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
@@ -360,14 +824,14 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t1 = _mm_load_si128((__m128i *)src + 1);
         xmm_t2 = _mm_load_si128((__m128i *)src + 2);
         src += 48;
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
-        dst += 48;
-#else
-        XOR_INITIAL128(xmm_t0);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
+            dst += 48;
+        } else {
+            XOR_INITIAL128(xmm_t0);
+        }
         fold_3(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
 
         xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
@@ -379,13 +843,13 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         xmm_t0 = _mm_load_si128((__m128i *)src);
         xmm_t1 = _mm_load_si128((__m128i *)src + 1);
         src += 32;
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
-        dst += 32;
-#else
-        XOR_INITIAL128(xmm_t0);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
+            dst += 32;
+        } else {
+            XOR_INITIAL128(xmm_t0);
+        }
         fold_2(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
 
         xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
@@ -394,12 +858,12 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
         len -= 16;
         xmm_t0 = _mm_load_si128((__m128i *)src);
         src += 16;
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)dst, xmm_t0);
-        dst += 16;
-#else
-        XOR_INITIAL128(xmm_t0);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)dst, xmm_t0);
+            dst += 16;
+        } else {
+            XOR_INITIAL128(xmm_t0);
+        }
         fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
 
         xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);
@@ -408,10 +872,10 @@ Z_INTERNAL void CRC32_FOLD(crc32_fold *crc, const uint8_t *src, size_t len, uint
 partial:
     if (len) {
         memcpy(&xmm_crc_part, src, len);
-#ifdef COPY
-        _mm_storeu_si128((__m128i *)partial_buf, xmm_crc_part);
-        memcpy(dst, partial_buf, len);
-#endif
+        if (COPY) {
+            _mm_storeu_si128((__m128i *)partial_buf, xmm_crc_part);
+            memcpy(dst, partial_buf, len);
+        }
         partial_fold(len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part);
     }
 
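A note on the Chorba loop guard above: the old template compiled that loop under #if !defined(COPY) || defined(__AVX512VL__), and the rewrite keeps the same behavior by wrapping the runtime test if (!COPY) { ... } in #ifndef __AVX512VL__. Since COPY is a literal constant at every call site, the compiler folds the test away; the loop always runs in the non-copy path and additionally runs in the copy path when the build targets AVX512-VL. A sketch of the equivalent single condition, using a hypothetical macro name:

    /* Hypothetical helper, equivalent to the #ifndef __AVX512VL__ / if (!COPY) pairing above. */
    #ifdef __AVX512VL__
    #  define CHORBA_ELIGIBLE(COPY) 1           /* copy and non-copy paths both take the Chorba loop */
    #else
    #  define CHORBA_ELIGIBLE(COPY) (!(COPY))   /* without AVX512-VL, only the non-copy path does */
    #endif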
diff --git a/arch/x86/crc32_fold_vpclmulqdq_tpl.h b/arch/x86/crc32_fold_vpclmulqdq_tpl.h
deleted file mode 100644 (file)
index 3ea5c33..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/* crc32_fold_vpclmulqdq_tpl.h -- VPCMULQDQ-based CRC32 folding template.
- * Copyright Wangyang Guo (wangyang.guo@intel.com)
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#ifdef COPY
-static size_t fold_16_vpclmulqdq_copy(__m128i *xmm_crc0, __m128i *xmm_crc1,
-    __m128i *xmm_crc2, __m128i *xmm_crc3, uint8_t *dst, const uint8_t *src, size_t len) {
-#else
-static size_t fold_16_vpclmulqdq(__m128i *xmm_crc0, __m128i *xmm_crc1,
-    __m128i *xmm_crc2, __m128i *xmm_crc3, const uint8_t *src, size_t len,
-    __m128i init_crc, int32_t first) {
-    __m512i zmm_initial = _mm512_zextsi128_si512(init_crc);
-#endif
-    __m512i zmm_t0, zmm_t1, zmm_t2, zmm_t3;
-    __m512i zmm_crc0, zmm_crc1, zmm_crc2, zmm_crc3;
-    __m512i z0, z1, z2, z3;
-    size_t len_tmp = len;
-    const __m512i zmm_fold4 = _mm512_set4_epi32(
-        0x00000001, 0x54442bd4, 0x00000001, 0xc6e41596);
-    const __m512i zmm_fold16 = _mm512_set4_epi32(
-        0x00000001, 0x1542778a, 0x00000001, 0x322d1430);
-
-    // zmm register init
-    zmm_crc0 = _mm512_setzero_si512();
-    zmm_t0 = _mm512_loadu_si512((__m512i *)src);
-#ifndef COPY
-    XOR_INITIAL512(zmm_t0);
-#endif
-    zmm_crc1 = _mm512_loadu_si512((__m512i *)src + 1);
-    zmm_crc2 = _mm512_loadu_si512((__m512i *)src + 2);
-    zmm_crc3 = _mm512_loadu_si512((__m512i *)src + 3);
-
-    /* already have intermediate CRC in xmm registers
-        * fold4 with 4 xmm_crc to get zmm_crc0
-    */
-    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc0, 0);
-    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc1, 1);
-    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc2, 2);
-    zmm_crc0 = _mm512_inserti32x4(zmm_crc0, *xmm_crc3, 3);
-    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
-    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
-    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_t0, 0x96);
-
-#ifdef COPY
-    _mm512_storeu_si512((__m512i *)dst, zmm_t0);
-    _mm512_storeu_si512((__m512i *)dst + 1, zmm_crc1);
-    _mm512_storeu_si512((__m512i *)dst + 2, zmm_crc2);
-    _mm512_storeu_si512((__m512i *)dst + 3, zmm_crc3);
-    dst += 256;
-#endif
-    len -= 256;
-    src += 256;
-
-    // fold-16 loops
-    while (len >= 256) {
-        zmm_t0 = _mm512_loadu_si512((__m512i *)src);
-        zmm_t1 = _mm512_loadu_si512((__m512i *)src + 1);
-        zmm_t2 = _mm512_loadu_si512((__m512i *)src + 2);
-        zmm_t3 = _mm512_loadu_si512((__m512i *)src + 3);
-
-        z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold16, 0x01);
-        z1 = _mm512_clmulepi64_epi128(zmm_crc1, zmm_fold16, 0x01);
-        z2 = _mm512_clmulepi64_epi128(zmm_crc2, zmm_fold16, 0x01);
-        z3 = _mm512_clmulepi64_epi128(zmm_crc3, zmm_fold16, 0x01);
-
-        zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold16, 0x10);
-        zmm_crc1 = _mm512_clmulepi64_epi128(zmm_crc1, zmm_fold16, 0x10);
-        zmm_crc2 = _mm512_clmulepi64_epi128(zmm_crc2, zmm_fold16, 0x10);
-        zmm_crc3 = _mm512_clmulepi64_epi128(zmm_crc3, zmm_fold16, 0x10);
-
-        zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_t0, 0x96);
-        zmm_crc1 = _mm512_ternarylogic_epi32(zmm_crc1, z1, zmm_t1, 0x96);
-        zmm_crc2 = _mm512_ternarylogic_epi32(zmm_crc2, z2, zmm_t2, 0x96);
-        zmm_crc3 = _mm512_ternarylogic_epi32(zmm_crc3, z3, zmm_t3, 0x96);
-
-#ifdef COPY
-        _mm512_storeu_si512((__m512i *)dst, zmm_t0);
-        _mm512_storeu_si512((__m512i *)dst + 1, zmm_t1);
-        _mm512_storeu_si512((__m512i *)dst + 2, zmm_t2);
-        _mm512_storeu_si512((__m512i *)dst + 3, zmm_t3);
-        dst += 256;
-#endif
-        len -= 256;
-        src += 256;
-    }
-    // zmm_crc[0,1,2,3] -> zmm_crc0
-    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
-    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
-    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc1, 0x96);
-
-    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
-    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
-    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc2, 0x96);
-
-    z0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x01);
-    zmm_crc0 = _mm512_clmulepi64_epi128(zmm_crc0, zmm_fold4, 0x10);
-    zmm_crc0 = _mm512_ternarylogic_epi32(zmm_crc0, z0, zmm_crc3, 0x96);
-
-    // zmm_crc0 -> xmm_crc[0, 1, 2, 3]
-    *xmm_crc0 = _mm512_extracti32x4_epi32(zmm_crc0, 0);
-    *xmm_crc1 = _mm512_extracti32x4_epi32(zmm_crc0, 1);
-    *xmm_crc2 = _mm512_extracti32x4_epi32(zmm_crc0, 2);
-    *xmm_crc3 = _mm512_extracti32x4_epi32(zmm_crc0, 3);
-
-    return (len_tmp - len);  // return n bytes processed
-}
index 9383b7a2ba00c78773b2393fc44dd1d6d2401063..428f5554e2b974c2bfa0c88ae75eb1005fedfcbf 100644 (file)
 
 #ifdef X86_PCLMULQDQ_CRC
 
-#define CRC32_FOLD_COPY  crc32_fold_pclmulqdq_copy
-#define CRC32_FOLD       crc32_fold_pclmulqdq
-#define CRC32_FOLD_RESET crc32_fold_pclmulqdq_reset
-#define CRC32_FOLD_FINAL crc32_fold_pclmulqdq_final
-#define CRC32            crc32_pclmulqdq
+#include "crc32_fold_pclmulqdq_tpl.h"
 
-#include "crc32_pclmulqdq_tpl.h"
+Z_INTERNAL uint32_t crc32_fold_pclmulqdq_reset(crc32_fold *crc) {
+    return crc32_fold_reset(crc);
+}
 
+Z_INTERNAL uint32_t crc32_fold_pclmulqdq_final(crc32_fold *crc) {
+    return crc32_fold_final(crc);
+}
+
+Z_INTERNAL void crc32_fold_pclmulqdq(crc32_fold *crc, const uint8_t *src, size_t len, uint32_t init_crc) {
+    crc32_fold_copy(crc, NULL, src, len, init_crc, 0);
+}
+
+Z_INTERNAL void crc32_fold_pclmulqdq_copy(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
+    crc32_fold_copy(crc, dst, src, len, 0, 1);
+}
+
+Z_INTERNAL uint32_t crc32_pclmulqdq(uint32_t crc32, const uint8_t *buf, size_t len) {
+    /* For lengths smaller than ~12, the crc32_small method is faster,
+     * but the pclmul functions also have minimum length requirements due to alignment. */
+    if (len < 16)
+        return crc32_small(crc32, buf, len);
+
+    crc32_fold ALIGNED_(16) crc_state;
+    crc32_fold_reset(&crc_state);
+    crc32_fold_copy(&crc_state, NULL, buf, len, crc32, 0);
+    return crc32_fold_final(&crc_state);
+}
 #endif
diff --git a/arch/x86/crc32_pclmulqdq_tpl.h b/arch/x86/crc32_pclmulqdq_tpl.h
deleted file mode 100644 (file)
index c6c4c8f..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
- * instruction.
- *
- * A white paper describing this algorithm can be found at:
- *     doc/crc-pclmulqdq.pdf
- *
- * Copyright (C) 2013 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Marian Beermann (support for initial value)
- * Authors:
- *     Wajdi Feghali   <wajdi.k.feghali@intel.com>
- *     Jim Guilford    <james.guilford@intel.com>
- *     Vinodh Gopal    <vinodh.gopal@intel.com>
- *     Erdinc Ozturk   <erdinc.ozturk@intel.com>
- *     Jim Kukunas     <james.t.kukunas@linux.intel.com>
- *
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include "zbuild.h"
-
-#include <immintrin.h>
-#include <wmmintrin.h>
-#include <smmintrin.h> // _mm_extract_epi32
-
-#include "crc32.h"
-#include "crc32_braid_p.h"
-#include "crc32_braid_tbl.h"
-#include "x86_intrins.h"
-#include <assert.h>
-
-#ifdef X86_VPCLMULQDQ
-static size_t fold_16_vpclmulqdq(__m128i *xmm_crc0, __m128i *xmm_crc1,
-    __m128i *xmm_crc2, __m128i *xmm_crc3, const uint8_t *src, size_t len, __m128i init_crc,
-    int32_t first);
-static size_t fold_16_vpclmulqdq_copy(__m128i *xmm_crc0, __m128i *xmm_crc1,
-    __m128i *xmm_crc2, __m128i *xmm_crc3, uint8_t *dst, const uint8_t *src, size_t len);
-#endif
-
-static void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
-    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
-                                             0x00000001, 0xc6e41596);
-    __m128i x_tmp3;
-    __m128 ps_crc0, ps_crc3, ps_res;
-
-    x_tmp3 = *xmm_crc3;
-
-    *xmm_crc3 = *xmm_crc0;
-    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
-    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
-    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);
-
-    *xmm_crc0 = *xmm_crc1;
-    *xmm_crc1 = *xmm_crc2;
-    *xmm_crc2 = x_tmp3;
-    *xmm_crc3 = _mm_castps_si128(ps_res);
-}
-
-static void fold_2(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
-    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
-                                             0x00000001, 0xc6e41596);
-    __m128i x_tmp3, x_tmp2;
-    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;
-
-    x_tmp3 = *xmm_crc3;
-    x_tmp2 = *xmm_crc2;
-
-    *xmm_crc3 = *xmm_crc1;
-    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
-    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
-    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);
-
-    *xmm_crc2 = *xmm_crc0;
-    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
-    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
-    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
-    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
-    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);
-
-    *xmm_crc0 = x_tmp2;
-    *xmm_crc1 = x_tmp3;
-    *xmm_crc2 = _mm_castps_si128(ps_res20);
-    *xmm_crc3 = _mm_castps_si128(ps_res31);
-}
-
-static void fold_3(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
-    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
-                                             0x00000001, 0xc6e41596);
-    __m128i x_tmp3;
-    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;
-
-    x_tmp3 = *xmm_crc3;
-
-    *xmm_crc3 = *xmm_crc2;
-    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
-    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
-    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);
-
-    *xmm_crc2 = *xmm_crc1;
-    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
-    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
-    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
-    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
-    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);
-
-    *xmm_crc1 = *xmm_crc0;
-    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
-    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
-    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
-    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
-    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);
-
-    *xmm_crc0 = x_tmp3;
-    *xmm_crc1 = _mm_castps_si128(ps_res10);
-    *xmm_crc2 = _mm_castps_si128(ps_res21);
-    *xmm_crc3 = _mm_castps_si128(ps_res32);
-}
-
-static void fold_4(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
-    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
-                                             0x00000001, 0xc6e41596);
-    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
-    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
-    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
-    __m128 ps_res0, ps_res1, ps_res2, ps_res3;
-
-    x_tmp0 = *xmm_crc0;
-    x_tmp1 = *xmm_crc1;
-    x_tmp2 = *xmm_crc2;
-    x_tmp3 = *xmm_crc3;
-
-    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
-    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
-    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
-    ps_t0 = _mm_castsi128_ps(x_tmp0);
-    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);
-
-    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
-    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
-    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
-    ps_t1 = _mm_castsi128_ps(x_tmp1);
-    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);
-
-    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
-    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
-    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
-    ps_t2 = _mm_castsi128_ps(x_tmp2);
-    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);
-
-    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
-    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    ps_t3 = _mm_castsi128_ps(x_tmp3);
-    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);
-
-    *xmm_crc0 = _mm_castps_si128(ps_res0);
-    *xmm_crc1 = _mm_castps_si128(ps_res1);
-    *xmm_crc2 = _mm_castps_si128(ps_res2);
-    *xmm_crc3 = _mm_castps_si128(ps_res3);
-}
-
-static void fold_12(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
-    const __m128i xmm_fold12 = _mm_set_epi64x(0x596C8D81, 0xF5E48C85);
-    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
-    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
-    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
-    __m128 ps_res0, ps_res1, ps_res2, ps_res3;
-
-    x_tmp0 = *xmm_crc0;
-    x_tmp1 = *xmm_crc1;
-    x_tmp2 = *xmm_crc2;
-    x_tmp3 = *xmm_crc3;
-
-    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold12, 0x01);
-    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold12, 0x10);
-    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
-    ps_t0 = _mm_castsi128_ps(x_tmp0);
-    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);
-
-    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold12, 0x01);
-    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold12, 0x10);
-    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
-    ps_t1 = _mm_castsi128_ps(x_tmp1);
-    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);
-
-    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold12, 0x01);
-    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold12, 0x10);
-    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
-    ps_t2 = _mm_castsi128_ps(x_tmp2);
-    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);
-
-    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold12, 0x01);
-    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold12, 0x10);
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    ps_t3 = _mm_castsi128_ps(x_tmp3);
-    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);
-
-    *xmm_crc0 = _mm_castps_si128(ps_res0);
-    *xmm_crc1 = _mm_castps_si128(ps_res1);
-    *xmm_crc2 = _mm_castps_si128(ps_res2);
-    *xmm_crc3 = _mm_castps_si128(ps_res3);
-}
-
-static const unsigned ALIGNED_(32) pshufb_shf_table[60] = {
-    0x84838281, 0x88878685, 0x8c8b8a89, 0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
-    0x85848382, 0x89888786, 0x8d8c8b8a, 0x01008f8e, /* shl 14 (16 - 3)/shr2 */
-    0x86858483, 0x8a898887, 0x8e8d8c8b, 0x0201008f, /* shl 13 (16 - 4)/shr3 */
-    0x87868584, 0x8b8a8988, 0x8f8e8d8c, 0x03020100, /* shl 12 (16 - 4)/shr4 */
-    0x88878685, 0x8c8b8a89, 0x008f8e8d, 0x04030201, /* shl 11 (16 - 5)/shr5 */
-    0x89888786, 0x8d8c8b8a, 0x01008f8e, 0x05040302, /* shl 10 (16 - 6)/shr6 */
-    0x8a898887, 0x8e8d8c8b, 0x0201008f, 0x06050403, /* shl  9 (16 - 7)/shr7 */
-    0x8b8a8988, 0x8f8e8d8c, 0x03020100, 0x07060504, /* shl  8 (16 - 8)/shr8 */
-    0x8c8b8a89, 0x008f8e8d, 0x04030201, 0x08070605, /* shl  7 (16 - 9)/shr9 */
-    0x8d8c8b8a, 0x01008f8e, 0x05040302, 0x09080706, /* shl  6 (16 -10)/shr10*/
-    0x8e8d8c8b, 0x0201008f, 0x06050403, 0x0a090807, /* shl  5 (16 -11)/shr11*/
-    0x8f8e8d8c, 0x03020100, 0x07060504, 0x0b0a0908, /* shl  4 (16 -12)/shr12*/
-    0x008f8e8d, 0x04030201, 0x08070605, 0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
-    0x01008f8e, 0x05040302, 0x09080706, 0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
-    0x0201008f, 0x06050403, 0x0a090807, 0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
-};
-
-static void partial_fold(const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2,
-                         __m128i *xmm_crc3, __m128i *xmm_crc_part) {
-    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
-                                             0x00000001, 0xc6e41596);
-    const __m128i xmm_mask3 = _mm_set1_epi32((int32_t)0x80808080);
-
-    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
-    __m128i xmm_a0_0, xmm_a0_1;
-    __m128 ps_crc3, psa0_0, psa0_1, ps_res;
-
-    xmm_shl = _mm_load_si128((__m128i *)(pshufb_shf_table + (4 * (len - 1))));
-    xmm_shr = xmm_shl;
-    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);
-
-    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);
-
-    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
-    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
-    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);
-
-    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
-    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
-    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);
-
-    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
-    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
-    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);
-
-    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
-    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
-    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);
-
-    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
-    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);
-
-    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
-    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
-    psa0_1 = _mm_castsi128_ps(xmm_a0_1);
-
-    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
-    ps_res = _mm_xor_ps(ps_res, psa0_1);
-
-    *xmm_crc3 = _mm_castps_si128(ps_res);
-}
-
-static inline void crc32_fold_load(__m128i *fold, __m128i *fold0, __m128i *fold1, __m128i *fold2, __m128i *fold3) {
-    *fold0 = _mm_load_si128(fold + 0);
-    *fold1 = _mm_load_si128(fold + 1);
-    *fold2 = _mm_load_si128(fold + 2);
-    *fold3 = _mm_load_si128(fold + 3);
-}
-
-static inline void crc32_fold_save(__m128i *fold, const __m128i *fold0, const __m128i *fold1,
-                                   const __m128i *fold2, const __m128i *fold3) {
-    _mm_storeu_si128(fold + 0, *fold0);
-    _mm_storeu_si128(fold + 1, *fold1);
-    _mm_storeu_si128(fold + 2, *fold2);
-    _mm_storeu_si128(fold + 3, *fold3);
-}
-
-Z_INTERNAL uint32_t CRC32_FOLD_RESET(crc32_fold *crc) {
-    __m128i xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
-    __m128i xmm_zero = _mm_setzero_si128();
-    crc32_fold_save((__m128i *)crc->fold, &xmm_crc0, &xmm_zero, &xmm_zero, &xmm_zero);
-    return 0;
-}
-
-#define ONCE(op)                 if (first) { first = 0; op; }
-#define XOR_INITIAL128(where)    ONCE(where = _mm_xor_si128(where, xmm_initial))
-#ifdef X86_VPCLMULQDQ
-#  define XOR_INITIAL512(where)  ONCE(where = _mm512_xor_si512(where, zmm_initial))
-#endif
-
-#ifdef X86_VPCLMULQDQ
-#  include "crc32_fold_vpclmulqdq_tpl.h"
-#endif
-#include "crc32_fold_pclmulqdq_tpl.h"
-#define COPY
-#ifdef X86_VPCLMULQDQ
-#  include "crc32_fold_vpclmulqdq_tpl.h"
-#endif
-#include "crc32_fold_pclmulqdq_tpl.h"
-
-static const unsigned ALIGNED_(16) crc_k[] = {
-    0xccaa009e, 0x00000000, /* rk1 */
-    0x751997d0, 0x00000001, /* rk2 */
-    0xccaa009e, 0x00000000, /* rk5 */
-    0x63cd6124, 0x00000001, /* rk6 */
-    0xf7011640, 0x00000001, /* rk7 */
-    0xdb710640, 0x00000001  /* rk8 */
-};
-
-static const unsigned ALIGNED_(16) crc_mask[4] = {
-    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
-};
-
-static const unsigned ALIGNED_(16) crc_mask2[4] = {
-    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
-};
-
-Z_INTERNAL uint32_t CRC32_FOLD_FINAL(crc32_fold *crc) {
-    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
-    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);
-    __m128i xmm_crc0, xmm_crc1, xmm_crc2, xmm_crc3;
-    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;
-
-    crc32_fold_load((__m128i *)crc->fold, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);
-
-    /*
-     * k1
-     */
-    crc_fold = _mm_load_si128((__m128i *)crc_k);
-
-    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
-    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
-    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
-    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);
-
-    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
-    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
-    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
-    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);
-
-    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
-    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
-
-    /*
-     * k5
-     */
-    crc_fold = _mm_load_si128((__m128i *)(crc_k + 4));
-
-    xmm_crc0 = xmm_crc3;
-    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
-    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
-
-    xmm_crc0 = xmm_crc3;
-    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
-    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
-    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);
-
-    /*
-     * k7
-     */
-    xmm_crc1 = xmm_crc3;
-    xmm_crc2 = xmm_crc3;
-    crc_fold = _mm_load_si128((__m128i *)(crc_k + 8));
-
-    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
-    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);
-
-    xmm_crc2 = xmm_crc3;
-    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
-    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);
-
-    crc->value = ~((uint32_t)_mm_extract_epi32(xmm_crc3, 2));
-
-    return crc->value;
-}
-
-static inline uint32_t crc32_small(uint32_t crc, const uint8_t *buf, size_t len) {
-    uint32_t c = (~crc) & 0xffffffff;
-
-    while (len) {
-        len--;
-        CRC_DO1;
-    }
-
-    return c ^ 0xffffffff;
-}
-
-Z_INTERNAL uint32_t CRC32(uint32_t crc32, const uint8_t *buf, size_t len) {
-    /* For lens smaller than ~12, crc32_small method is faster.
-     * But there are also minimum requirements for the pclmul functions due to alignment */
-    if (len < 16)
-        return crc32_small(crc32, buf, len);
-
-    crc32_fold ALIGNED_(16) crc_state;
-    CRC32_FOLD_RESET(&crc_state);
-    CRC32_FOLD(&crc_state, buf, len, crc32);
-    return CRC32_FOLD_FINAL(&crc_state);
-}
index cad35b14eefc2346f21f286d2dcdf04fee02e9f0..a3567e94d17b714edc3ca91bc95f161c919f2f4e 100644 (file)
@@ -6,12 +6,34 @@
 #ifdef X86_VPCLMULQDQ_CRC
 
 #define X86_VPCLMULQDQ
-#define CRC32_FOLD_COPY  crc32_fold_vpclmulqdq_copy
-#define CRC32_FOLD       crc32_fold_vpclmulqdq
-#define CRC32_FOLD_RESET crc32_fold_vpclmulqdq_reset
-#define CRC32_FOLD_FINAL crc32_fold_vpclmulqdq_final
-#define CRC32            crc32_vpclmulqdq
+#include "crc32_fold_pclmulqdq_tpl.h"
 
-#include "crc32_pclmulqdq_tpl.h"
+Z_INTERNAL uint32_t crc32_fold_vpclmulqdq_reset(crc32_fold *crc) {
+    return crc32_fold_reset(crc);
+}
+
+Z_INTERNAL uint32_t crc32_fold_vpclmulqdq_final(crc32_fold *crc) {
+    return crc32_fold_final(crc);
+}
+
+Z_INTERNAL void crc32_fold_vpclmulqdq(crc32_fold *crc, const uint8_t *src, size_t len, uint32_t init_crc) {
+    crc32_fold_copy(crc, NULL, src, len, init_crc, 0);
+}
+
+Z_INTERNAL void crc32_fold_vpclmulqdq_copy(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
+    crc32_fold_copy(crc, dst, src, len, 0, 1);
+}
+
+Z_INTERNAL uint32_t crc32_vpclmulqdq(uint32_t crc32, const uint8_t *buf, size_t len) {
+    /* For lengths smaller than ~12, the crc32_small method is faster,
+     * but the pclmul functions also have minimum length requirements due to alignment. */
+    if (len < 16)
+        return crc32_small(crc32, buf, len);
+
+    crc32_fold ALIGNED_(16) crc_state;
+    crc32_fold_reset(&crc_state);
+    crc32_fold_copy(&crc_state, NULL, buf, len, crc32, 0);
+    return crc32_fold_final(&crc_state);
+}
 
 #endif