Added an SSE4 optimized adler32 checksum
author    Adam Stylinski <kungfujesus06@gmail.com>
          Tue, 4 Jan 2022 15:38:39 +0000 (10:38 -0500)
committer Hans Kristian Rosbach <hk-github@circlestorm.org>
          Sat, 8 Jan 2022 18:27:28 +0000 (19:27 +0100)
This variant uses the lower-cycle-count psadbw instruction in place
of pmaddubsw for the running sum, which does not need a multiplication.

This allows that sum to be computed independently, partially overlapping
the running "sum2" half of the checksum.  We have also moved the shift
outside of the loop, breaking a small data dependency chain.  The code
now does a vectorized horizontal sum without having to rebase to the
adler32 base, as NMAX is defined as the maximum number of scalar sums
that can be performed before overflow, so we are actually safe doing
this without widening to higher precision.  We can do a partial
horizontal sum because psadbw only accumulates 16-bit sums into 2 of
the 32-bit vector lanes; the other two can safely be assumed to be 0.
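For reference, the core idea can be sketched as below. This is a minimal,
standalone illustration of one NMAX-bounded block (len a multiple of 16),
not the committed file; the helper name demo_adler32_block is hypothetical,
while the intrinsics and loop structure mirror the new adler32_sse41.c:

/* Sketch: psadbw running sum, deferred "<< 4" shift, partial horizontal sum. */
#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

#define BASE 65521U   /* largest prime smaller than 65536 */
#define NMAX 5552     /* max scalar sums before 32-bit overflow is possible */

static uint32_t demo_adler32_block(uint32_t adler, const unsigned char *buf, size_t len) {
    uint32_t sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;

    const __m128i dot2v = _mm_setr_epi8(16, 15, 14, 13, 12, 11, 10, 9,
                                         8,  7,  6,  5,  4,  3,  2, 1);
    const __m128i dot3v = _mm_set1_epi16(1);
    const __m128i zero  = _mm_setzero_si128();

    __m128i vs1   = _mm_cvtsi32_si128((int)adler);
    __m128i vs2   = _mm_cvtsi32_si128((int)sum2);
    __m128i vs1_0 = vs1;
    __m128i vs3   = _mm_setzero_si128();

    while (len >= 16) {
        __m128i vbuf = _mm_loadu_si128((const __m128i *)buf);
        buf += 16;
        len -= 16;

        /* psadbw: byte sums land in 32-bit lanes 0 and 2 only */
        vs1 = _mm_add_epi32(_mm_sad_epu8(vbuf, zero), vs1);
        /* accumulate the previous vs1; the "* 16" is applied once after the loop */
        vs3 = _mm_add_epi32(vs1_0, vs3);
        /* weighted byte sums for the sum2 half */
        __m128i v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v);
        vs2 = _mm_add_epi32(_mm_madd_epi16(v_short_sum2, dot3v), vs2);
        vs1_0 = vs1;
    }

    /* deferred shift, outside the dependency chain of the loop */
    vs2 = _mm_add_epi32(vs2, _mm_slli_epi32(vs3, 4));

    /* partial horizontal sum: lanes 1 and 3 of vs1 are known to be zero */
    __m128i hi = _mm_bsrli_si128(vs1, 8);
    adler = (uint32_t)_mm_cvtsi128_si32(_mm_add_epi32(vs1, hi)) % BASE;

    /* full horizontal sum for vs2, which has nonzero values in all 4 lanes */
    __m128i s1 = _mm_unpackhi_epi64(vs2, vs2);
    __m128i s2 = _mm_add_epi32(vs2, s1);
    __m128i s3 = _mm_shuffle_epi32(s2, 0x01);
    sum2 = (uint32_t)_mm_cvtsi128_si32(_mm_add_epi32(s2, s3)) % BASE;

    return (sum2 << 16) | adler;
}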

CMakeLists.txt
README.md
arch/x86/Makefile.in
arch/x86/adler32_sse41.c [new file with mode: 0644]
configure
functable.c

index 526065f3207c8c6b34648bfeffbf7882a790b970..956f735634692c1321489988a536718a618d124c 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -110,7 +110,8 @@ elseif(BASEARCH_X86_FOUND)
     option(WITH_AVX512VNNI "Build with AVX512 VNNI extensions" ON)
     option(WITH_SSE2 "Build with SSE2" ON)
     option(WITH_SSSE3 "Build with SSSE3" ON)
-    option(WITH_SSE4 "Build with SSE4" ON)
+    option(WITH_SSE41 "Build with SSE41" ON)
+    option(WITH_SSE42 "Build with SSE42" ON)
     option(WITH_PCLMULQDQ "Build with PCLMULQDQ" ON)
 endif()
 
@@ -125,7 +126,8 @@ mark_as_advanced(FORCE
     WITH_DFLTCC_INFLATE
     WITH_CRC32_VX
     WITH_AVX2 WITH_SSE2
-    WITH_SSSE3 WITH_SSE4
+    WITH_SSSE3 WITH_SSE41
+    WITH_SSE42
     WITH_PCLMULQDQ
     WITH_ALTIVEC
     WITH_POWER8
@@ -753,14 +755,26 @@ if(WITH_OPTIM)
                 set(WITH_AVX512VNNI OFF)
             endif()
         endif()
-        if(WITH_SSE4)
-            check_sse4_intrinsics()
+        if(WITH_SSE41)
+            check_sse41_intrinsics()
+            if(HAVE_SSE41_INTRIN)
+                add_definitions(-DX86_SSE41 -DX86_SSE41_ADLER32)
+                set(SSE41_SRCS ${ARCHDIR}/adler32_sse41.c)
+                add_feature_info(SSE4_ADLER32 1 "Support SSE41-accelerated adler32, using \"${SSE41FLAG}\"")
+                list(APPEND ZLIB_ARCH_SRCS ${SSE41_SRCS})
+                set_property(SOURCE ${SSE41_SRCS} PROPERTY COMPILE_FLAGS "${SSE41FLAG} ${NOLTOFLAG}")
+            else()
+                set(WITH_SSE41 OFF)
+            endif()
+        endif()
+        if(WITH_SSE42)
+            check_sse42_intrinsics()
             if(HAVE_SSE42CRC_INLINE_ASM OR HAVE_SSE42CRC_INTRIN)
                 add_definitions(-DX86_SSE42_CRC_HASH)
                 set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
-                add_feature_info(SSE42_CRC 1 "Support SSE4.2 optimized CRC hash generation, using \"${SSE4FLAG}\"")
+                add_feature_info(SSE42_CRC 1 "Support SSE4.2 optimized CRC hash generation, using \"${SSE42FLAG}\"")
                 list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
-                set_property(SOURCE ${SSE42_SRCS} PROPERTY COMPILE_FLAGS "${SSE4FLAG} ${NOLTOFLAG}")
+                set_property(SOURCE ${SSE42_SRCS} PROPERTY COMPILE_FLAGS "${SSE42FLAG} ${NOLTOFLAG}")
                 if(HAVE_SSE42CRC_INTRIN)
                     add_definitions(-DX86_SSE42_CRC_INTRIN)
                 endif()
@@ -768,9 +782,9 @@ if(WITH_OPTIM)
             if(HAVE_SSE42CMPSTR_INTRIN)
                 add_definitions(-DX86_SSE42_CMP_STR)
                 set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
-                add_feature_info(SSE42_COMPARE258 1 "Support SSE4.2 optimized compare258, using \"${SSE4FLAG}\"")
+                add_feature_info(SSE42_COMPARE258 1 "Support SSE4.2 optimized compare258, using \"${SSE42FLAG}\"")
                 list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
-                set_property(SOURCE ${SSE42_SRCS} PROPERTY COMPILE_FLAGS "${SSE4FLAG} ${NOLTOFLAG}")
+                set_property(SOURCE ${SSE42_SRCS} PROPERTY COMPILE_FLAGS "${SSE42FLAG} ${NOLTOFLAG}")
             endif()
             if(NOT HAVE_SSE42CRC_INLINE_ASM AND NOT HAVE_SSE42CRC_INTRIN AND NOT HAVE_SSE42CMPSTR_INTRIN)
                 set(WITH_SSE4 OFF)
@@ -805,14 +819,14 @@ if(WITH_OPTIM)
                 set(WITH_SSSE3 OFF)
             endif()
         endif()
-        if(WITH_PCLMULQDQ AND WITH_SSSE3 AND WITH_SSE4)
+        if(WITH_PCLMULQDQ AND WITH_SSSE3 AND WITH_SSE42)
             check_pclmulqdq_intrinsics()
             if(HAVE_PCLMULQDQ_INTRIN AND HAVE_SSSE3_INTRIN)
                 add_definitions(-DX86_PCLMULQDQ_CRC)
                 set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_fold_pclmulqdq.c)
-                add_feature_info(PCLMUL_CRC 1 "Support CRC hash generation using PCLMULQDQ, using \"${SSSE3FLAG} ${SSE4FLAG} ${PCLMULFLAG}\"")
+                add_feature_info(PCLMUL_CRC 1 "Support CRC hash generation using PCLMULQDQ, using \"${SSSE3FLAG} ${SSE42FLAG} ${PCLMULFLAG}\"")
                 list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
-                set_property(SOURCE ${PCLMULQDQ_SRCS} PROPERTY COMPILE_FLAGS "${SSSE3FLAG} ${SSE4FLAG} ${PCLMULFLAG} ${NOLTOFLAG}")
+                set_property(SOURCE ${PCLMULQDQ_SRCS} PROPERTY COMPILE_FLAGS "${SSSE3FLAG} ${SSE42FLAG} ${PCLMULFLAG} ${NOLTOFLAG}")
             else()
                 set(WITH_PCLMULQDQ OFF)
             endif()
@@ -1441,7 +1455,8 @@ elseif(BASEARCH_X86_FOUND)
     add_feature_info(WITH_AVX512VNNI WITH_AVX512VNNI "Build with AVX512 VNNI")
     add_feature_info(WITH_SSE2 WITH_SSE2 "Build with SSE2")
     add_feature_info(WITH_SSSE3 WITH_SSSE3 "Build with SSSE3")
-    add_feature_info(WITH_SSE4 WITH_SSE4 "Build with SSE4")
+    add_feature_info(WITH_SSE41 WITH_SSE41 "Build with SSE41")
+    add_feature_info(WITH_SSE42 WITH_SSE42 "Build with SSE42")
     add_feature_info(WITH_PCLMULQDQ WITH_PCLMULQDQ "Build with PCLMULQDQ")
 endif()
 
index 0da19e407752c5f1c133b69d5bdd4583eb7ffca4..44e63db3025ac9f91b65a1d65316c03da7b07548 100644 (file)
--- a/README.md
+++ b/README.md
@@ -200,7 +200,8 @@ Advanced Build Options
 | WITH_AVX512                     |                       | Build with AVX512 intrinsics                                        | ON                     |
 | WITH_AVX512VNNI                 |                       | Build with AVX512VNNI intrinsics                                    | ON                     |
 | WITH_SSE2                       |                       | Build with SSE2 intrinsics                                          | ON                     |
-| WITH_SSE4                       |                       | Build with SSE4 intrinsics                                          | ON                     |
+| WITH_SSE41                      |                       | Build with SSE41 intrinsics                                         | ON                     |
+| WITH_SSE42                      |                       | Build with SSE42 intrinsics                                         | ON                     |
 | WITH_PCLMULQDQ                  |                       | Build with PCLMULQDQ intrinsics                                     | ON                     |
 | WITH_ACLE                       | --without-acle        | Build with ACLE intrinsics                                          | ON                     |
 | WITH_NEON                       | --without-neon        | Build with NEON intrinsics                                          | ON                     |
index 3e92738eeed49dcdd2969b001869861a1d7cfdd5..f54a695c22cc88f50c29425db73ffd3e53587b47 100644 (file)
--- a/arch/x86/Makefile.in
+++ b/arch/x86/Makefile.in
@@ -13,7 +13,8 @@ AVX512VNNIFLAG=-mavx512vnni
 AVX2FLAG=-mavx2
 SSE2FLAG=-msse2
 SSSE3FLAG=-mssse3
-SSE4FLAG=-msse4
+SSE41FLAG=-msse4.1
+SSE42FLAG=-msse4.2
 PCLMULFLAG=-mpclmul
 NOLTOFLAG=
 
@@ -26,6 +27,7 @@ all: \
        adler32_avx.o adler32_avx.lo \
        adler32_avx512.o adler32_avx512.lo \
        adler32_avx512_vnni.o adler32_avx512_vnni.lo \
+       adler32_sse41.o adler32_sse41.lo \
        adler32_ssse3.o adler32_ssse3.lo \
        chunkset_avx.o chunkset_avx.lo \
        chunkset_sse.o chunkset_sse.lo \
@@ -61,22 +63,22 @@ compare258_avx.lo:
        $(CC) $(SFLAGS) $(AVX2FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/compare258_avx.c
 
 compare258_sse.o:
-       $(CC) $(CFLAGS) $(SSE4FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/compare258_sse.c
+       $(CC) $(CFLAGS) $(SSE42FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/compare258_sse.c
 
 compare258_sse.lo:
-       $(CC) $(SFLAGS) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/compare258_sse.c
+       $(CC) $(SFLAGS) $(SSE42FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/compare258_sse.c
 
 insert_string_sse.o:
-       $(CC) $(CFLAGS) $(SSE4FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/insert_string_sse.c
+       $(CC) $(CFLAGS) $(SSE42FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/insert_string_sse.c
 
 insert_string_sse.lo:
-       $(CC) $(SFLAGS) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/insert_string_sse.c
+       $(CC) $(SFLAGS) $(SSE42FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/insert_string_sse.c
 
 crc32_fold_pclmulqdq.o:
-       $(CC) $(CFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
+       $(CC) $(CFLAGS) $(PCLMULFLAG) $(SSE42FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
 
 crc32_fold_pclmulqdq.lo:
-       $(CC) $(SFLAGS) $(PCLMULFLAG) $(SSE4FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
+       $(CC) $(SFLAGS) $(PCLMULFLAG) $(SSE42FLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_fold_pclmulqdq.c
 
 slide_hash_avx.o:
        $(CC) $(CFLAGS) $(AVX2FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/slide_hash_avx.c
@@ -108,6 +110,12 @@ adler32_avx512_vnni.o: $(SRCDIR)/adler32_avx512_vnni.c
 adler32_avx512_vnni.lo: $(SRCDIR)/adler32_avx512_vnni.c
        $(CC) $(SFLAGS) $(AVX512VNNIFLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_avx512_vnni.c
 
+adler32_sse41.o: $(SRCDIR)/adler32_sse41.c
+       $(CC) $(CFLAGS) $(SSE41FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_sse41.c
+
+adler32_sse41.lo: $(SRCDIR)/adler32_sse41.c
+       $(CC) $(SFLAGS) $(SSE41FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_sse41.c
+
 adler32_ssse3.o: $(SRCDIR)/adler32_ssse3.c
        $(CC) $(CFLAGS) $(SSSE3FLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_ssse3.c
 
diff --git a/arch/x86/adler32_sse41.c b/arch/x86/adler32_sse41.c
new file mode 100644 (file)
index 0000000..1e0f0ca
--- /dev/null
+++ b/arch/x86/adler32_sse41.c
@@ -0,0 +1,118 @@
+/* adler32_sse41.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995-2011 Mark Adler
+ * Authors:
+ *   Adam Stylinski <kungfujesus06@gmail.com>
+ *   Brian Bockelman <bockelman@gmail.com>
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "../../zbuild.h"
+#include "../../zutil.h"
+
+#include "../../adler32_p.h"
+
+#ifdef X86_SSE41_ADLER32
+
+#include <immintrin.h>
+
+static inline uint32_t partial_hsum(__m128i x) {
+    __m128i second_int = _mm_bsrli_si128(x, 8);
+    __m128i sum = _mm_add_epi32(x, second_int);
+    return _mm_cvtsi128_si32(sum);
+}
+
+static inline uint32_t hsum(__m128i x) {
+    __m128i sum1 = _mm_unpackhi_epi64(x, x);
+    __m128i sum2 = _mm_add_epi32(x, sum1);
+    __m128i sum3 = _mm_shuffle_epi32(sum2, 0x01);
+    __m128i sum4 = _mm_add_epi32(sum2, sum3);
+    return _mm_cvtsi128_si32(sum4);
+}
+
+Z_INTERNAL uint32_t adler32_sse41(uint32_t adler, const unsigned char *buf, size_t len) {
+    uint32_t sum2;
+
+     /* split Adler-32 into component sums */
+    sum2 = (adler >> 16) & 0xffff;
+    adler &= 0xffff;
+
+    /* in case user likes doing a byte at a time, keep it fast */
+    if (UNLIKELY(len == 1))
+        return adler32_len_1(adler, buf, sum2);
+
+    /* initial Adler-32 value (deferred check for len == 1 speed) */
+    if (UNLIKELY(buf == NULL))
+        return 1L;
+
+    /* in case short lengths are provided, keep it somewhat fast */
+    if (UNLIKELY(len < 16))
+        return adler32_len_16(adler, buf, len, sum2);
+
+    const __m128i dot2v = _mm_setr_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
+    const __m128i dot3v = _mm_set1_epi16(1);
+    const __m128i zero = _mm_setzero_si128();
+
+    __m128i vs1 = _mm_cvtsi32_si128(adler);
+    __m128i vs2 = _mm_cvtsi32_si128(sum2);
+
+    while (len >= 16) {
+       __m128i vs1_0 = vs1;
+       __m128i vs3 = _mm_setzero_si128();
+
+       int k = (len < NMAX ? (int)len : NMAX);
+       k -= k % 16;
+       len -= k;
+
+       /* Aligned version of the loop */
+       if (((uintptr_t)buf & 15) == 0) {
+           while (k >= 16) {
+               /*
+                  vs1 = adler + sum(c[i])
+                  vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )
+               */
+               __m128i vbuf = _mm_load_si128((__m128i*)buf);
+               buf += 16;
+               k -= 16;
+
+               __m128i v_sad_sum1 = _mm_sad_epu8(vbuf, zero);
+               vs1 = _mm_add_epi32(v_sad_sum1, vs1);
+               vs3 = _mm_add_epi32(vs1_0, vs3);
+               __m128i v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v);
+               __m128i vsum2 = _mm_madd_epi16(v_short_sum2, dot3v);
+               vs2 = _mm_add_epi32(vsum2, vs2);
+               vs1_0 = vs1;
+           }
+       } else {
+           while (k >= 16) {
+               __m128i vbuf = _mm_loadu_si128((__m128i*)buf);
+               buf += 16;
+               k -= 16;
+
+               __m128i v_sad_sum1 = _mm_sad_epu8(vbuf, zero);
+               vs1 = _mm_add_epi32(v_sad_sum1, vs1);
+               vs3 = _mm_add_epi32(vs1_0, vs3);
+               __m128i v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v);
+               __m128i vsum2 = _mm_madd_epi16(v_short_sum2, dot3v);
+               vs2 = _mm_add_epi32(vsum2, vs2);
+               vs1_0 = vs1;
+           }
+       }
+
+       vs3 = _mm_slli_epi32(vs3, 4);
+       vs2 = _mm_add_epi32(vs2, vs3);
+
+       /* We don't actually need to do a full horizontal sum, since psadbw is actually doing
+        * a partial reduction sum implicitly and only summing to integers in vector positions
+        * 0 and 2. This saves us some contention on the shuffle port(s) */
+       adler = partial_hsum(vs1) % BASE;
+       sum2 = hsum(vs2) % BASE;
+
+       vs1 = _mm_cvtsi32_si128(adler);
+       vs2 = _mm_cvtsi32_si128(sum2);
+    }
+
+    /* Process tail (len < 16).  */
+    return adler32_len_16(adler, buf, len, sum2);
+}
+
+#endif
index 5b1c5e65b265098f22ad67885457d1785e566575..2c17507a861e648d2089e482b5869f5414120f1e 100755 (executable)
--- a/configure
+++ b/configure
@@ -1447,7 +1447,7 @@ case "${ARCH}" in
                 CFLAGS="${CFLAGS} -DX86_SSE41_ADLER32"
                 SFLAGS="${SFLAGS} -DX86_SSE41_ADLER32"
                 ARCH_STATIC_OBJS="${ARCH_STATIC_OBJS} adler32_sse41.o"
-                ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} adler32_sse41.o"
+                ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} adler32_sse41.lo"
             fi
 
             check_sse42_intrinsics
index 5a607beb2dc6a005463136ef2b246e7c974943ca..ccb0b69a302a5277e82151569187b6d7d221648f 100644 (file)
--- a/functable.c
+++ b/functable.c
@@ -63,6 +63,9 @@ extern uint32_t adler32_neon(uint32_t adler, const unsigned char *buf, size_t le
 #ifdef PPC_VMX_ADLER32
 extern uint32_t adler32_vmx(uint32_t adler, const unsigned char *buf, size_t len);
 #endif
+#ifdef X86_SSE41_ADLER32
+extern uint32_t adler32_sse41(uint32_t adler, const unsigned char *buf, size_t len);
+#endif
 #ifdef X86_SSSE3_ADLER32
 extern uint32_t adler32_ssse3(uint32_t adler, const unsigned char *buf, size_t len);
 #endif
@@ -304,6 +307,10 @@ Z_INTERNAL uint32_t adler32_stub(uint32_t adler, const unsigned char *buf, size_
     if (x86_cpu_has_ssse3)
         functable.adler32 = &adler32_ssse3;
 #endif
+#ifdef X86_SSE41_ADLER32
+    if (x86_cpu_has_sse41)
+        functable.adler32 = &adler32_sse41;
+#endif
 #ifdef X86_AVX2_ADLER32
     if (x86_cpu_has_avx2)
         functable.adler32 = &adler32_avx2;