From: Vladislav Shchapov
Date: Sat, 14 Jun 2025 12:04:23 +0000 (+0500)
Subject: Add LoongArch64 (LSX) adler32, adler32_fold_copy implementation
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d3365b4f3cae72da7f5d8ce8e9abfed05796900c;p=thirdparty%2Fzlib-ng.git

Add LoongArch64 (LSX) adler32, adler32_fold_copy implementation

Signed-off-by: Vladislav Shchapov
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2ed57cd6..c83cf42d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1037,7 +1037,7 @@ if(WITH_OPTIM)
         check_lsx_intrinsics()
         if(HAVE_LSX_INTRIN)
             add_definitions(-DLOONGARCH_LSX)
-            set(LSX_SRCS ${ARCHDIR}/chunkset_lsx.c ${ARCHDIR}/compare256_lsx.c ${ARCHDIR}/slide_hash_lsx.c)
+            set(LSX_SRCS ${ARCHDIR}/adler32_lsx.c ${ARCHDIR}/chunkset_lsx.c ${ARCHDIR}/compare256_lsx.c ${ARCHDIR}/slide_hash_lsx.c)
             list(APPEND ZLIB_ARCH_SRCS ${LSX_SRCS})
             set_property(SOURCE ${LSX_SRCS} PROPERTY COMPILE_FLAGS "${LSXFLAG} ${NOLTOFLAG}")
         else()
diff --git a/arch/loongarch/Makefile.in b/arch/loongarch/Makefile.in
index 36988f60..424340f5 100644
--- a/arch/loongarch/Makefile.in
+++ b/arch/loongarch/Makefile.in
@@ -20,6 +20,7 @@ TOPDIR=$(SRCTOP)
 all: \
 	loongarch_features.o loongarch_features.lo \
 	crc32_la.o crc32_la.lo \
+	adler32_lsx.o adler32_lsx.lo \
 	chunkset_lasx.o chunkset_lasx.lo \
 	chunkset_lsx.o chunkset_lsx.lo \
 	compare256_lasx.o compare256_lasx.lo \
@@ -39,6 +40,12 @@ crc32_la.o: $(SRCDIR)/crc32_la.c
 crc32_la.lo: $(SRCDIR)/crc32_la.c
 	$(CC) $(SFLAGS) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/crc32_la.c
 
+adler32_lsx.o:
+	$(CC) $(CFLAGS) $(LSXFLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_lsx.c
+
+adler32_lsx.lo:
+	$(CC) $(SFLAGS) $(LSXFLAG) $(NOLTOFLAG) -DPIC $(INCLUDES) -c -o $@ $(SRCDIR)/adler32_lsx.c
+
 chunkset_lasx.o:
 	$(CC) $(CFLAGS) $(LASXFLAG) $(NOLTOFLAG) $(INCLUDES) -c -o $@ $(SRCDIR)/chunkset_lasx.c
 
diff --git a/arch/loongarch/adler32_lsx.c b/arch/loongarch/adler32_lsx.c
new file mode 100644
index 00000000..7f43262e
--- /dev/null
+++ b/arch/loongarch/adler32_lsx.c
@@ -0,0 +1,156 @@
+/* adler32_lsx.c -- compute the Adler-32 checksum of a data stream, based on Intel SSE4.2 implementation
+ * Copyright (C) 1995-2011 Mark Adler
+ * Copyright (C) 2025 Vladislav Shchapov
+ * Authors:
+ *   Adam Stylinski
+ *   Brian Bockelman
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zbuild.h"
+#include "adler32_p.h"
+
+#ifdef LOONGARCH_LSX
+
+#include <lsxintrin.h>
+#include "lsxintrin_ext.h"
+
+static inline uint32_t partial_hsum(__m128i x) {
+    __m128i second_int = __lsx_vbsrl_v(x, 8);
+    __m128i sum = __lsx_vadd_w(x, second_int);
+    return __lsx_vpickve2gr_w(sum, 0);
+}
+
+static inline uint32_t hsum(__m128i x) {
+    __m128i sum1 = __lsx_vilvh_d(x, x);
+    __m128i sum2 = __lsx_vadd_w(x, sum1);
+    __m128i sum3 = __lsx_vshuf4i_w(sum2, 0x01);
+    __m128i sum4 = __lsx_vadd_w(sum2, sum3);
+    return __lsx_vpickve2gr_w(sum4, 0);
+}
+
+static inline uint32_t adler32_fold_copy_impl(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len, const int COPY) {
+    if (src == NULL) return 1L;
+    if (len == 0) return adler;
+
+    uint32_t adler0, adler1;
+    adler1 = (adler >> 16) & 0xffff;
+    adler0 = adler & 0xffff;
+
+rem_peel:
+    if (len < 16) {
+        if (COPY) {
+            return adler32_copy_len_16(adler0, src, dst, len, adler1);
+        } else {
+            return adler32_len_16(adler0, src, len, adler1);
+        }
+    }
+
+    __m128i vbuf, vbuf_0;
+    __m128i vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,
+            v_sad_sum2, vsum2, vsum2_0;
+    __m128i zero = __lsx_vldi(0);
+    const __m128i dot2v = (__m128i)((v16i8){ 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17 });
+    const __m128i dot2v_0 = (__m128i)((v16i8){ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 });
+    const __m128i dot3v = __lsx_vreplgr2vr_h(1);
+    size_t k;
+
+    while (len >= 16) {
+
+        k = MIN(len, NMAX);
+        k -= k % 16;
+        len -= k;
+
+        vs1 = __lsx_vinsgr2vr_w(zero, adler0, 0);
+        vs2 = __lsx_vinsgr2vr_w(zero, adler1, 0);
+
+        vs3 = __lsx_vldi(0);
+        vs2_0 = __lsx_vldi(0);
+        vs1_0 = vs1;
+
+        while (k >= 32) {
+            /*
+               vs1 = adler + sum(c[i])
+               vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )
+            */
+            vbuf = __lsx_vld(src, 0);
+            vbuf_0 = __lsx_vld(src, 16);
+            src += 32;
+            k -= 32;
+
+            v_sad_sum1 = lsx_sad_bu(vbuf, zero);
+            v_sad_sum2 = lsx_sad_bu(vbuf_0, zero);
+
+            if (COPY) {
+                __lsx_vst(vbuf, dst, 0);
+                __lsx_vst(vbuf_0, dst, 16);
+                dst += 32;
+            }
+
+            v_short_sum2 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf, dot2v), __lsx_vmulwod_h_bu_b(vbuf, dot2v));
+            v_short_sum2_0 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf_0, dot2v_0), __lsx_vmulwod_h_bu_b(vbuf_0, dot2v_0));
+
+            vs1 = __lsx_vadd_w(v_sad_sum1, vs1);
+            vs3 = __lsx_vadd_w(vs1_0, vs3);
+
+            vsum2 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2, dot3v), v_short_sum2, dot3v);
+            vsum2_0 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2_0, dot3v), v_short_sum2_0, dot3v);
+            vs1 = __lsx_vadd_w(v_sad_sum2, vs1);
+            vs2 = __lsx_vadd_w(vsum2, vs2);
+            vs2_0 = __lsx_vadd_w(vsum2_0, vs2_0);
+            vs1_0 = vs1;
+        }
+
+        vs2 = __lsx_vadd_w(vs2_0, vs2);
+        vs3 = __lsx_vslli_w(vs3, 5);
+        vs2 = __lsx_vadd_w(vs3, vs2);
+        vs3 = __lsx_vldi(0);
+
+        while (k >= 16) {
+            /*
+               vs1 = adler + sum(c[i])
+               vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )
+            */
+            vbuf = __lsx_vld(src, 0);
+            src += 16;
+            k -= 16;
+
+            v_sad_sum1 = lsx_sad_bu(vbuf, zero);
+            v_short_sum2 = __lsx_vsadd_h(__lsx_vmulwev_h_bu_b(vbuf, dot2v_0), __lsx_vmulwod_h_bu_b(vbuf, dot2v_0));
+
+            vs1 = __lsx_vadd_w(v_sad_sum1, vs1);
+            vs3 = __lsx_vadd_w(vs1_0, vs3);
+            vsum2 = __lsx_vmaddwod_w_h(__lsx_vmulwev_w_h(v_short_sum2, dot3v), v_short_sum2, dot3v);
+            vs2 = __lsx_vadd_w(vsum2, vs2);
+            vs1_0 = vs1;
+
+            if (COPY) {
+                __lsx_vst(vbuf, dst, 0);
+                dst += 16;
+            }
+        }
+
+        vs3 = __lsx_vslli_w(vs3, 4);
+        vs2 = __lsx_vadd_w(vs2, vs3);
+
+        adler0 = partial_hsum(vs1) % BASE;
+        adler1 = hsum(vs2) % BASE;
+    }
+
+    /* If this is true, there are fewer than 16 elements remaining */
+    if (len) {
+        goto rem_peel;
+    }
+
+    return adler0 | (adler1 << 16);
+}
+
+Z_INTERNAL uint32_t adler32_lsx(uint32_t adler, const uint8_t *src, size_t len) {
+    return adler32_fold_copy_impl(adler, NULL, src, len, 0);
+}
+
+Z_INTERNAL uint32_t adler32_fold_copy_lsx(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) {
+    return adler32_fold_copy_impl(adler, dst, src, len, 1);
+}
+
+#endif
diff --git a/arch/loongarch/loongarch_functions.h b/arch/loongarch/loongarch_functions.h
index c70d6c13..fa188619 100644
--- a/arch/loongarch/loongarch_functions.h
+++ b/arch/loongarch/loongarch_functions.h
@@ -15,6 +15,8 @@ void crc32_fold_loongarch64(crc32_fold *crc, const uint8_t *src, size_t len,
 #endif
 
 #ifdef LOONGARCH_LSX
+uint32_t adler32_lsx(uint32_t adler, const uint8_t *src, size_t len);
+uint32_t adler32_fold_copy_lsx(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len);
 void slide_hash_lsx(deflate_state *s);
 # ifdef HAVE_BUILTIN_CTZ
 uint32_t compare256_lsx(const uint8_t *src0, const uint8_t *src1);
@@ -49,6 +51,10 @@ void inflate_fast_lasx(PREFIX3(stream) *strm, uint32_t start);
 #  define native_crc32_fold_copy crc32_fold_copy_loongarch64
 # endif
 # if defined(LOONGARCH_LSX) && defined(__loongarch_sx)
+#  undef native_adler32
+#  define native_adler32 adler32_lsx
+#  undef native_adler32_fold_copy
+#  define native_adler32_fold_copy adler32_fold_copy_lsx
 #  undef native_slide_hash
 #  define native_slide_hash slide_hash_lsx
 #  undef native_chunksize
diff --git a/arch/loongarch/lsxintrin_ext.h b/arch/loongarch/lsxintrin_ext.h
index c89105e0..0a0503b9 100644
--- a/arch/loongarch/lsxintrin_ext.h
+++ b/arch/loongarch/lsxintrin_ext.h
@@ -8,6 +8,13 @@
 
 #include <lsxintrin.h>
 
+static inline __m128i lsx_sad_bu(__m128i a, __m128i b) {
+    __m128i tmp = __lsx_vabsd_bu(a, b);
+    tmp = __lsx_vhaddw_hu_bu(tmp, tmp);
+    tmp = __lsx_vhaddw_wu_hu(tmp, tmp);
+    return __lsx_vhaddw_du_wu(tmp, tmp);
+}
+
 static inline int lsx_movemask_b(__m128i v) {
     return __lsx_vpickve2gr_w(__lsx_vmskltz_b(v), 0);
 }
diff --git a/configure b/configure
index 80fd5538..e37859bb 100755
--- a/configure
+++ b/configure
@@ -2316,8 +2316,8 @@ EOF
             CFLAGS="${CFLAGS} -DLOONGARCH_LSX"
             SFLAGS="${SFLAGS} -DLOONGARCH_LSX"
 
-            ARCH_STATIC_OBJS="${ARCH_STATIC_OBJS} chunkset_lsx.o compare256_lsx.o slide_hash_lsx.o"
-            ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} chunkset_lsx.lo compare256_lsx.lo slide_hash_lsx.lo"
+            ARCH_STATIC_OBJS="${ARCH_STATIC_OBJS} adler32_lsx.o chunkset_lsx.o compare256_lsx.o slide_hash_lsx.o"
+            ARCH_SHARED_OBJS="${ARCH_SHARED_OBJS} adler32_lsx.lo chunkset_lsx.lo compare256_lsx.lo slide_hash_lsx.lo"
         fi
 
         check_lasx_intrinsics
diff --git a/functable.c b/functable.c
index 02bd7d3f..f31138b1 100644
--- a/functable.c
+++ b/functable.c
@@ -279,6 +279,8 @@ static void init_functable(void) {
 #endif
 #ifdef LOONGARCH_LSX
     if (cf.loongarch.has_lsx) {
+        ft.adler32 = &adler32_lsx;
+        ft.adler32_fold_copy = &adler32_fold_copy_lsx;
         ft.slide_hash = slide_hash_lsx;
 # ifdef HAVE_BUILTIN_CTZ
         ft.compare256 = &compare256_lsx;
diff --git a/test/benchmarks/benchmark_adler32.cc b/test/benchmarks/benchmark_adler32.cc
index b1278950..ee36a809 100644
--- a/test/benchmarks/benchmark_adler32.cc
+++ b/test/benchmarks/benchmark_adler32.cc
@@ -97,4 +97,8 @@ BENCHMARK_ADLER32(avx512, adler32_avx512, test_cpu_features.x86.has_avx512_commo
 BENCHMARK_ADLER32(avx512_vnni, adler32_avx512_vnni, test_cpu_features.x86.has_avx512vnni);
 #endif
 
+#ifdef LOONGARCH_LSX
+BENCHMARK_ADLER32(lsx, adler32_lsx, test_cpu_features.loongarch.has_lsx);
+#endif
+
 #endif
diff --git a/test/benchmarks/benchmark_adler32_copy.cc b/test/benchmarks/benchmark_adler32_copy.cc
index bca8df18..505bc252 100644
--- a/test/benchmarks/benchmark_adler32_copy.cc
+++ b/test/benchmarks/benchmark_adler32_copy.cc
@@ -127,4 +127,9 @@ BENCHMARK_ADLER32_BASELINE_COPY(avx512_vnni_baseline, adler32_avx512_vnni, test_
 BENCHMARK_ADLER32_COPY(avx512_vnni, adler32_fold_copy_avx512_vnni, test_cpu_features.x86.has_avx512vnni);
 #endif
 
+#ifdef LOONGARCH_LSX
+BENCHMARK_ADLER32_BASELINE_COPY(lsx_baseline, adler32_lsx, test_cpu_features.loongarch.has_lsx);
+BENCHMARK_ADLER32_COPY(lsx, adler32_fold_copy_lsx, test_cpu_features.loongarch.has_lsx);
+#endif
+
 #endif
diff --git a/test/test_adler32.cc b/test/test_adler32.cc
index b3d03021..eb8bccdf 100644
--- a/test/test_adler32.cc
+++ b/test/test_adler32.cc
@@ -392,4 +392,8 @@ TEST_ADLER32(avx512, adler32_avx512, test_cpu_features.x86.has_avx512_common)
 TEST_ADLER32(avx512_vnni, adler32_avx512_vnni, test_cpu_features.x86.has_avx512vnni)
 #endif
 
+#ifdef LOONGARCH_LSX
+TEST_ADLER32(lsx, adler32_lsx, test_cpu_features.loongarch.has_lsx)
+#endif
+
 #endif
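
Review notes (illustrative sketches, not part of the applied diff):

The vectorized loop in adler32_lsx.c implements the standard Adler-32 block
recurrence: for a block of n bytes c[0..n-1], s1' = s1 + sum(c[i]) and
s2' = s2 + n*s1 + sum((n-i)*c[i]). In the code, lsx_sad_bu supplies the plain
byte sum feeding vs1, the dot2v/dot2v_0 weights (32..17 and 16..1) supply the
weighted sum feeding vs2, and vs3, shifted left by 5 or 4 (times 32 or 16),
carries the n*s1 term. A minimal scalar model of one 16-byte step, assuming
BASE = 65521 as in adler32_p.h (adler32_16_model is a hypothetical name used
only for this sketch; the real code defers the modulo to NMAX boundaries):

    #include <stdint.h>

    #define BASE 65521U  /* largest prime below 65536, as in adler32_p.h */

    /* Hypothetical scalar model of one 16-byte step of the LSX loop. */
    static uint32_t adler32_16_model(uint32_t adler, const uint8_t buf[16]) {
        uint32_t s1 = adler & 0xffff;
        uint32_t s2 = (adler >> 16) & 0xffff;
        uint32_t sum = 0, wsum = 0;
        for (int i = 0; i < 16; i++) {
            sum  += buf[i];                      /* byte sum: lsx_sad_bu(vbuf, zero) -> vs1 */
            wsum += (uint32_t)(16 - i) * buf[i]; /* weights 16..1: dot2v_0 -> vs2 */
        }
        s2 = (s2 + 16U * s1 + wsum) % BASE;      /* 16*s1 is the vs3 << 4 term */
        s1 = (s1 + sum) % BASE;
        return (s2 << 16) | s1;
    }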
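
The lsx_sad_bu helper added to lsxintrin_ext.h mirrors SSE2's _mm_sad_epu8:
an unsigned byte absolute difference (__lsx_vabsd_bu) followed by three
pairwise widening horizontal adds, yielding one 64-bit sum per 64-bit lane.
A scalar sketch of that contract (sad_bu_model is an illustrative name only):

    #include <stdint.h>

    /* Illustrative scalar model of lsx_sad_bu: per 64-bit lane, the sum of
     * eight unsigned byte absolute differences. With b = 0 it reduces to a
     * horizontal byte sum, which is how the adler32 loop uses it. */
    static void sad_bu_model(const uint8_t a[16], const uint8_t b[16], uint64_t out[2]) {
        for (int lane = 0; lane < 2; lane++) {
            uint64_t acc = 0;
            for (int i = 0; i < 8; i++) {
                int d = (int)a[lane * 8 + i] - (int)b[lane * 8 + i];
                acc += (uint64_t)(d < 0 ? -d : d);
            }
            out[lane] = acc;
        }
    }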
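
Dispatch follows the existing pattern: at runtime init_functable() rewrites the
adler32 entries when cf.loongarch.has_lsx is set, and native builds (compiled
with the LSX flag, so __loongarch_sx is defined) map native_adler32 directly to
adler32_lsx via loongarch_functions.h. Callers see no API change; a caller
sketch, assuming the zng_adler32 prototype from the zlib-ng native API:

    #include <stdint.h>
    #include <stdio.h>
    #include "zlib-ng.h"

    int main(void) {
        static const uint8_t data[] = "The quick brown fox jumps over the lazy dog";
        /* Starts from the canonical seed 1; on LSX-capable LoongArch64 CPUs
         * this call is expected to be served by adler32_lsx via the functable. */
        uint32_t adler = zng_adler32(1, data, sizeof(data) - 1);
        printf("adler32 = 0x%08x\n", (unsigned)adler);
        return 0;
    }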