From: Arpad Panyik
Date: Tue, 8 Jul 2025 17:07:41 +0000 (+0000)
Subject: AArch64: Add Neon path for convertSequences_noRepcodes
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=07cd78d3668271cffc9b586d857455bf47aef4fb;p=thirdparty%2Fzstd.git

AArch64: Add Neon path for convertSequences_noRepcodes

Add a 4-way Neon implementation of the convertSequences_noRepcodes
function. Remove the 'static' keyword from all of its implementations
so that unit tests can be added.

Performance relative to Clang-18, measured using:
`./fullbench -b18 -l5 enwik5`

Neoverse-V2    before      after
  Clang-18:   100.000%   311.703%
  Clang-19:   100.191%   311.714%
  Clang-20:   100.181%   311.723%
  GCC-13:     107.520%   252.309%
  GCC-14:     107.652%   253.158%
  GCC-15:     107.674%   253.168%

Cortex-A720    before      after
  Clang-18:   100.000%   204.512%
  Clang-19:   102.825%   204.600%
  Clang-20:   102.807%   204.558%
  GCC-13:     110.668%   203.594%
  GCC-14:     110.684%   203.978%
  GCC-15:     102.864%   204.299%

Co-authored-by: Thomas Daubney
---
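Note on the return value: longLen packs both the position and the kind of
an over-long length. As in the scalar fallback below, an mlBase
(matchLength - MINMATCH) above 65535 at sequence n yields n + 1, and a
litLength above 65535 yields n + nbSequences + 1. A caller could decode it
as in this minimal sketch (a hypothetical helper, not part of this patch;
only the encoding itself is taken from the code below):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical: decode a longLen value as produced by
     * convertSequences_noRepcodes. 0 means no length above 65535. */
    static void reportLongLength(size_t longLen, size_t nbSequences)
    {
        if (longLen == 0) {
            printf("no length above 65535\n");
        } else if (longLen > nbSequences) {
            printf("long litLength at sequence %zu\n",
                   longLen - nbSequences - 1);
        } else {
            printf("long matchLength at sequence %zu\n", longLen - 1);
        }
    }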
diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index 16423f6ee..008eaba35 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -56,6 +56,14 @@
 #  define ZSTD_HASHLOG3_MAX 17
 #endif
 
+
+/*-*************************************
+* Forward declarations
+***************************************/
+size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs,
+                                   size_t nbSequences);
+
+
 /*-*************************************
 * Helper functions
 ***************************************/
@@ -7118,7 +7126,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
 }
 
 
-#if defined(__AVX2__)
+#if defined(ZSTD_ARCH_X86_AVX2)
 
 #include <immintrin.h>  /* AVX2 intrinsics */
 
@@ -7138,7 +7146,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
  * @returns > 0 if there is one long length (> 65535),
  * indicating the position, and type.
  */
-static size_t convertSequences_noRepcodes(
+size_t convertSequences_noRepcodes(
     SeqDef* dstSeqs,
     const ZSTD_Sequence* inSeqs,
     size_t nbSequences)
@@ -7298,7 +7306,7 @@ static size_t convertSequences_noRepcodes(
  * @returns > 0 if there is one long length (> 65535),
  * indicating the position, and type.
  */
-static size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, size_t nbSequences) {
+size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs, size_t nbSequences) {
     size_t longLen = 0;
 
     /* RVV depends on the specific definition of target structures */
@@ -7375,9 +7383,131 @@ static size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence*
  * but since this implementation is targeting modern systems (>= Sapphire Rapid),
  * it's not useful to develop and maintain code for older pre-AVX2 platforms */
 
-#else /* no AVX2 */
+#elif defined(ZSTD_ARCH_ARM_NEON) && (defined(__aarch64__) || defined(_M_ARM64))
+
+size_t convertSequences_noRepcodes(
+    SeqDef* dstSeqs,
+    const ZSTD_Sequence* inSeqs,
+    size_t nbSequences)
+{
+    size_t longLen = 0;
+    size_t n = 0;
+
+    /* The Neon permutation depends on the specific definition of the target
+       structures. */
+    ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0);
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4);
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+    ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8);
+    ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0);
+    ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4);
+    ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6);
+
+    if (nbSequences > 3) {
+        static const ZSTD_ALIGNED(16) U32 constAddition[4] = {
+            ZSTD_REP_NUM, 0, -MINMATCH, 0
+        };
+        static const ZSTD_ALIGNED(16) U8 constMask[16] = {
+            0, 1, 2, 3, 4, 5, 8, 9, 16, 17, 18, 19, 20, 21, 24, 25
+        };
+        static const ZSTD_ALIGNED(16) U16 constCounter[8] = {
+            1, 1, 1, 1, 2, 2, 2, 2
+        };
+
+        const uint32x4_t vaddition = vld1q_u32(constAddition);
+        const uint8x16_t vmask = vld1q_u8(constMask);
+        uint16x8_t vcounter = vld1q_u16(constCounter);
+        uint16x8_t vindex01 = vdupq_n_u16(0);
+        uint16x8_t vindex23 = vdupq_n_u16(0);
+
+        do {
+            /* Load 4 ZSTD_Sequence (64 bytes). */
+            const uint32x4_t vin0 = vld1q_u32(&inSeqs[n + 0].offset);
+            const uint32x4_t vin1 = vld1q_u32(&inSeqs[n + 1].offset);
+            const uint32x4_t vin2 = vld1q_u32(&inSeqs[n + 2].offset);
+            const uint32x4_t vin3 = vld1q_u32(&inSeqs[n + 3].offset);
+
+            /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} to each vector. */
+            const uint8x16x2_t vadd01 = { {
+                vreinterpretq_u8_u32(vaddq_u32(vin0, vaddition)),
+                vreinterpretq_u8_u32(vaddq_u32(vin1, vaddition)),
+            } };
+            const uint8x16x2_t vadd23 = { {
+                vreinterpretq_u8_u32(vaddq_u32(vin2, vaddition)),
+                vreinterpretq_u8_u32(vaddq_u32(vin3, vaddition)),
+            } };
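+            /* In vqtbl2q_u8, mask bytes 0..15 select from the first source
+             * vector and 16..31 from the second: bytes {0-3} keep the full
+             * 32-bit offBase, {4-5} and {8-9} keep the low 16 bits of
+             * litLength and mlBase, and the rep field is dropped. */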
+            /* Shuffle and pack bytes so each vector contains 2 SeqDef structures. */
+            const uint8x16_t vout01 = vqtbl2q_u8(vadd01, vmask);
+            const uint8x16_t vout23 = vqtbl2q_u8(vadd23, vmask);
+
+            /* Pack the upper 16 bits of the 32-bit lanes for the overflow check. */
+            uint16x8_t voverflow01 = vuzp2q_u16(vreinterpretq_u16_u8(vadd01.val[0]),
+                                                vreinterpretq_u16_u8(vadd01.val[1]));
+            uint16x8_t voverflow23 = vuzp2q_u16(vreinterpretq_u16_u8(vadd23.val[0]),
+                                                vreinterpretq_u16_u8(vadd23.val[1]));
+
+            /* Store 4 SeqDef structures. */
+            vst1q_u32(&dstSeqs[n + 0].offBase, vreinterpretq_u32_u8(vout01));
+            vst1q_u32(&dstSeqs[n + 2].offBase, vreinterpretq_u32_u8(vout23));
+
+            /* Create masks in case of overflow. */
+            voverflow01 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow01));
+            voverflow23 = vcgtzq_s16(vreinterpretq_s16_u16(voverflow23));
+
+            /* Update overflow indices. */
+            vindex01 = vbslq_u16(voverflow01, vcounter, vindex01);
+            vindex23 = vbslq_u16(voverflow23, vcounter, vindex23);
+
+            /* Update counter for overflow check. */
+            vcounter = vaddq_u16(vcounter, vdupq_n_u16(4));
+
+            n += 4;
+        } while(n < nbSequences - 3);
+
+        /* Fix up the indices in the second vector: vindex23 was updated from
+           the same vcounter as vindex01 (saving an additional counter in the
+           loop), so every non-zero entry is 2 too small. */
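+        /* E.g. an overflow at sequence 3 of the first iteration is recorded
+           as 2 and must become 4. vtstq_u16 yields 0xFFFF (-1 in 16-bit
+           lanes) for non-zero entries, so subtracting it twice adds 2 only
+           where an overflow was recorded. */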
+        {   uint16x8_t nonzero = vtstq_u16(vindex23, vindex23);
+            vindex23 = vsubq_u16(vindex23, nonzero);
+            vindex23 = vsubq_u16(vindex23, nonzero);
+        }
+
+        /* Merge the indices across the vectors; only the maximums are needed. */
+        vindex01 = vmaxq_u16(vindex01, vindex23);
+        vindex01 = vmaxq_u16(vindex01, vextq_u16(vindex01, vindex01, 4));
+
+        /* Compute `longLen` from the maximum matchLength and litLength
+           overflow indices, with a preference for litLength. */
+        {   U64 maxLitMatchIndices = vgetq_lane_u64(vreinterpretq_u64_u16(vindex01), 0);
+            size_t maxLitIndex = (maxLitMatchIndices >> 16) & 0xFFFF;
+            size_t maxMatchIndex = (maxLitMatchIndices >> 32) & 0xFFFF;
+            longLen = maxLitIndex > maxMatchIndex ? maxLitIndex + nbSequences
+                                                  : maxMatchIndex;
+        }
+    }
+
+    /* Handle the remaining elements. */
+    for (; n < nbSequences; n++) {
+        dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+        dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+        dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+        /* Check for long length > 65535. */
+        if (UNLIKELY(inSeqs[n].matchLength > 65535 + MINMATCH)) {
+            assert(longLen == 0);
+            longLen = n + 1;
+        }
+        if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+            assert(longLen == 0);
+            longLen = n + nbSequences + 1;
+        }
+    }
+    return longLen;
+}
+
+#else /* No vectorization. */
 
-static size_t convertSequences_noRepcodes(
+size_t convertSequences_noRepcodes(
     SeqDef* dstSeqs,
     const ZSTD_Sequence* inSeqs,
     size_t nbSequences)
@@ -7388,7 +7518,7 @@ static size_t convertSequences_noRepcodes(
         dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
         dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
         dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
-        /* check for long length > 65535 */
+        /* Check for long length > 65535. */
         if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
             assert(longLen == 0);
             longLen = n + 1;
diff --git a/tests/fuzzer.c b/tests/fuzzer.c
index 8e30d207d..0bc160efa 100644
--- a/tests/fuzzer.c
+++ b/tests/fuzzer.c
@@ -770,6 +770,130 @@ static void test_blockSplitter_incompressibleExpansionProtection(unsigned testNb
     DISPLAYLEVEL(3, "OK \n");
 }
 
+size_t convertSequences_noRepcodes(SeqDef* dstSeqs, const ZSTD_Sequence* inSeqs,
+                                   size_t nbSequences);
+
+static size_t convertSequences_noRepcodes_ref(
+    SeqDef* dstSeqs,
+    const ZSTD_Sequence* inSeqs,
+    size_t nbSequences)
+{
+    size_t longLen = 0;
+    size_t n;
+    for (n = 0; n < nbSequences; n++) {
+        dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+        dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+        dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+        /* Check for long length > 65535. */
+        if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
+            assert(longLen == 0);
+            longLen = n + 1;
+        }
+        if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+            assert(longLen == 0);
+            longLen = n + nbSequences + 1;
+        }
+    }
+    return longLen;
+}
+
+static unsigned test_convertSequences_noRepcodes(unsigned seed, unsigned testNb)
+{
+    ZSTD_Sequence nsrc[12];
+    SeqDef ndst[12], rdst[12];
+    size_t ref, ret, i, j;
+
+    seed += 0xDEADBEEF;
+    for (i = 0; i < COUNTOF(nsrc); ++i) {
+        seed = 48271 * ((unsigned)i + seed);
+        nsrc[i].offset = (seed & 0xFFFF) | 1; /* Offset shall not be zero. */
+        seed = 48271 * ((unsigned)i + seed);
+        nsrc[i].litLength = seed & 0xFFFF;
+        seed = 48271 * ((unsigned)i + seed);
+        nsrc[i].matchLength = (seed & 0xFFFFFF) % (65536 + MINMATCH);
+        seed = 48271 * ((unsigned)i + seed);
+        nsrc[i].rep = seed & 0xFF;
+    }
+
+    /* For near-overflow and proper negative-value handling. */
+    nsrc[5].matchLength = 65535 + MINMATCH;
+    nsrc[6].litLength = 65535;
+    nsrc[6].matchLength = 0;
+    nsrc[7].litLength = 0;
+    nsrc[7].matchLength = MINMATCH;
+
+    for (i = 0; i <= COUNTOF(nsrc); ++i) {
+        DISPLAYLEVEL(3, "test%3u : convertSequences_noRepcodes with %u inputs : ",
+                     testNb++, (unsigned)i);
+        memset(ndst, 0, sizeof(ndst));
+        memset(rdst, 0, sizeof(rdst));
+        ref = convertSequences_noRepcodes_ref(rdst, nsrc, i);
+        ret = convertSequences_noRepcodes(ndst, nsrc, i);
+        CHECK_EQ(ret, ref);
+        CHECK_EQ(memcmp(rdst, ndst, sizeof(ndst)), 0);
+        DISPLAYLEVEL(3, "OK \n");
+    }
+
+    nsrc[7].matchLength = 65536 + MINMATCH;
+    for (i = 8; i <= COUNTOF(nsrc); ++i) {
+        DISPLAYLEVEL(3, "test%3u : convertSequences_noRepcodes with %u inputs and "
+                        "matchLength overflow : ",
+                     testNb++, (unsigned)i);
+        memset(ndst, 0, sizeof(ndst));
+        memset(rdst, 0, sizeof(rdst));
+        ref = convertSequences_noRepcodes_ref(rdst, nsrc, i);
+        ret = convertSequences_noRepcodes(ndst, nsrc, i);
+        CHECK_EQ(ret, ref);
+        CHECK_EQ(memcmp(rdst, ndst, sizeof(ndst)), 0);
+        DISPLAYLEVEL(3, "OK \n");
+
+        assert(COUNTOF(nsrc) > 8);
+        for (j = 4; j < 8; ++j) {
+            DISPLAYLEVEL(3, "test%3u : convertSequences_noRepcodes with %u inputs and "
+                            "matchLength overflow #%u : ",
+                         testNb++, (unsigned)i, (unsigned)(i - j));
+            memset(ndst, 0, sizeof(ndst));
+            memset(rdst, 0, sizeof(rdst));
+            ref = convertSequences_noRepcodes_ref(rdst, nsrc + j, i - j);
+            ret = convertSequences_noRepcodes(ndst, nsrc + j, i - j);
+            CHECK_EQ(ret, ref);
+            CHECK_EQ(memcmp(rdst, ndst, sizeof(ndst)), 0);
+            DISPLAYLEVEL(3, "OK \n");
+        }
+    }
+    nsrc[7].matchLength = 1;
+
+    nsrc[7].litLength = 65536;
+    for (i = 8; i <= COUNTOF(nsrc); ++i) {
+        DISPLAYLEVEL(3, "test%3u : convertSequences_noRepcodes with %u inputs and "
+                        "litLength overflow: ",
+                     testNb++, (unsigned)i);
+        memset(ndst, 0, sizeof(ndst));
+        memset(rdst, 0, sizeof(rdst));
+        ref = convertSequences_noRepcodes_ref(rdst, nsrc, i);
+        ret = convertSequences_noRepcodes(ndst, nsrc, i);
+        CHECK_EQ(ret, ref);
+        CHECK_EQ(memcmp(rdst, ndst, sizeof(ndst)), 0);
+        DISPLAYLEVEL(3, "OK \n");
+
+        assert(COUNTOF(nsrc) > 8);
+        for (j = 4; j < 8; ++j) {
+            DISPLAYLEVEL(3, "test%3u : convertSequences_noRepcodes with %u inputs and "
+                            "litLength overflow #%u: ",
+                         testNb++, (unsigned)i, (unsigned)(i - j));
+            memset(ndst, 0, sizeof(ndst));
+            memset(rdst, 0, sizeof(rdst));
+            ref = convertSequences_noRepcodes_ref(rdst, nsrc + j, i - j);
+            ret = convertSequences_noRepcodes(ndst, nsrc + j, i - j);
+            CHECK_EQ(ret, ref);
+            CHECK_EQ(memcmp(rdst, ndst, sizeof(ndst)), 0);
+            DISPLAYLEVEL(3, "OK \n");
+        }
+    }
+
+    return testNb;
+}
+
 static unsigned test_get1BlockSummary(unsigned testNb)
 {
     static const ZSTD_Sequence nseqs[] = {
@@ -4085,6 +4209,8 @@ static int basicUnitTests(U32 const seed, double compressibility)
         } DISPLAYLEVEL(3, "OK \n");
 
+    testNb = test_convertSequences_noRepcodes(seed, testNb);
+
     testNb = test_get1BlockSummary(testNb);
 
     DISPLAYLEVEL(3, "test%3i : ZSTD_compressSequencesAndLiterals : ", testNb++);
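
For reference, the new unit tests and the benchmark quoted in the commit
message can be exercised roughly as follows (a sketch, assuming the usual
zstd tests/ Makefile targets and a locally available enwik5 corpus):

    make -C tests fuzzer fullbench
    ./tests/fuzzer -v
    ./tests/fullbench -b18 -l5 enwik5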