From: Nick Terrell
Date: Fri, 29 Mar 2019 18:31:21 +0000 (-0600)
Subject: [libzstd] Speed up single segment zstd_fast by 5%
X-Git-Tag: v1.4.0^2~14^2
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=95624b77e477752b3c380c22be7bcf67f06c9934;p=thirdparty%2Fzstd.git

[libzstd] Speed up single segment zstd_fast by 5%

This PR is based on top of PR #1563.

The optimization is to process two input pointers per loop.
It is based on ideas from [igzip] level 1, and on conversations
with @gbtucker.

| Platform                | Silesia | Enwik8 |
|-------------------------|---------|--------|
| OSX clang-10            | +5.3%   | +5.4%  |
| i9 5 GHz gcc-8          | +6.6%   | +6.6%  |
| i9 5 GHz clang-7        | +8.0%   | +8.0%  |
| Skylake 2.4 GHz gcc-4.8 | +6.3%   | +7.9%  |
| Skylake 2.4 GHz clang-7 | +6.2%   | +7.5%  |

Testing on all Silesia files on my Intel i9-9900k with gcc-8:

| Silesia File | Ratio Change | Speed Change |
|--------------|--------------|--------------|
| silesia.tar  | +0.17%       | +6.6%        |
| dickens      | +0.25%       | +7.0%        |
| mozilla      | +0.02%       | +6.8%        |
| mr           | -0.30%       | +10.9%       |
| nci          | +1.28%       | +4.5%        |
| ooffice      | -0.35%       | +10.7%       |
| osdb         | +0.75%       | +9.8%        |
| reymont      | +0.65%       | +4.6%        |
| samba        | +0.70%       | +5.9%        |
| sao          | -0.01%       | +14.0%       |
| webster      | +0.30%       | +5.5%        |
| xml          | +0.92%       | +5.3%        |
| x-ray        | -0.00%       | +1.4%        |

Same tests on Calgary. For brevity, I've only included files where
the compression ratio regressed or improved substantially.

| Calgary File | Ratio Change | Speed Change |
|--------------|--------------|--------------|
| calgary.tar  | +0.30%       | +7.1%        |
| geo          | -0.14%       | +25.0%       |
| obj1         | -0.46%       | +15.2%       |
| obj2         | -0.18%       | +6.0%        |
| pic          | +1.80%       | +9.3%        |
| trans        | -0.35%       | +5.5%        |

Overall, we gain 0.1% compression ratio on Silesia and 0.3% on enwik8.
I also tested on the GitHub and hg-commands datasets without a
dictionary, and we gain a small amount of both compression ratio and
speed on each.

I tested the negative compression levels on Silesia on my Intel
i9-9900k with gcc-8:

| Level | Ratio Change | Speed Change |
|-------|--------------|--------------|
| -1    | +0.13%       | +6.4%        |
| -2    | +4.6%        | -1.5%        |
| -3    | +7.5%        | -4.8%        |
| -4    | +8.5%        | -6.9%        |
| -5    | +9.1%        | -9.1%        |

Roughly, the negative levels now scale half as quickly: e.g. the new
level -16 is roughly equivalent to the old level -8, but a bit quicker
and smaller. If you don't think this is the right trade-off, we can
change it to multiply the step size by 2 instead of adding 1. I think
the current choice makes sense, because it gives a bit slower ratio
decay.
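To make the shape of the change easier to see before reading the diff,
here is a minimal standalone sketch of the two-positions-per-loop idea.
This is only an illustration, not the patched zstd code: hash4(),
TABLE_BITS, scan_two_per_loop(), and the bare memcmp() check are
simplified stand-ins, and match emission is elided.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TABLE_BITS 15

    /* Toy 4-byte multiplicative hash, standing in for ZSTD_hashPtr(). */
    static uint32_t hash4(const uint8_t* p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return (v * 2654435761u) >> (32 - TABLE_BITS);
    }

    /* Probe two adjacent positions (ip0 and ip1 = ip0 + 1) per iteration.
     * Both hashes are computed and both table slots updated up front, so
     * the two lookups are independent and can overlap in the pipeline.
     * table must have 1 << TABLE_BITS entries, zero-initialized. */
    static void scan_two_per_loop(const uint8_t* src, size_t size,
                                  uint32_t* table)
    {
        if (size < 16) return;  /* too small to probe safely */
        {
            const uint8_t* const ilimit = src + size - 8;  /* room for 4-byte reads */
            const uint8_t* ip0 = src;
            const uint8_t* ip1 = src + 1;

            while (ip1 < ilimit) {
                uint32_t const h0 = hash4(ip0);
                uint32_t const h1 = hash4(ip1);
                const uint8_t* const match0 = src + table[h0];
                const uint8_t* const match1 = src + table[h1];
                table[h0] = (uint32_t)(ip0 - src);
                table[h1] = (uint32_t)(ip1 - src);

                if (match0 < ip0 && memcmp(match0, ip0, 4) == 0) {
                    /* match at ip0: a real impl emits a sequence here */
                } else if (match1 < ip1 && memcmp(match1, ip1, 4) == 0) {
                    /* match at ip1: one extra literal, then the sequence */
                }
                ip0 += 2;  /* stand-in for the real data-dependent step */
                ip1 += 2;
            }
        }
    }

The real patch below has the same shape, but checks the repcode at
ip0+2 first and shares the match-emission code via the _offset and
_match labels.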
[igzip]: https://github.com/01org/isa-l/tree/master/igzip
---

diff --git a/lib/compress/zstd_fast.c b/lib/compress/zstd_fast.c
index d59e0e73e..ed997b441 100644
--- a/lib/compress/zstd_fast.c
+++ b/lib/compress/zstd_fast.c
@@ -51,10 +51,12 @@ size_t ZSTD_compressBlock_fast_generic(
     U32* const hashTable = ms->hashTable;
     U32 const hlog = cParams->hashLog;
     /* support stepSize of 0 */
-    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
+    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+    const BYTE* ip0 = istart;
+    const BYTE* ip1;
     const BYTE* anchor = istart;
     const U32 prefixStartIndex = ms->window.dictLimit;
     const BYTE* const prefixStart = base + prefixStartIndex;
@@ -64,62 +66,96 @@ size_t ZSTD_compressBlock_fast_generic(
     U32 offsetSaved = 0;
 
     /* init */
-    ip += (ip == prefixStart);
+    ip0 += (ip0 == prefixStart);
+    ip1 = ip0 + 1;
     {
-        U32 const maxRep = (U32)(ip - prefixStart);
+        U32 const maxRep = (U32)(ip0 - prefixStart);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
 
     /* Main Search Loop */
-    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
         size_t mLength;
-        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
-        U32 const current = (U32)(ip-base);
-        U32 const matchIndex = hashTable[h];
-        const BYTE* match = base + matchIndex;
-        hashTable[h] = current;   /* update hash table */
-
-        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
-            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else if ((matchIndex <= prefixStartIndex) || MEM_read32(match) != MEM_read32(ip)) {
-            assert(stepSize >= 1);
-            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-            continue;
-        } else {
+        BYTE const* ip2 = ip0 + 2;
+        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+        U32 const val0 = MEM_read32(ip0);
+        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+        U32 const val1 = MEM_read32(ip1);
+        U32 const current0 = (U32)(ip0-base);
+        U32 const current1 = (U32)(ip1-base);
+        U32 const matchIndex0 = hashTable[h0];
+        U32 const matchIndex1 = hashTable[h1];
+        BYTE const* repMatch = ip2-offset_1;
+        const BYTE* match0 = base + matchIndex0;
+        const BYTE* match1 = base + matchIndex1;
+        U32 offcode;
+        hashTable[h0] = current0;   /* update hash table */
+        hashTable[h1] = current1;   /* update hash table */
+
+        assert(ip0 + 1 == ip1);
+
+        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+            ip0 = ip2 - mLength;
+            match0 = repMatch - mLength;
+            offcode = 0;
+            goto _match;
+        }
+        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
             /* found a regular match */
-            U32 const offset = (U32)(ip-match);
-            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            while (((ip>anchor) & (match>prefixStart))
-                && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-            offset_2 = offset_1;
-            offset_1 = offset;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            goto _offset;
         }
-
+        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+            /* found a regular match after one literal */
+            ip0 = ip1;
+            match0 = match1;
+            goto _offset;
+        }
+        {
+            size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+            assert(step >= 2);
+            ip0 += step;
+            ip1 += step;
+            continue;
+        }
+_offset: /* Requires: ip0, match0 */
+        /* Compute the offset code */
+        offset_2 = offset_1;
+        offset_1 = (U32)(ip0-match0);
+        offcode = offset_1 + ZSTD_REP_MOVE;
+        mLength = 0;
+        /* Count the backwards match length */
+        while (((ip0>anchor) & (match0>prefixStart))
+            && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+        /* Count the forward length */
+        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+        ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
         /* match found */
-        ip += mLength;
-        anchor = ip;
+        ip0 += mLength;
+        anchor = ip0;
+        ip1 = ip0 + 1;
 
-        if (ip <= ilimit) {
+        if (ip0 <= ilimit) {
             /* Fill Table */
-            assert(base+current+2 > istart);  /* check base overflow */
-            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+            assert(base+current0+2 > istart);  /* check base overflow */
+            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
 
-            while ( (ip <= ilimit)
+            while ( (ip0 <= ilimit)
                  && ( (offset_2>0)
-                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                    & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
                 /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
                 U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
-                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
+                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+                ip0 += rLength;
+                ip1 = ip0 + 1;
                 ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
+                anchor = ip0;
+                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
             }
         }
     }
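On the step-size point above: a rough sketch of why the negative levels
now scale half as quickly. It assumes that negative levels raise
targetLength and that kSearchStrength is 8 (its value in
zstd_compress_internal.h at the time); old_step() and new_step() are
hypothetical helpers written for illustration, not zstd functions.

    #include <stddef.h>
    #include <stdio.h>

    #define kSearchStrength 8  /* assumed; see zstd_compress_internal.h */

    /* Old advance on a missed probe: one position checked per loop. */
    static size_t old_step(size_t distFromAnchor, size_t targetLength)
    {
        size_t const stepSize = targetLength + !targetLength;      /* >= 1 */
        return (distFromAnchor >> kSearchStrength) + stepSize;
    }

    /* New advance: two positions (ip0, ip1) checked per loop, the
     * acceleration shift is one bit smaller, and stepSize gains +1,
     * so every skip covers at least two positions. */
    static size_t new_step(size_t distFromAnchor, size_t targetLength)
    {
        size_t const stepSize = targetLength + !targetLength + 1;  /* >= 2 */
        return (distFromAnchor >> (kSearchStrength - 1)) + stepSize;
    }

    int main(void)
    {
        /* With targetLength = t, the new loop must be given roughly 2*t
         * to skip as far per probed position as the old loop did with t,
         * which is why the negative levels now decay ratio half as fast. */
        size_t t;
        for (t = 1; t <= 16; t *= 2)
            printf("targetLength %2zu: old step %2zu, new step %2zu\n",
                   t, old_step(0, t), new_step(0, t));
        return 0;
    }

Multiplying the step size by 2 instead of adding 1, as proposed in the
alternative, would restore the old per-level skip distances at the cost
of faster ratio decay.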