+ huf_decompress.o decompress.o
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
new file mode 100644
-index 0000000..9d21540
+index 0000000..9b5d2bc
--- /dev/null
+++ b/lib/zstd/bitstream.h
-@@ -0,0 +1,391 @@
+@@ -0,0 +1,376 @@
+/* ******************************************************************
+ bitstream
+ Part of FSE library
+****************************************************************** */
+MEM_STATIC unsigned BIT_highbit32 (register U32 val)
+{
-+# if defined(_MSC_VER) /* Visual */
-+ unsigned long r=0;
-+ _BitScanReverse ( &r, val );
-+ return (unsigned) r;
-+# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
-+ return 31 - __builtin_clz (val);
-+# else /* Software version */
-+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-+ U32 v = val;
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-+# endif
++ return 31 - __builtin_clz(val);
+}
+
+/*===== Local Constants =====*/
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
-+ { U32 nbBytes = bitD->bitsConsumed >> 3;
++ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+#endif /* BITSTREAM_H_MODULE */
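
Note (reviewer illustration, not patch content): v2 drops the _MSC_VER and
De Bruijn software fallbacks from BIT_highbit32() and keeps only the
gcc/clang builtin, which is always available for kernel builds. A minimal
userspace sketch of what the function computes, the index of the highest
set bit; highbit32 is a hypothetical stand-in, and val must be non-zero
since __builtin_clz(0) is undefined:

        #include <stdio.h>

        static unsigned highbit32(unsigned val)
        {
                /* the same expression the patch keeps in BIT_highbit32() */
                return 31 - __builtin_clz(val);
        }

        int main(void)
        {
                printf("%u %u %u\n", highbit32(1), highbit32(0x80), highbit32(0xFFFF));
                /* prints: 0 7 15 */
                return 0;
        }
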
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
new file mode 100644
-index 0000000..79c3207
+index 0000000..4f1e184
--- /dev/null
+++ b/lib/zstd/compress.c
-@@ -0,0 +1,3384 @@
+@@ -0,0 +1,3297 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+#include "huf.h"
+#include "zstd_internal.h" /* includes zstd.h */
+
-+#ifdef current
-+# undef current
-+#endif
-+
+/*-*************************************
+* Constants
+***************************************/
+* Context memory management
+***************************************/
+struct ZSTD_CCtx_s {
-+ const BYTE* nextSrc; /* next block here to continue on current prefix */
++ const BYTE* nextSrc; /* next block here to continue on current prefix */
+ const BYTE* base; /* All regular indexes relative to this position */
+ const BYTE* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */
+
+ /* resize params, to use less memory when necessary */
-+ { U32 const minSrcSize = (srcSize==0) ? 500 : 0;
++ { U32 const minSrcSize = (srcSize==0) ? 500 : 0;
+ U64 const rSize = srcSize + dictSize + minSrcSize;
+ if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
+ U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ } }
+ if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
-+ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
++ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
+ }
+
+ return ZSTD_continueCCtx(zc, params, frameContentSize);
+ }
+
-+ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
++ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
+ U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ void* ptr;
+
+ /* Check if workSpace is large enough, alloc a new one if needed */
-+ { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
++ { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+ + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+ size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
+ + (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+
+
+ memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
-+ { ZSTD_parameters params = srcCCtx->params;
++ { ZSTD_parameters params = srcCCtx->params;
+ params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
+ ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
+ }
+
+ /* copy tables */
-+ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
++ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
+ size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
+ size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+ /* small ? don't even attempt compression (speed opt) */
+# define LITERAL_NOENTROPY 63
-+ { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
++ { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
-+ { HUF_repeat repeat = zc->flagStaticHufTable;
++ { HUF_repeat repeat = zc->flagStaticHufTable;
+ int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
-+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
++ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
-+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
++ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ default: /* should not be necessary, lhSize is only {3,4,5} */
+ case 5: /* 2 - 2 - 18 - 18 */
-+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
++ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+ /* Compress literals */
-+ { const BYTE* const literals = seqStorePtr->litStart;
++ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const litSize = seqStorePtr->lit - literals;
+ size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
+ if (ZSTD_isError(cSize)) return cSize;
+ ZSTD_seqToCodes(seqStorePtr);
+
+ /* CTable for Literal Lengths */
-+ { U32 max = MaxLL;
++ { U32 max = MaxLL;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = llCodeTable[0];
+ } }
+
+ /* CTable for Offsets */
-+ { U32 max = MaxOff;
++ { U32 max = MaxOff;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = ofCodeTable[0];
+ } }
+
+ /* CTable for MatchLengths */
-+ { U32 max = MaxML;
++ { U32 max = MaxML;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = *mlCodeTable;
+ zc->flagStaticTables = 0;
+
+ /* Encoding Sequences */
-+ { BIT_CStream_t blockStream;
++ { BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+ }
+ BIT_flushBits(&blockStream);
+
-+ { size_t n;
++ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
-+ { size_t const streamSize = BIT_closeCStream(&blockStream);
++ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
+ op += streamSize;
+ } }
+
+ /* check compressibility */
+_check_compressibility:
-+ { size_t const minGain = ZSTD_minGain(srcSize);
++ { size_t const minGain = ZSTD_minGain(srcSize);
+ size_t const maxCSize = srcSize - minGain;
+ if ((size_t)(op-ostart) >= maxCSize) {
+ zc->flagStaticHufTable = HUF_repeat_none;
+ return op - ostart;
+}
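
Note (reviewer illustration, not patch content): the lhSize 3/4/5 cases
above pack the literals-section header as "2 - 2 - 10 - 10",
"2 - 2 - 14 - 14" and "2 - 2 - 18 - 18" bit fields, written little-endian.
A sketch of the 3-byte layout, assuming both sizes fit in 10 bits;
pack_lh3 is a hypothetical helper mirroring the case-3 lhc expression:

        #include <stdio.h>
        #include <stdint.h>

        /* 2 bits block type, 2 bits size format, 10 bits literal size,
         * 10 bits compressed size -- mirrors the case-3 lhc expression */
        static uint32_t pack_lh3(uint32_t hType, uint32_t singleStream,
                                 uint32_t srcSize, uint32_t cLitSize)
        {
                return hType + ((!singleStream) << 2) + (srcSize << 4) + (cLitSize << 14);
        }

        int main(void)
        {
                uint32_t const lhc = pack_lh3(2, 1, 1000, 500);

                printf("type=%u fmt=%u lit=%u clit=%u\n",
                       lhc & 3, (lhc >> 2) & 3, (lhc >> 4) & 0x3FF, (lhc >> 14) & 0x3FF);
                /* prints: type=2 fmt=0 lit=1000 clit=500 */
                return 0;
        }
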
+
-+#if 0 /* for debug */
-+# define STORESEQ_DEBUG
-+U32 g_startDebug = 0;
-+const BYTE* g_start = NULL;
-+#endif
-+
+/*! ZSTD_storeSeq() :
+ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ `offsetCode` : distance to match, or 0 == repCode.
+*/
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
+{
-+#ifdef STORESEQ_DEBUG
-+ if (g_startDebug) {
-+ const U32 pos = (U32)((const BYTE*)literals - g_start);
-+ if (g_start==NULL) g_start = (const BYTE*)literals;
-+ if ((pos > 1895000) && (pos < 1895300))
-+ fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
-+ pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
-+ }
-+#endif
+ /* copy Literals */
+ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+ seqStorePtr->lit += litLength;
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
-+# if defined(_MSC_VER) && defined(_WIN64)
-+ unsigned long r = 0;
-+ _BitScanForward64( &r, (U64)val );
-+ return (unsigned)(r>>3);
-+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctzll((U64)val) >> 3);
-+# else
-+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
-+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-+# endif
+ } else { /* 32 bits */
-+# if defined(_MSC_VER)
-+ unsigned long r=0;
-+ _BitScanForward( &r, (U32)val );
-+ return (unsigned)(r>>3);
-+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
-+# else
-+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
-+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
-+# if defined(_MSC_VER) && defined(_WIN64)
-+ unsigned long r = 0;
-+ _BitScanReverse64( &r, val );
-+ return (unsigned)(r>>3);
-+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clzll(val) >> 3);
-+# else
-+ unsigned r;
-+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
-+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
-+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-+ r += (!val);
-+ return r;
-+# endif
+ } else { /* 32 bits */
-+# if defined(_MSC_VER)
-+ unsigned long r = 0;
-+ _BitScanReverse( &r, (unsigned long)val );
-+ return (unsigned)(r>>3);
-+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
-+# else
-+ unsigned r;
-+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-+ r += (!val);
-+ return r;
-+# endif
+ } }
+}
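
Note (reviewer illustration, not patch content): as with BIT_highbit32(),
v2 strips the _MSC_VER and De Bruijn fallbacks from ZSTD_NbCommonBytes()
and keeps only the builtins. A userspace sketch of the little-endian
64-bit case; nb_common_bytes is a hypothetical stand-in that XORs two
8-byte loads and converts trailing zero bits into equal leading bytes:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        static unsigned nb_common_bytes(const void *a, const void *b)
        {
                uint64_t x, y;

                memcpy(&x, a, 8);
                memcpy(&y, b, 8);
                if (x == y)
                        return 8;                       /* all eight bytes equal */
                return __builtin_ctzll(x ^ y) >> 3;     /* little-endian: low byte first */
        }

        int main(void)
        {
                printf("%u\n", nb_common_bytes("abcdefgh", "abcdXfgh")); /* prints: 4 */
                return 0;
        }
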
+
+
+ /* init */
+ ip += (ip==lowest);
-+ { U32 const maxRep = (U32)(ip-lowest);
++ { U32 const maxRep = (U32)(ip-lowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hBits, mls);
-+ U32 const current = (U32)(ip-base);
++ U32 const curr = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
-+ hashTable[h] = current; /* update hash table */
++ hashTable[h] = curr; /* update hash table */
+
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
-+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */
++ hashTable[ZSTD_hashPtr(base+curr+2, hBits, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ const U32 matchIndex = hashTable[h];
+ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
-+ const U32 current = (U32)(ip-base);
-+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
++ const U32 curr = (U32)(ip-base);
++ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* repMatch = repBase + repIndex;
+ size_t mLength;
-+ hashTable[h] = current; /* update hash table */
++ hashTable[h] = curr; /* update hash table */
+
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ }
-+ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
++ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-+ offset = current - matchIndex;
++ offset = curr - matchIndex;
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ if (ip <= ilimit) {
+ /* Fill Table */
-+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
++ hashTable[ZSTD_hashPtr(base+curr+2, hBits, mls)] = curr+2;
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
-+ U32 const current2 = (U32)(ip-base);
-+ U32 const repIndex2 = current2 - offset_2;
++ U32 const curr2 = (U32)(ip-base);
++ U32 const repIndex2 = curr2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional underflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
-+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
++ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+
+ /* init */
+ ip += (ip==lowest);
-+ { U32 const maxRep = (U32)(ip-lowest);
++ { U32 const maxRep = (U32)(ip-lowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+ size_t mLength;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
-+ U32 const current = (U32)(ip-base);
++ U32 const curr = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 const matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
-+ hashLong[h2] = hashSmall[h] = current; /* update hash tables */
++ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
+
-+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */
++ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= curr */
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* match3 = base + matchIndex3;
-+ hashLong[h3] = current + 1;
++ hashLong[h3] = curr + 1;
+ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ mLength = ZSTD_count(ip+9, match3+8, iend) + 8;
+ ip++;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
-+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
-+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */
++ hashLong[ZSTD_hashPtr(base+curr+2, hBitsL, 8)] =
++ hashSmall[ZSTD_hashPtr(base+curr+2, hBitsS, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+ cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
-+ const U32 current = (U32)(ip-base);
-+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
++ const U32 curr = (U32)(ip-base);
++ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* repMatch = repBase + repIndex;
+ size_t mLength;
-+ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
++ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
+
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
-+ offset = current - matchLongIndex;
++ offset = curr - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
-+ hashLong[h3] = current + 1;
++ hashLong[h3] = curr + 1;
+ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+ ip++;
-+ offset = current+1 - matchIndex3;
++ offset = curr+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
-+ offset = current - matchIndex;
++ offset = curr - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
-+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
-+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
++ hashSmall[ZSTD_hashPtr(base+curr+2, hBitsS, mls)] = curr+2;
++ hashLong[ZSTD_hashPtr(base+curr+2, hBitsL, 8)] = curr+2;
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
-+ U32 const current2 = (U32)(ip-base);
-+ U32 const repIndex2 = current2 - offset_2;
++ U32 const curr2 = (U32)(ip-base);
++ U32 const repIndex2 = curr2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional underflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
-+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
-+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
++ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
++ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
-+ const U32 current = (U32)(ip-base);
-+ const U32 btLow = btMask >= current ? 0 : current - btMask;
-+ U32* smallerPtr = bt + 2*(current&btMask);
++ const U32 curr = (U32)(ip-base);
++ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
++ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = zc->lowLimit;
-+ U32 matchEndIdx = current+8;
++ U32 matchEndIdx = curr+8;
+ size_t bestLength = 8;
-+#ifdef ZSTD_C_PREDICT
-+ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
-+ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
-+ predictedSmall += (predictedSmall>0);
-+ predictedLarge += (predictedLarge>0);
-+#endif /* ZSTD_C_PREDICT */
+
-+ hashTable[h] = current; /* Update Hash Table */
++ hashTable[h] = curr; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+
-+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
-+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
-+ if (matchIndex == predictedSmall) {
-+ /* no need to check length, result known */
-+ *smallerPtr = matchIndex;
-+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
-+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
-+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
-+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
-+ continue;
-+ }
-+ if (matchIndex == predictedLarge) {
-+ *largerPtr = matchIndex;
-+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
-+ largerPtr = nextPtr;
-+ matchIndex = nextPtr[0];
-+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
-+ continue;
-+ }
-+#endif
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength])
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
-+ /* match is smaller than current */
++ /* match is smaller than curr */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
-+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
+ } else {
-+ /* match is larger than current */
++ /* match is larger than curr */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+
+ *smallerPtr = *largerPtr = 0;
+ if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
-+ if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
++ if (matchEndIdx > curr + 8) return matchEndIdx - curr - 8;
+ return 1;
+}
+
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
-+ const U32 current = (U32)(ip-base);
-+ const U32 btLow = btMask >= current ? 0 : current - btMask;
++ const U32 curr = (U32)(ip-base);
++ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ const U32 windowLow = zc->lowLimit;
-+ U32* smallerPtr = bt + 2*(current&btMask);
-+ U32* largerPtr = bt + 2*(current&btMask) + 1;
-+ U32 matchEndIdx = current+8;
++ U32* smallerPtr = bt + 2*(curr&btMask);
++ U32* largerPtr = bt + 2*(curr&btMask) + 1;
++ U32 matchEndIdx = curr+8;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
-+ hashTable[h] = current; /* Update Hash Table */
++ hashTable[h] = curr; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
-+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
-+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
++ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
++ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
-+ /* match is smaller than current */
++ /* match is smaller than curr */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
-+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
+ } else {
-+ /* match is larger than current */
++ /* match is larger than curr */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+
+ *smallerPtr = *largerPtr = 0;
+
-+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
++ zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr+1;
+ return bestLength;
+}
+
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 lowLimit = zc->lowLimit;
-+ const U32 current = (U32)(ip-base);
-+ const U32 minChain = current > chainSize ? current - chainSize : 0;
++ const U32 curr = (U32)(ip-base);
++ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+ int nbAttempts=maxNbAttempts;
+ size_t ml=EQUAL_READ32-1;
+
+
+ for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ const BYTE* match;
-+ size_t currentMl=0;
++ size_t currMl=0;
+ if ((!extDict) || matchIndex >= dictLimit) {
+ match = base + matchIndex;
+ if (match[ml] == ip[ml]) /* potentially better */
-+ currentMl = ZSTD_count(ip, match, iLimit);
++ currMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
-+ currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
++ currMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
+ }
+
+ /* save best solution */
-+ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
++ if (currMl > ml) { ml = currMl; *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; if (ip+currMl == iLimit) break; /* best possible, and avoid read overflow*/ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ /* init */
+ ip += (ip==base);
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
-+ { U32 const maxRep = (U32)(ip-base);
++ { U32 const maxRep = (U32)(ip-base);
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+ }
+
+ /* first search (depth 0) */
-+ { size_t offsetFound = 99999999;
++ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
-+ { size_t offset2=99999999;
++ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = ml2, offset = 0, start = ip;
+ }
-+ { size_t offset2=99999999;
++ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+
+ /* store sequence */
+_storeSequence:
-+ { size_t const litLength = start - anchor;
++ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+ ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
-+ U32 current = (U32)(ip-base);
++ U32 curr = (U32)(ip-base);
+
+ /* check repCode */
-+ { const U32 repIndex = (U32)(current+1 - offset_1);
++ { const U32 repIndex = (U32)(curr+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional underflow */
+ } }
+
+ /* first search (depth 0) */
-+ { size_t offsetFound = 99999999;
++ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
-+ current++;
++ curr++;
+ /* check repCode */
+ if (offset) {
-+ const U32 repIndex = (U32)(current - offset_1);
++ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional underflow */
+ } }
+
+ /* search match, depth 1 */
-+ { size_t offset2=99999999;
++ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
-+ current++;
++ curr++;
+ /* check repCode */
+ if (offset) {
-+ const U32 repIndex = (U32)(current - offset_1);
++ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional underflow */
+ } }
+
+ /* search match, depth 2 */
-+ { size_t offset2=99999999;
++ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+
+ /* store sequence */
+_storeSequence:
-+ { size_t const litLength = start - anchor;
++ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+ ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
+ const BYTE* const base = zc->base;
+ const BYTE* const istart = (const BYTE*)src;
-+ const U32 current = (U32)(istart-base);
++ const U32 curr = (U32)(istart-base);
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */
+ ZSTD_resetSeqStore(&(zc->seqStore));
-+ if (current > zc->nextToUpdate + 384)
-+ zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
++ if (curr > zc->nextToUpdate + 384)
++ zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
+ blockCompressor(zc, src, srcSize);
+ return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
+}
+ /* preemptive overflow correction */
+ if (cctx->lowLimit > (3U<<29)) {
+ U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
-+ U32 const current = (U32)(ip - cctx->base);
-+ U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog);
-+ U32 const correction = current - newCurrent;
++ U32 const curr = (U32)(ip - cctx->base);
++ U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
++ U32 const correction = curr - newCurr;
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+ ZSTD_reduceIndex(cctx, correction);
+ cctx->base += correction;
+ const BYTE* const ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
-+ /* input becomes current prefix */
++ /* input becomes current prefix */
+ zc->lowLimit = zc->dictLimit;
+ zc->dictLimit = (U32)(zc->nextSrc - zc->base);
+ zc->dictBase = zc->base;
+ cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
+ dictPtr += 4;
+
-+ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr);
++ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr);
+ if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+ dictPtr += hufHeaderSize;
+ }
+
-+ { unsigned offcodeLog;
++ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ dictPtr += offcodeHeaderSize;
+ }
+
-+ { short matchlengthNCount[MaxML+1];
++ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ dictPtr += matchlengthHeaderSize;
+ }
+
-+ { short litlengthNCount[MaxLL+1];
++ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ cctx->rep[2] = MEM_readLE32(dictPtr+8);
+ dictPtr += 12;
+
-+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
++ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ /* All offset values <= dictContentSize + 128 KB must be representable */
+ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+ /* All repCodes must be <= dictContentSize and != 0*/
-+ { U32 u;
++ { U32 u;
+ for (u=0; u<3; u++) {
+ if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted);
+ if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+{
+ if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
-+ { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
++ { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+ ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
+
+ if (!cdict || !cctx) {
+ cdict->dictContent = internalBuffer;
+ }
+
-+ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
++ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
+ if (ZSTD_isError(errorCode)) {
+ ZSTD_free(cdict->dictBuffer, customMem);
+ ZSTD_free(cdict, customMem);
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support free on NULL */
-+ { ZSTD_customMem const cMem = cdict->refContext->customMem;
++ { ZSTD_customMem const cMem = cdict->refContext->customMem;
+ ZSTD_freeCCtx(cdict->refContext);
+ ZSTD_free(cdict->dictBuffer, cMem);
+ ZSTD_free(cdict, cMem);
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+ if (zcs==NULL) return 0; /* support free on NULL */
-+ { ZSTD_customMem const cMem = zcs->customMem;
++ { ZSTD_customMem const cMem = zcs->customMem;
+ ZSTD_freeCCtx(zcs->cctx);
+ zcs->cctx = NULL;
+ ZSTD_freeCDict(zcs->cdictLocal);
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ /* allocate buffers */
-+ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
++ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
+ if (zcs->inBuffSize < neededInBuffSize) {
+ zcs->inBuffSize = neededInBuffSize;
+ ZSTD_free(zcs->inBuff, zcs->customMem);
+
+ case zcss_load:
+ /* complete inBuffer */
-+ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
++ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ ip += loaded;
+ if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) {
+ someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */
+ } }
-+ /* compress current block (note : this stage cannot be stopped in the middle) */
-+ { void* cDst;
++ /* compress current block (note : this stage cannot be stopped in the middle) */
++ { void* cDst;
+ size_t cSize;
+ size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+ size_t oSize = oend-op;
+ }
+
+ case zcss_flush:
-+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ *dstCapacityPtr = op - ostart;
+ zcs->inputProcessed += *srcSizePtr;
+ if (zcs->frameEnded) return 0;
-+ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
++ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
+ if (hintInSize==0) hintInSize = zcs->blockSize;
+ return hintInSize;
+ }
+ }
+
+ /* flush epilogue */
-+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
++ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+MODULE_DESCRIPTION("Zstd Compressor");
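
Note (reviewer illustration, not patch content): most of the churn above
is the current -> curr rename. In kernel code, <asm/current.h> defines
current as a macro expanding to get_current(), so v1's
'#ifdef current / #undef current' workaround is replaced in v2 by renaming
the locals. A userspace sketch of the collision; the macro and
get_current() below are stand-ins for the kernel definitions:

        #include <stdio.h>

        #define current get_current()   /* stand-in for <asm/current.h> */

        static int get_current(void) { return 42; }

        static unsigned demo(const unsigned char *ip, const unsigned char *base)
        {
                unsigned const curr = (unsigned)(ip - base);    /* compiles fine */
                /* 'unsigned const current = 0;' would preprocess into
                 * 'unsigned const get_current() = 0;' and fail to build */
                return curr;
        }

        int main(void)
        {
                unsigned char buf[8];

                printf("%u %d\n", demo(buf + 3, buf), current); /* prints: 3 42 */
                return 0;
        }
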
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
new file mode 100644
-index 0000000..98508b1
+index 0000000..378d2c5
--- /dev/null
+++ b/lib/zstd/decompress.c
-@@ -0,0 +1,2377 @@
+@@ -0,0 +1,2349 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyTables_t entropy;
+ const void* previousDstEnd; /* detect continuity */
-+ const void* base; /* start of current segment */
-+ const void* vBase; /* virtual start of previous segment if it was just before current one */
++ const void* base; /* start of current segment */
++ const void* vBase; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameParams fParams;
+ memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
+}
+
-+#if 0
-+/* deprecated */
-+static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
-+{
-+ ZSTD_decompressBegin(dstDCtx); /* init */
-+ if (srcDCtx) { /* support refDCtx on NULL */
-+ dstDCtx->dictEnd = srcDCtx->dictEnd;
-+ dstDCtx->vBase = srcDCtx->vBase;
-+ dstDCtx->base = srcDCtx->base;
-+ dstDCtx->previousDstEnd = srcDCtx->previousDstEnd;
-+ dstDCtx->dictID = srcDCtx->dictID;
-+ dstDCtx->litEntropy = srcDCtx->litEntropy;
-+ dstDCtx->fseEntropy = srcDCtx->fseEntropy;
-+ dstDCtx->LLTptr = srcDCtx->entropy.LLTable;
-+ dstDCtx->MLTptr = srcDCtx->entropy.MLTable;
-+ dstDCtx->OFTptr = srcDCtx->entropy.OFTable;
-+ dstDCtx->HUFptr = srcDCtx->entropy.hufTable;
-+ dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0];
-+ dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1];
-+ dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2];
-+ }
-+}
-+#endif
-+
+static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict);
+
+
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < 4) return 0;
-+ { U32 const magic = MEM_readLE32(buffer);
++ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong);
-+ { BYTE const fhd = ((const BYTE*)src)[4];
++ { BYTE const fhd = ((const BYTE*)src)[4];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
+ if (srcSize < fhsize) return fhsize; }
+
-+ { BYTE const fhdByte = ip[4];
++ { BYTE const fhdByte = ip[4];
+ size_t pos = 5;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-+ { U32 const cBlockHeader = MEM_readLE24(src);
++ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+{
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
-+ { const BYTE* const istart = (const BYTE*) src;
++ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ /* fall-through */
+ case set_compressed:
+ if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
-+ { size_t lhSize, litSize, litCSize;
++ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ }
+
+ case set_basic:
-+ { size_t litSize, lhSize;
++ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ switch(lhlCode)
+ {
+ }
+
+ case set_rle:
-+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
++ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ switch(lhlCode)
+ {
+ return 0;
+ default : /* impossible */
+ case set_compressed :
-+ { U32 tableLog;
++ { U32 tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ if (FSE_isError(headerSize)) return ERROR(corruption_detected);
+ if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
-+ { int nbSeq = *ip++;
++ { int nbSeq = *ip++;
+ if (!nbSeq) { *nbSeqPtr=0; return 1; }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+
+ /* FSE table descriptors */
+ if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
-+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
++ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
-+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
++ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip, LL_defaultDTable, dctx->fseEntropy);
+ if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+ ip += llhSize;
+ }
-+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
++ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip, OF_defaultDTable, dctx->fseEntropy);
+ if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+ ip += ofhSize;
+ }
-+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
++ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip, ML_defaultDTable, dctx->fseEntropy);
+ if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
-+ /* span extDict & currentPrefixSegment */
-+ { size_t const length1 = dictEnd - match;
++ /* span extDict & currentPrefixSegment */
++ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+ /* sequence */
-+ { size_t offset;
++ { size_t offset;
+ if (!ofCode)
+ offset = 0;
+ else {
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
-+ /* span extDict & currentPrefixSegment */
-+ { size_t const length1 = dictEnd - match;
++ /* span extDict & currentPrefixSegment */
++ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ int nbSeq;
+
+ /* Build Decoding Tables */
-+ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
++ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ }
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+ nbSeq--;
-+ { seq_t const sequence = ZSTD_decodeSequence(&seqState);
++ { seq_t const sequence = ZSTD_decodeSequence(&seqState);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* last literal segment */
-+ { size_t const lastLLSize = litEnd - litPtr;
++ { size_t const lastLLSize = litEnd - litPtr;
+ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+ /* sequence */
-+ { size_t offset;
++ { size_t offset;
+ if (!ofCode)
+ offset = 0;
+ else {
+ if (MEM_32bits() ||
+ (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);
+
-+ { size_t const pos = seqState->pos + seq.litLength;
++ { size_t const pos = seqState->pos + seq.litLength;
+ seq.match = seqState->base + pos - seq.offset; /* single memory segment */
+ if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */
+ seqState->pos = pos + seq.matchLength;
+ const BYTE* match = sequence.match;
+
+ /* check */
-+#if 1
+ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-+#endif
+
+ /* copy Literals */
+ ZSTD_copy8(op, *litPtr);
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* copy Match */
-+#if 1
+ if (sequence.offset > (size_t)(oLitEnd - base)) {
+ /* offset beyond prefix */
+ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
-+ /* span extDict & currentPrefixSegment */
-+ { size_t const length1 = dictEnd - match;
++ /* span extDict & currentPrefixSegment */
++ { size_t const length1 = dictEnd - match;
+ memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ }
+ } }
+ /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-+#endif
+
+ /* match within prefix */
+ if (sequence.offset < 8) {
+ int nbSeq;
+
+ /* Build Decoding Tables */
-+ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
++ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ }
+ }
+
+ /* last literal segment */
-+ { size_t const lastLLSize = litEnd - litPtr;
++ { size_t const lastLLSize = litEnd - litPtr;
+ if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
+
+ /* Decode literals section */
-+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
++ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ if (ZSTD_isError(headerSize)) return headerSize;
+
+ /* Frame Header */
-+ { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
++ { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
+ if (ZSTD_isError(ret)) return ret;
+ if (ret > 0) return ERROR(srcSize_wrong);
+ }
+ if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+
+ /* Frame Header */
-+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
++ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
+ }
+ ZSTD_checkContinuity(dctx, dst);
+
-+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
++ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ if (ZSTD_isError(res)) return res;
+ /* don't need to bounds check this, ZSTD_decompressFrame will have
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
-+ { blockProperties_t bp;
++ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ dctx->expected = cBlockSize;
+ }
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
-+ { size_t rSize;
++ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ return rSize;
+ }
+ case ZSTDds_checkChecksum:
-+ { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
++ { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
+ if (check32 != h32) return ERROR(checksum_wrong);
+ dctx->expected = 0;
+ return 0;
+ }
+ case ZSTDds_decodeSkippableHeader:
-+ { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
++ { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+ }
+ case ZSTDds_skipFrame:
-+ { dctx->expected = 0;
++ { dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+ dictPtr += 8; /* skip header = magic + dictID */
+
+
-+ { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr);
++ { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr);
+ if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+ dictPtr += hSize;
+ }
+
-+ { short offcodeNCount[MaxOff+1];
++ { short offcodeNCount[MaxOff+1];
+ U32 offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ dictPtr += offcodeHeaderSize;
+ }
+
-+ { short matchlengthNCount[MaxML+1];
++ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ dictPtr += matchlengthHeaderSize;
+ }
+
-+ { short litlengthNCount[MaxLL+1];
++ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ }
+
+ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-+ { int i;
++ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
-+ { U32 const magic = MEM_readLE32(dict);
++ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_DICT_MAGIC) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + 4);
+
+ /* load entropy tables */
-+ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
++ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
+ if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
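
Note: the layout consumed above is a 4-byte little-endian magic (ZSTD_DICT_MAGIC), then a 4-byte dictID, then the entropy tables, with everything after them referenced as raw prefix content. A hedged probe of just the header, mirroring the checks in ZSTD_decompress_insertDictionary (dict_header_probe is a hypothetical helper):

    /* Sketch: does this blob carry a zstd dictionary header, and if so,
     * which dictID does it advertise? Returns 0 for pure-content mode. */
    static int dict_header_probe(const void *dict, size_t dictSize, U32 *dictID)
    {
        if (dictSize < 8)
            return 0;                          /* too small for magic + dictID */
        if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC)
            return 0;                          /* treated as pure content */
        *dictID = MEM_readLE32((const char *)dict + 4);
        return 1;                              /* entropy tables follow */
    }
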
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (ddict->dictSize < 8) return 0;
-+ { U32 const magic = MEM_readLE32(ddict->dictContent);
++ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4);
+{
+ if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
-+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
++ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+ if (!ddict) return NULL;
+ ddict->cMem = customMem;
+
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ /* parse dictionary content */
-+ { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
++ { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
+ if (ZSTD_isError(errorCode)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
-+ { ZSTD_customMem const cMem = ddict->cMem;
++ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_free(ddict->dictBuffer, cMem);
+ ZSTD_free(ddict, cMem);
+ return 0;
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ if (zds==NULL) return 0; /* support free on null */
-+ { ZSTD_customMem const cMem = zds->customMem;
++ { ZSTD_customMem const cMem = zds->customMem;
+ ZSTD_freeDCtx(zds->dctx);
+ zds->dctx = NULL;
+ ZSTD_freeDDict(zds->ddictLocal);
+ /* fall-through */
+
+ case zdss_loadHeader :
-+ { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
++ { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
+ if (ZSTD_isError(hSize))
+ return hSize;
+ if (hSize != 0) { /* need more input */
+
+ /* Consume header */
+ ZSTD_refDDict(zds->dctx, zds->ddict);
-+ { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
++ { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
+ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
-+ { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
++ { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size));
+ } }
+
+ if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+
+ /* Adapt buffer sizes to frame header instructions */
-+ { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
++ { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+ size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+ zds->blockSize = blockSize;
+ if (zds->inBuffSize < blockSize) {
+ /* fall-through */
+
+ case zdss_read:
-+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+ if (neededInSize==0) { /* end of frame */
+ zds->stage = zdss_init;
+ someMoreWork = 0;
+ }
+
+ case zdss_load:
-+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
++ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+ size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
+ size_t loadedSize;
+ if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */
+ } }
+
+ case zdss_flush:
-+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
++ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ /* result */
+ input->pos += (size_t)(ip-istart);
+ output->pos += (size_t)(op-ostart);
-+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
++ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+MODULE_DESCRIPTION("Zstd Decompressor");
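
Note: a minimal consumer of the zdss_* stages above, using the streaming entry point whose epilogue is shown (the input->pos / output->pos bookkeeping). Assumes zds was already created and initialized; names follow this API's ZSTD_inBuffer/ZSTD_outBuffer convention, and error handling is reduced to a bail-out:

    /* Sketch: feed one input buffer through a DStream and collect output. */
    static size_t pump_dstream(ZSTD_DStream *zds, void *dst, size_t dstCapacity,
                               const void *src, size_t srcSize)
    {
        ZSTD_inBuffer in = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        while (in.pos < in.size) {
            size_t const hint = ZSTD_decompressStream(zds, &out, &in);
            if (ZSTD_isError(hint))
                return hint;
            if (hint == 0)
                break;          /* frame fully decoded and flushed */
        }
        return out.pos;         /* decompressed bytes produced */
    }
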
diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
new file mode 100644
-index 0000000..68d88082
+index 0000000..36ad266
--- /dev/null
+++ b/lib/zstd/entropy_common.c
@@ -0,0 +1,217 @@
+ } else {
+ bitStream >>= 2;
+ } }
-+ { int const max = (2*threshold-1) - remaining;
++ { int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
-+ { U32 n;
++ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ /* collect weight stats */
+ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
-+ { U32 n; for (n=0; n<oSize; n++) {
++ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
-+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
++ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
-+ { U32 const total = 1 << tableLog;
++ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
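
Note: a worked instance of the reconstruction above. Each weight w contributes (1<<w)>>1 to weightTotal, the grand total must land exactly on a power of two, and the shortfall pins the one weight the header omits. Hedged example with made-up weights:

    #include <assert.h>
    /* Weights {2,1,1} read from the header: weightTotal = 2+1+1 = 4. */
    static void last_weight_example(void)
    {
        unsigned const weightTotal = 4;
        unsigned const tableLog = (31 - __builtin_clz(weightTotal)) + 1; /* 3 */
        unsigned const total = 1u << tableLog;                           /* 8 */
        unsigned const rest = total - weightTotal;                       /* 4 */
        unsigned const verif = 1u << (31 - __builtin_clz(rest));
        unsigned const lastWeight = (31 - __builtin_clz(rest)) + 1;      /* 3 */
        assert(verif == rest);                /* rest must be a power of two */
        assert(weightTotal + ((1u << lastWeight) >> 1) == total);        /* 8 */
    }
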
+#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
new file mode 100644
-index 0000000..14fa439
+index 0000000..6a78957
--- /dev/null
+++ b/lib/zstd/fse.h
@@ -0,0 +1,606 @@
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
-+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
++ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+#endif /* FSE_H */
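
Note: the (deltaNbBits + (1<<15)) >> 16 rounding in FSE_initCState2 above reads a bit cost out of a 16.16 fixed-point delta. For the flat case built later in this patch (fse_compress.c sets deltaNbBits = (nbBits << 16) - (1 << nbBits)), the rounding recovers nbBits exactly. A hedged self-check:

    #include <assert.h>
    static void deltaNbBits_example(void)
    {
        unsigned const nbBits = 5;
        unsigned const deltaNbBits = (nbBits << 16) - (1u << nbBits); /* 327648 */
        /* same rounding as FSE_initCState2 above */
        assert(((deltaNbBits + (1u << 15)) >> 16) == nbBits);
    }
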
diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
new file mode 100644
-index 0000000..b6a6d46
+index 0000000..d0b5673
--- /dev/null
+++ b/lib/zstd/fse_compress.c
-@@ -0,0 +1,788 @@
+@@ -0,0 +1,774 @@
+/* ******************************************************************
+ FSE : Finite State Entropy encoder
+ Copyright (C) 2013-2015, Yann Collet.
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ /* symbol start positions */
-+ { U32 u;
++ { U32 u;
+ cumul[0] = 0;
+ for (u=1; u<=maxSymbolValue+1; u++) {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ }
+
+ /* Spread symbols */
-+ { U32 position = 0;
++ { U32 position = 0;
+ U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ int nbOccurences;
+ }
+
+ /* Build table */
-+ { U32 u; for (u=0; u<tableSize; u++) {
++ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
-+ { unsigned total = 0;
++ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
-+ { int count = normalizedCounter[charnum++];
++ { int count = normalizedCounter[charnum++];
+ int const max = (2*threshold-1)-remaining;
+ remaining -= count < 0 ? -count : count;
+ count++; /* +1 for extra accuracy */
+ if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
+
+ /* by stripes of 16 bytes */
-+ { U32 cached = MEM_read32(ip); ip += 4;
++ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+ } }
+
-+ { U32 s; for (s=0; s<=maxSymbolValue; s++) {
++ { U32 s; for (s=0; s<=maxSymbolValue; s++) {
+ count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+ if (count[s] > max) max = count[s];
+ } }
+ return 0;
+ }
+
-+ { U64 const vStepLog = 62 - tableLog;
++ { U64 const vStepLog = 62 - tableLog;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
+ U64 tmpTotal = mid;
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
-+ { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
++ { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ U64 const scale = 62 - tableLog;
+ U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
+ U64 const vStep = 1ULL<<(scale-20);
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
-+#if 0
-+ { /* Print Table (debug) */
-+ U32 s;
-+ U32 nTotal = 0;
-+ for (s=0; s<=maxSymbolValue; s++)
-+ printf("%3i: %4i \n", s, normalizedCounter[s]);
-+ for (s=0; s<=maxSymbolValue; s++)
-+ nTotal += abs(normalizedCounter[s]);
-+ if (nTotal != (1U<<tableLog))
-+ printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
-+ getchar();
-+ }
-+#endif
-+
+ return tableLog;
+}
+
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
-+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
++ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
+
+ /* Scan input and build symbol stats */
-+ { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
++ { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
+ if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+
+ /* Write table description header */
-+ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
++ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+ op += nc_err;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
-+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
++ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+#endif /* FSE_COMMONDEFS_ONLY */
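
Note: the Counting1..Counting4 tables above exist so that adjacent byte increments never hit the same counter back-to-back, breaking the store-to-load dependency chain; the four histograms are summed once at the end. A reduced sketch of the idiom (hypothetical helper; plain byte loads instead of the 16-byte-stripe U32 reads):

    #include <stddef.h>
    static void count_bytes_4way(unsigned count[256],
                                 const unsigned char *p, size_t n)
    {
        unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
        size_t i;
        for (i = 0; i + 4 <= n; i += 4) {   /* four independent chains */
            c1[p[i]]++;
            c2[p[i+1]]++;
            c3[p[i+2]]++;
            c4[p[i+3]]++;
        }
        for (; i < n; i++)
            c1[p[i]]++;                     /* tail */
        for (i = 0; i < 256; i++)
            count[i] = c1[i] + c2[i] + c3[i] + c4[i];
    }
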
diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
new file mode 100644
-index 0000000..2a35f17
+index 0000000..6de5411
--- /dev/null
+++ b/lib/zstd/fse_decompress.c
@@ -0,0 +1,292 @@
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
-+ { FSE_DTableHeader DTableH;
++ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
-+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
++ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ }
+
+ /* Spread symbols */
-+ { U32 const tableMask = tableSize-1;
++ { U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ }
+
+ /* Build Decoding table */
-+ { U32 u;
++ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U16 nextState = symbolNext[symbol]++;
+#endif /* HUF_H_298734234 */
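
Note: both "Spread symbols" loops above (compress and decompress sides) walk the table with the same stride, FSE_TABLESTEP. Assuming the usual zstd definition, (tableSize>>1) + (tableSize>>3) + 3, the step is odd and therefore coprime with the power-of-two table size, so the walk visits every cell exactly once before returning to 0. Reduced sketch:

    /* Sketch: place counts[s] copies of each symbol across a 2^tableLog
     * table the way the spread loops above do (low-proba handling elided). */
    static void spread_symbols(unsigned char *table, unsigned tableLog,
                               const unsigned *counts, unsigned maxSymbol)
    {
        unsigned const tableSize = 1u << tableLog;
        unsigned const tableMask = tableSize - 1;
        unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;
        unsigned s, pos = 0;
        for (s = 0; s <= maxSymbol; s++) {
            unsigned n;
            for (n = 0; n < counts[s]; n++) {
                table[pos] = (unsigned char)s;
                pos = (pos + step) & tableMask;
            }
        }
        /* if sum(counts) == tableSize, pos has wrapped back to 0 here */
    }
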
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
new file mode 100644
-index 0000000..a1a1d45
+index 0000000..41b9ce0
--- /dev/null
+++ b/lib/zstd/huf_compress.c
@@ -0,0 +1,644 @@
+ if (wtSize <= 1) return 0; /* Not compressible */
+
+ /* Scan input and build symbol stats */
-+ { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
++ { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ }
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+
+ /* Write table description header */
-+ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
++ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+ op += hSize;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
-+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
++ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+ huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+
+ /* attempt weights compression by FSE */
-+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
++ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
+ if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
+ op[0] = (BYTE)hSize;
+ return hSize+1;
+ if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);
+
+ /* Prepare base value per rank */
-+ { U32 n, nextRankStart = 0;
++ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
-+ U32 current = nextRankStart;
++ U32 curr = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
-+ rankVal[n] = current;
++ rankVal[n] = curr;
+ } }
+
+ /* fill nbBits */
-+ { U32 n; for (n=0; n<nbSymbols; n++) {
++ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
+ } }
+
+ /* fill val */
-+ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
++ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
+ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+ { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
+ /* determine starting value per rank */
+ valPerRank[tableLog+1] = 0; /* for w==0 */
-+ { U16 min = 0;
++ { U16 min = 0;
+ U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
+
+ /* there are several too large elements (at least >= 2) */
-+ { int totalCost = 0;
++ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
+ U32 n = lastNonNull;
+
+ totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
+
+ /* repay normalized cost */
-+ { U32 const noSymbol = 0xF0F0F0F0;
++ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+ int pos;
+
+ /* Get pos of last (smallest) symbol per rank */
+ memset(rankLast, 0xF0, sizeof(rankLast));
-+ { U32 currentNbBits = maxNbBits;
++ { U32 currNbBits = maxNbBits;
+ for (pos=n ; pos >= 0; pos--) {
-+ if (huffNode[pos].nbBits >= currentNbBits) continue;
-+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
-+ rankLast[maxNbBits-currentNbBits] = pos;
++ if (huffNode[pos].nbBits >= currNbBits) continue;
++ currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
++ rankLast[maxNbBits-currNbBits] = pos;
+ } }
+
+ while (totalCost > 0) {
+ U32 lowPos = rankLast[nBitsToDecrease-1];
+ if (highPos == noSymbol) continue;
+ if (lowPos == noSymbol) break;
-+ { U32 const highTotal = huffNode[highPos].count;
++ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+
+typedef struct {
+ U32 base;
-+ U32 current;
++ U32 curr;
+} rankPos;
+
+static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+ rank[r].base ++;
+ }
+ for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
-+ for (n=0; n<32; n++) rank[n].current = rank[n].base;
++ for (n=0; n<32; n++) rank[n].curr = rank[n].base;
+ for (n=0; n<=maxSymbolValue; n++) {
+ U32 const c = count[n];
+ U32 const r = BIT_highbit32(c+1) + 1;
-+ U32 pos = rank[r].current++;
++ U32 pos = rank[r].curr++;
+ while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
+
+ /* fill result into tree (val, nbBits) */
-+ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
++ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
-+ { U16 min = 0;
++ { U16 min = 0;
+ for (n=maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ if (srcSize < 12) return 0; /* no saving possible : too small input */
+ op += 6; /* jumpTable */
+
-+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+ if (cSize==0) return 0;
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
-+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+ if (cSize==0) return 0;
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
-+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+ if (cSize==0) return 0;
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
-+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
++ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
+ if (cSize==0) return 0;
+ op += cSize;
+ }
+ if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
+ if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
+ if (!dstSize) return 0; /* cannot fit within dst budget */
-+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
++ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* curr block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+ }
+
+ /* Scan input and build symbol stats */
-+ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
++ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
+ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
+ if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
+ }
+
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
-+ { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
++ { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
+ huffLog = (U32)maxBits;
+ /* Zero the unused symbols so we can check it for validity */
+ memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
+ }
+
+ /* Write table description header */
-+ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
++ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
+ /* Check if using the previous table will be beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
+}
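
Note: the three MEM_writeLE16 calls above define the 4-stream block framing: a 6-byte jump table carrying the compressed sizes of streams 1-3 as little-endian U16s, with stream 4 implicit and running to the end of the block — which is why the decoders insist on cSrcSize >= 10 (jump table plus at least one byte per stream). A hedged decode-side sketch of slicing the block (hypothetical helper):

    #include <stddef.h>
    /* Sketch: locate the four Huffman streams behind the jump table. */
    static int huf_split4(const unsigned char *src, size_t srcSize,
                          const unsigned char *seg[4], size_t segSize[4])
    {
        size_t s1, s2, s3;
        if (srcSize < 10)
            return -1;                        /* 6-byte table + 1 byte/stream */
        s1 = src[0] | ((size_t)src[1] << 8);  /* MEM_readLE16 equivalents */
        s2 = src[2] | ((size_t)src[3] << 8);
        s3 = src[4] | ((size_t)src[5] << 8);
        if (s1 + s2 + s3 >= srcSize - 6)
            return -1;                        /* stream 4 must be non-empty */
        seg[0] = src + 6;         segSize[0] = s1;
        seg[1] = seg[0] + s1;     segSize[1] = s2;
        seg[2] = seg[1] + s2;     segSize[2] = s3;
        seg[3] = seg[2] + s3;     segSize[3] = (srcSize - 6) - (s1 + s2 + s3);
        return 0;
    }
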
diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
new file mode 100644
-index 0000000..f73223c
+index 0000000..2d9b33b
--- /dev/null
+++ b/lib/zstd/huf_decompress.c
@@ -0,0 +1,835 @@
+ if (HUF_isError(iSize)) return iSize;
+
+ /* Table header */
-+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
++ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ }
+
+ /* Calculate starting value for each rank */
-+ { U32 n, nextRankStart = 0;
++ { U32 n, nextRankStart = 0;
+ for (n=1; n<tableLog+1; n++) {
-+ U32 const current = nextRankStart;
++ U32 const curr = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
-+ rankVal[n] = current;
++ rankVal[n] = curr;
+ } }
+
+ /* fill DTable */
-+ { U32 n;
++ { U32 n;
+ for (n=0; n<nbSymbols; n++) {
+ U32 const w = huffWeight[n];
+ U32 const length = (1 << w) >> 1;
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
-+ { const BYTE* const istart = (const BYTE*) cSrc;
++ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable + 1;
+ }
+
+ /* fill DTable */
-+ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
++ { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+ const U32 symbol = sortedSymbols[s].symbol;
+ const U32 weight = sortedSymbols[s].weight;
+ const U32 nbBits = nbBitsBaseline - weight;
+ MEM_writeLE16(&(DElt.sequence), symbol);
+ DElt.nbBits = (BYTE)(nbBits);
+ DElt.length = 1;
-+ { U32 const end = start + length;
++ { U32 const end = start + length;
+ U32 u;
+ for (u = start; u < end; u++) DTable[u] = DElt;
+ } }
+ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
-+ { U32 w, nextRankStart = 0;
++ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
-+ U32 current = nextRankStart;
++ U32 curr = nextRankStart;
+ nextRankStart += rankStats[w];
-+ rankStart[w] = current;
++ rankStart[w] = curr;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ sizeOfSort = nextRankStart;
+ }
+
+ /* sort symbols by weight */
-+ { U32 s;
++ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = weightList[s];
+ U32 const r = rankStart[w]++;
+ }
+
+ /* Build rankVal */
-+ { U32* const rankVal0 = rankVal[0];
-+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
++ { U32* const rankVal0 = rankVal[0];
++ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
-+ U32 current = nextRankVal;
++ U32 curr = nextRankVal;
+ nextRankVal += rankStats[w] << (w+rescale);
-+ rankVal0[w] = current;
++ rankVal0[w] = curr;
+ } }
-+ { U32 const minBits = tableLog+1 - maxW;
++ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = rankVal[consumed];
+ BIT_DStream_t bitD;
+
+ /* Init */
-+ { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
++ { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+ if (HUF_isError(errorCode)) return errorCode;
+ }
+
+ /* decode */
-+ { BYTE* const ostart = (BYTE*) dst;
++ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
-+ { const BYTE* const istart = (const BYTE*) cSrc;
++ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1;
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
-+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
+
-+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+ if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
-+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
++ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+ HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+ }
+}
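
Note: the curr/nextRankStart pattern that recurs above (and in huf_compress.c) is an in-place exclusive prefix sum: per-rank counts become per-rank starting offsets. The shifted variants, e.g. rankVal[n] << (n-1), additionally weight each count by the slice of code space a symbol of that rank occupies. Reduced sketch:

    /* Sketch: rank[w] holds a count on entry and the first slot for
     * weight w on exit -- the recurring exclusive-prefix-sum idiom. */
    static void ranks_to_starts(unsigned *rank, unsigned maxW)
    {
        unsigned w, next = 0;
        for (w = 1; w <= maxW; w++) {
            unsigned const curr = next;
            next += rank[w];
            rank[w] = curr;
        }
    }
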
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
new file mode 100644
-index 0000000..a61bd27
+index 0000000..5ed5419
--- /dev/null
+++ b/lib/zstd/zstd_internal.h
-@@ -0,0 +1,274 @@
+@@ -0,0 +1,261 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val)
+{
-+# if defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
+ return 31 - __builtin_clz(val);
-+# else /* Software version */
-+ static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
-+ U32 v = val;
-+ int r;
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
-+ return r;
-+# endif
+}
+
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
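
Note: with the De Bruijn fallback dropped, ZSTD_highbit32 relies solely on __builtin_clz, which is safe for the kernel (GCC-compatible compilers only) but undefined for val == 0 — hence guards like the (x+1) arguments at the zstd_opt.h call sites below. A tiny self-check:

    #include <assert.h>
    static void highbit32_example(void)
    {
        /* index of the highest set bit; caller must ensure val != 0 */
        assert(31 - __builtin_clz(1u) == 0);
        assert(31 - __builtin_clz(0x8000u) == 15);
        assert(31 - __builtin_clz(0xFFFFFFFFu) == 31);
    }
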
diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
new file mode 100644
-index 0000000..297a715
+index 0000000..9bd5303
--- /dev/null
+++ b/lib/zstd/zstd_opt.h
@@ -0,0 +1,921 @@
+ }
+
+ /* literal Length */
-+ { const BYTE LL_deltaCode = 19;
++ { const BYTE LL_deltaCode = 19;
+ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+ price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1);
+ }
+ if (!ultra && offCode >= 20) price += (offCode-19)*2;
+
+ /* match Length */
-+ { const BYTE ML_deltaCode = 36;
++ { const BYTE ML_deltaCode = 36;
+ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+ price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1);
+ }
+ seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+
+ /* literal Length */
-+ { const BYTE LL_deltaCode = 19;
++ { const BYTE LL_deltaCode = 19;
+ const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+ seqStorePtr->litLengthFreq[llCode]++;
+ seqStorePtr->litLengthSum++;
+ }
+
+ /* match offset */
-+ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
++ { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
+ seqStorePtr->offCodeSum++;
+ seqStorePtr->offCodeFreq[offCode]++;
+ }
+
+ /* match Length */
-+ { const BYTE ML_deltaCode = 36;
++ { const BYTE ML_deltaCode = 36;
+ const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+ seqStorePtr->matchLengthFreq[mlCode]++;
+ seqStorePtr->matchLengthSum++;
+
+
+#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
-+ { \
++ { \
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \
+ opt[pos].mlen = mlen_; \
+ opt[pos].off = offset_; \
+ U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+ const BYTE* const base = zc->base;
-+ const U32 current = (U32)(ip-base);
++ const U32 curr = (U32)(ip-base);
+ const U32 hashLog = zc->params.cParams.hashLog;
+ const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const hashTable = zc->hashTable;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
-+ const U32 btLow = btMask >= current ? 0 : current - btMask;
++ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ const U32 windowLow = zc->lowLimit;
-+ U32* smallerPtr = bt + 2*(current&btMask);
-+ U32* largerPtr = bt + 2*(current&btMask) + 1;
-+ U32 matchEndIdx = current+8;
++ U32* smallerPtr = bt + 2*(curr&btMask);
++ U32* largerPtr = bt + 2*(curr&btMask) + 1;
++ U32 matchEndIdx = curr+8;
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+
+
+ if (minMatch == 3) { /* HC3 match finder */
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
-+ if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
++ if (matchIndex3>windowLow && (curr - matchIndex3 < (1<<18))) {
+ const BYTE* match;
-+ size_t currentMl=0;
++ size_t currMl=0;
+ if ((!extDict) || matchIndex3 >= dictLimit) {
+ match = base + matchIndex3;
-+ if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
++ if (match[bestLength] == ip[bestLength]) currMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ match = dictBase + matchIndex3;
+ if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
-+ currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
++ currMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
+ }
+
+ /* save best solution */
-+ if (currentMl > bestLength) {
-+ bestLength = currentMl;
-+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
-+ matches[mnum].len = (U32)currentMl;
++ if (currMl > bestLength) {
++ bestLength = currMl;
++ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
++ matches[mnum].len = (U32)currMl;
+ mnum++;
-+ if (currentMl > ZSTD_OPT_NUM) goto update;
-+ if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/
++ if (currMl > ZSTD_OPT_NUM) goto update;
++ if (ip+currMl == iLimit) goto update; /* best possible, and avoid read overflow*/
+ }
+ }
+ }
+
-+ hashTable[h] = current; /* Update Hash Table */
++ hashTable[h] = curr; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* nextPtr = bt + 2*(matchIndex & btMask);
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
-+ matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
++ matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if (matchLength > ZSTD_OPT_NUM) break;
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
-+ /* match is smaller than current */
++ /* match is smaller than curr */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
-+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
++ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to curr) */
+ } else {
-+ /* match is larger than current */
++ /* match is larger than curr */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ *smallerPtr = *largerPtr = 0;
+
+update:
-+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
++ zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr+1;
+ return mnum;
+}
+
+ litlen = (U32)(ip - anchor);
+
+ /* check repCode */
-+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+ for (i=(ip == anchor); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+ if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
+ }
+
+ best_mlen = minMatch;
-+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+ for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+ if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
+
+ /* Last Literals */
-+ { size_t const lastLLSize = iend - anchor;
++ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ while (ip < ilimit) {
+ U32 cur, match_num, last_pos, litlen, price;
+ U32 u, mlen, best_mlen, best_off, litLength;
-+ U32 current = (U32)(ip-base);
++ U32 curr = (U32)(ip-base);
+ memset(opt, 0, sizeof(ZSTD_optimal_t));
+ last_pos = 0;
+ opt[0].litlen = (U32)(ip - anchor);
+
+ /* check repCode */
-+ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
++ { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+ for (i = (ip==anchor); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
-+ const U32 repIndex = (U32)(current - repCur);
++ const U32 repIndex = (U32)(curr - repCur);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
-+ if ( (repCur > 0 && repCur <= (S32)current)
++ if ( (repCur > 0 && repCur <= (S32)curr)
+ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
+ && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
+ /* repcode detected we should take it */
+ }
+
+ best_mlen = minMatch;
-+ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
++ { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+ for (i = (mlen != 1); i<last_i; i++) {
+ const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
-+ const U32 repIndex = (U32)(current+cur - repCur);
++ const U32 repIndex = (U32)(curr+cur - repCur);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
-+ if ( (repCur > 0 && repCur <= (S32)(current+cur))
++ if ( (repCur > 0 && repCur <= (S32)(curr+cur))
+ && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
+ && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
+ /* repcode detected */
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
+
+ /* Last Literals */
-+ { size_t lastLLSize = iend - anchor;
++ { size_t lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
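
Note: the price terms above estimate a sequence's cost in bits as the code's raw extra bits plus an integer approximation of -log2(probability), computed as log2(sum) - highbit32(freq+1); the +1 keeps never-seen codes finite and nonzero. A hedged restatement of the literal-length term, assuming log2sum tracks ZSTD_highbit32 of the running litLengthSum as the hunks above maintain it:

    /* Sketch of the litLength pricing used above. */
    static unsigned ll_price_sketch(unsigned llCode, const unsigned *llFreq,
                                    unsigned log2sum, const unsigned *LL_bits)
    {
        unsigned const log2freq = 31 - __builtin_clz(llFreq[llCode] + 1);
        return LL_bits[llCode] + log2sum - log2freq;
    }
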