* Simple functions
***************************************/
size_t ZSTD_compress( void* dst, size_t maxDstSize,
- const void* src, size_t srcSize);
+ const void* src, size_t srcSize,
+ int compressionLevel);
size_t ZSTD_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
ZSTD_compressCCtx() :
Same as ZSTD_compress(), but requires a ZSTD_CCtx working space already allocated
*/
-size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel);
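/* Illustrative usage sketch for the one-shot API above (a minimal example, not a prescribed
   pattern). It assumes ZSTD_compressBound() and ZSTD_isError() are exposed by the same header,
   and that the caller remembers the original size for decompression; allocation checks are
   omitted for brevity :

       #include <stdlib.h>
       #include <string.h>
       #include "zstd.h"

       static int roundTrip(const void* src, size_t srcSize, int compressionLevel)
       {
           size_t const bound = ZSTD_compressBound(srcSize);
           void* const cBuf = malloc(bound);
           void* const rBuf = malloc(srcSize);
           size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, compressionLevel);
           size_t const rSize = ZSTD_isError(cSize) ? cSize
                              : ZSTD_decompress(rBuf, srcSize, cBuf, cSize);
           int const ok = !ZSTD_isError(rSize) && (rSize == srcSize)
                       && !memcmp(src, rBuf, srcSize);
           free(cBuf); free(rBuf);
           return ok;   // 1 if the round trip reproduced the input exactly
       }
*/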
#if defined (__cplusplus)
***************************************/
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */
-#include "zstdhc_static.h"
#include "zstd_static.h"
#include "zstd_internal.h"
#include "mem.h"
#define BLOCKSIZE (128 KB) /* define, for static allocation */
#define WORKPLACESIZE (BLOCKSIZE*3)
-struct ZSTD_HC_CCtx_s
+struct ZSTD_CCtx_s
{
const BYTE* end; /* next block here to continue on current prefix */
const BYTE* base; /* All regular indexes relative to this position */
U32 dictLimit; /* below that point, need extDict */
U32 lowLimit; /* below that point, no more data */
U32 nextToUpdate; /* index from which to continue dictionary update */
- ZSTD_HC_parameters params;
+ ZSTD_parameters params;
void* workSpace;
size_t workSpaceSize;
};
-ZSTD_HC_CCtx* ZSTD_HC_createCCtx(void)
+ZSTD_CCtx* ZSTD_createCCtx(void)
{
- return (ZSTD_HC_CCtx*) calloc(1, sizeof(ZSTD_HC_CCtx));
+ return (ZSTD_CCtx*) calloc(1, sizeof(ZSTD_CCtx));
}
-size_t ZSTD_HC_freeCCtx(ZSTD_HC_CCtx* cctx)
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
free(cctx->workSpace);
free(cctx);
}
-/** ZSTD_HC_validateParams
+/** ZSTD_validateParams
correct params value to remain within authorized range
optimize for srcSize if srcSize > 0 */
-void ZSTD_HC_validateParams(ZSTD_HC_parameters* params, U64 srcSizeHint)
+void ZSTD_validateParams(ZSTD_parameters* params, U64 srcSizeHint)
{
- const U32 btPlus = (params->strategy == ZSTD_HC_btlazy2);
+ const U32 btPlus = (params->strategy == ZSTD_btlazy2);
/* validate params */
- if (params->windowLog > ZSTD_HC_WINDOWLOG_MAX) params->windowLog = ZSTD_HC_WINDOWLOG_MAX;
- if (params->windowLog < ZSTD_HC_WINDOWLOG_MIN) params->windowLog = ZSTD_HC_WINDOWLOG_MIN;
+ if (params->windowLog > ZSTD_WINDOWLOG_MAX) params->windowLog = ZSTD_WINDOWLOG_MAX;
+ if (params->windowLog < ZSTD_WINDOWLOG_MIN) params->windowLog = ZSTD_WINDOWLOG_MIN;
/* correct params, to use less memory */
- if ((srcSizeHint > 0) && (srcSizeHint < (1<<ZSTD_HC_WINDOWLOG_MAX)))
+ if ((srcSizeHint > 0) && (srcSizeHint < (1<<ZSTD_WINDOWLOG_MAX)))
{
U32 srcLog = ZSTD_highbit((U32)srcSizeHint-1) + 1;
if (params->windowLog > srcLog) params->windowLog = srcLog;
}
- if (params->contentLog > params->windowLog+btPlus) params->contentLog = params->windowLog+btPlus; /* <= ZSTD_HC_CONTENTLOG_MAX */
- if (params->contentLog < ZSTD_HC_CONTENTLOG_MIN) params->contentLog = ZSTD_HC_CONTENTLOG_MIN;
- if (params->hashLog > ZSTD_HC_HASHLOG_MAX) params->hashLog = ZSTD_HC_HASHLOG_MAX;
- if (params->hashLog < ZSTD_HC_HASHLOG_MIN) params->hashLog = ZSTD_HC_HASHLOG_MIN;
- if (params->searchLog > ZSTD_HC_SEARCHLOG_MAX) params->searchLog = ZSTD_HC_SEARCHLOG_MAX;
- if (params->searchLog < ZSTD_HC_SEARCHLOG_MIN) params->searchLog = ZSTD_HC_SEARCHLOG_MIN;
- if (params->searchLength> ZSTD_HC_SEARCHLENGTH_MAX) params->searchLength = ZSTD_HC_SEARCHLENGTH_MAX;
- if (params->searchLength< ZSTD_HC_SEARCHLENGTH_MIN) params->searchLength = ZSTD_HC_SEARCHLENGTH_MIN;
- if ((U32)params->strategy>(U32)ZSTD_HC_btlazy2) params->strategy = ZSTD_HC_btlazy2;
+ if (params->contentLog > params->windowLog+btPlus) params->contentLog = params->windowLog+btPlus; /* <= ZSTD_CONTENTLOG_MAX */
+ if (params->contentLog < ZSTD_CONTENTLOG_MIN) params->contentLog = ZSTD_CONTENTLOG_MIN;
+ if (params->hashLog > ZSTD_HASHLOG_MAX) params->hashLog = ZSTD_HASHLOG_MAX;
+ if (params->hashLog < ZSTD_HASHLOG_MIN) params->hashLog = ZSTD_HASHLOG_MIN;
+ if (params->searchLog > ZSTD_SEARCHLOG_MAX) params->searchLog = ZSTD_SEARCHLOG_MAX;
+ if (params->searchLog < ZSTD_SEARCHLOG_MIN) params->searchLog = ZSTD_SEARCHLOG_MIN;
+ if (params->searchLength> ZSTD_SEARCHLENGTH_MAX) params->searchLength = ZSTD_SEARCHLENGTH_MAX;
+ if (params->searchLength< ZSTD_SEARCHLENGTH_MIN) params->searchLength = ZSTD_SEARCHLENGTH_MIN;
+ if ((U32)params->strategy>(U32)ZSTD_btlazy2) params->strategy = ZSTD_btlazy2;
}
-static size_t ZSTD_HC_resetCCtx_advanced (ZSTD_HC_CCtx* zc,
- ZSTD_HC_parameters params,
+static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
+ ZSTD_parameters params,
U64 srcSizeHint)
{
- ZSTD_HC_validateParams(&params, srcSizeHint);
+ ZSTD_validateParams(&params, srcSizeHint);
/* reserve table memory */
{
- const U32 contentLog = params.strategy == ZSTD_HC_fast ? 1 : params.contentLog;
+ const U32 contentLog = params.strategy == ZSTD_fast ? 1 : params.contentLog;
const size_t tableSpace = ((1 << contentLog) + (1 << params.hashLog)) * sizeof(U32);
const size_t neededSpace = tableSpace + WORKPLACESIZE;
if (zc->workSpaceSize < neededSpace)
***************************************/
static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_HC_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_HC_hash4Ptr(const void* ptr, U32 h) { return ZSTD_HC_hash4(MEM_read32(ptr), h); }
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_HC_hash5(U64 u, U32 h) { return (size_t)((u * prime5bytes) << (64-40) >> (64-h)) ; }
-static size_t ZSTD_HC_hash5Ptr(const void* p, U32 h) { return ZSTD_HC_hash5(MEM_read64(p), h); }
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)((u * prime5bytes) << (64-40) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_read64(p), h); }
static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_HC_hash6(U64 u, U32 h) { return (size_t)((u * prime6bytes) << (64-48) >> (64-h)) ; }
-static size_t ZSTD_HC_hash6Ptr(const void* p, U32 h) { return ZSTD_HC_hash6(MEM_read64(p), h); }
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)((u * prime6bytes) << (64-48) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_read64(p), h); }
static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_HC_hash7(U64 u, U32 h) { return (size_t)((u * prime7bytes) << (64-56) >> (64-h)) ; }
-static size_t ZSTD_HC_hash7Ptr(const void* p, U32 h) { return ZSTD_HC_hash7(MEM_read64(p), h); }
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)((u * prime7bytes) << (64-56) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_read64(p), h); }
-static size_t ZSTD_HC_hashPtr(const void* p, U32 hBits, U32 mls)
+static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
switch(mls)
{
default:
- case 4: return ZSTD_HC_hash4Ptr(p, hBits);
- case 5: return ZSTD_HC_hash5Ptr(p, hBits);
- case 6: return ZSTD_HC_hash6Ptr(p, hBits);
- case 7: return ZSTD_HC_hash7Ptr(p, hBits);
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
}
}
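/* Illustrative note : the functions above are multiplicative hashes. The 4-byte variant keeps
   the top hBits bits of (u * prime4bytes), i.e. ZSTD_hash4(u, 16) == (u * 2654435761U) >> 16.
   The 5/6/7-byte variants shift the 64-bit product left first, so that only the requested
   number of input bytes (read little-endian) can influence the bits that are kept. */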
***************************************/
FORCE_INLINE
-size_t ZSTD_HC_compressBlock_fast_generic(ZSTD_HC_CCtx* ctx,
+size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize,
const U32 mls)
/* init */
if (ip == base)
{
- hashTable[ZSTD_HC_hashPtr(base+1, hBits, mls)] = 1;
- hashTable[ZSTD_HC_hashPtr(base+2, hBits, mls)] = 2;
- hashTable[ZSTD_HC_hashPtr(base+3, hBits, mls)] = 3;
+ hashTable[ZSTD_hashPtr(base+1, hBits, mls)] = 1;
+ hashTable[ZSTD_hashPtr(base+2, hBits, mls)] = 2;
+ hashTable[ZSTD_hashPtr(base+3, hBits, mls)] = 3;
ip = base+4;
}
ZSTD_resetSeqStore(seqStorePtr);
/* Main Search Loop */
while (ip < ilimit) /* < instead of <=, because ip+1 is also inserted into the hash table unconditionally (see Fill Table below) */
{
- const size_t h = ZSTD_HC_hashPtr(ip, hBits, mls);
+ const size_t h = ZSTD_hashPtr(ip, hBits, mls);
const BYTE* match = base + hashTable[h];
hashTable[h] = (U32)(ip-base);
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offsetCode, matchLength);
/* Fill Table */
- hashTable[ZSTD_HC_hashPtr(ip+1, hBits, mls)] = (U32)(ip+1-base);
+ hashTable[ZSTD_hashPtr(ip+1, hBits, mls)] = (U32)(ip+1-base);
ip += matchLength + MINMATCH;
anchor = ip;
if (ip < ilimit) /* same test as loop, for speed */
- hashTable[ZSTD_HC_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
}
}
}
-size_t ZSTD_HC_compressBlock_fast(ZSTD_HC_CCtx* ctx,
+size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
{
default:
case 4 :
- return ZSTD_HC_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 4);
+ return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 4);
case 5 :
- return ZSTD_HC_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 5);
+ return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 5);
case 6 :
- return ZSTD_HC_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 6);
+ return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 6);
case 7 :
- return ZSTD_HC_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 7);
+ return ZSTD_compressBlock_fast_generic(ctx, dst, maxDstSize, src, srcSize, 7);
}
}
/* *************************************
* Binary Tree search
***************************************/
-/** ZSTD_HC_insertBt1 : add one ptr to tree
+/** ZSTD_insertBt1 : add one ptr to tree
@ip : assumed <= iend-8 */
-static U32 ZSTD_HC_insertBt1(ZSTD_HC_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares)
+static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares)
{
U32* const hashTable = zc->hashTable;
const U32 hashLog = zc->params.hashLog;
- const size_t h = ZSTD_HC_hashPtr(ip, hashLog, mls);
+ const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const bt = zc->contentTable;
const U32 btLog = zc->params.contentLog - 1;
const U32 btMask= (1 << btLog) - 1;
FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
-size_t ZSTD_HC_insertBtAndFindBestMatch (
- ZSTD_HC_CCtx* zc,
+size_t ZSTD_insertBtAndFindBestMatch (
+ ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
U32 nbCompares, const U32 mls)
{
U32* const hashTable = zc->hashTable;
const U32 hashLog = zc->params.hashLog;
- const size_t h = ZSTD_HC_hashPtr(ip, hashLog, mls);
+ const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const bt = zc->contentTable;
const U32 btLog = zc->params.contentLog - 1;
const U32 btMask= (1 << btLog) - 1;
}
-static const BYTE* ZSTD_HC_updateTree(ZSTD_HC_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+static const BYTE* ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
{
const BYTE* const base = zc->base;
const U32 target = (U32)(ip - base);
//size_t dummy;
for( ; idx < target ; )
- idx += ZSTD_HC_insertBt1(zc, base+idx, mls, iend, nbCompares);
- //ZSTD_HC_insertBtAndFindBestMatch(zc, base+idx, iend, &dummy, nbCompares, mls);
+ idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares);
+ //ZSTD_insertBtAndFindBestMatch(zc, base+idx, iend, &dummy, nbCompares, mls);
zc->nextToUpdate = idx;
return base + idx;
/** Tree updater, providing best match */
FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
-size_t ZSTD_HC_BtFindBestMatch (
- ZSTD_HC_CCtx* zc,
+size_t ZSTD_BtFindBestMatch (
+ ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 maxNbAttempts, const U32 mls)
{
- const BYTE* nextToUpdate = ZSTD_HC_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+ const BYTE* nextToUpdate = ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
if (nextToUpdate > ip)
{
/* RLE data */
*offsetPtr = 1;
return ZSTD_count(ip, ip-1, iLimit);
}
- return ZSTD_HC_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls);
+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls);
}
-FORCE_INLINE size_t ZSTD_HC_BtFindBestMatch_selectMLS (
- ZSTD_HC_CCtx* zc, /* Index table will be updated */
+FORCE_INLINE size_t ZSTD_BtFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc, /* Index table will be updated */
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 maxNbAttempts, const U32 matchLengthSearch)
switch(matchLengthSearch)
{
default :
- case 4 : return ZSTD_HC_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5 : return ZSTD_HC_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 6 : return ZSTD_HC_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
}
}
#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]
/* Update chains up to ip (excluded) */
-static U32 ZSTD_HC_insertAndFindFirstIndex (ZSTD_HC_CCtx* zc, const BYTE* ip, U32 mls)
+static U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
{
U32* const hashTable = zc->hashTable;
const U32 hashLog = zc->params.hashLog;
while(idx < target)
{
- size_t h = ZSTD_HC_hashPtr(base+idx, hashLog, mls);
+ size_t h = ZSTD_hashPtr(base+idx, hashLog, mls);
NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
hashTable[h] = idx;
idx++;
}
zc->nextToUpdate = target;
- return hashTable[ZSTD_HC_hashPtr(ip, hashLog, mls)];
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
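/* Illustrative note : this is the hash-chain ("Hc") match finder. hashTable[h] holds the most
   recent position whose first mls bytes hash to h, and NEXT_IN_CHAIN(pos, mask) links each
   inserted position to the previous position sharing the same hash. ZSTD_HcFindBestMatch()
   below walks that chain from newest to oldest, testing up to maxNbAttempts candidates. */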
FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
-size_t ZSTD_HC_HcFindBestMatch (
- ZSTD_HC_CCtx* zc, /* Index table will be updated */
+size_t ZSTD_HcFindBestMatch (
+ ZSTD_CCtx* zc, /* Index table will be updated */
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 maxNbAttempts, const U32 matchLengthSearch)
size_t ml=0;
/* HC4 match finder */
- matchIndex = ZSTD_HC_insertAndFindFirstIndex (zc, ip, matchLengthSearch);
+ matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, matchLengthSearch);
while ((matchIndex>lowLimit) && (nbAttempts))
{
}
-FORCE_INLINE size_t ZSTD_HC_HcFindBestMatch_selectMLS (
- ZSTD_HC_CCtx* zc, /* Index table will be updated */
+FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc, /* Index table will be updated */
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 maxNbAttempts, const U32 matchLengthSearch)
switch(matchLengthSearch)
{
default :
- case 4 : return ZSTD_HC_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
- case 5 : return ZSTD_HC_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
- case 6 : return ZSTD_HC_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ case 4 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 6 : return ZSTD_HcFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
}
}
/* common lazy function, to be inlined */
FORCE_INLINE
-size_t ZSTD_HC_compressBlock_lazy_generic(ZSTD_HC_CCtx* ctx,
+size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
void* dst, size_t maxDstSize, const void* src, size_t srcSize,
const U32 searchMethod, const U32 deep) /* 0 : hc; 1 : bt */
{
const U32 maxSearches = 1 << ctx->params.searchLog;
const U32 mls = ctx->params.searchLength;
- typedef size_t (*searchMax_f)(ZSTD_HC_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
size_t* offsetPtr,
U32 maxNbAttempts, U32 matchLengthSearch);
- searchMax_f searchMax = searchMethod ? ZSTD_HC_BtFindBestMatch_selectMLS : ZSTD_HC_HcFindBestMatch_selectMLS;
+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
/* init */
ZSTD_resetSeqStore(seqStorePtr);
seqStorePtr, srcSize);
}
-size_t ZSTD_HC_compressBlock_btlazy2(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return ZSTD_HC_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 1, 1);
+ return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 1, 1);
}
-size_t ZSTD_HC_compressBlock_lazy2(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return ZSTD_HC_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 1);
+ return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 1);
}
-size_t ZSTD_HC_compressBlock_lazy(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return ZSTD_HC_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 0);
+ return ZSTD_compressBlock_lazy_generic(ctx, dst, maxDstSize, src, srcSize, 0, 0);
}
-size_t ZSTD_HC_compressBlock_greedy(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
seqStore_t* seqStorePtr = &(ctx->seqStore);
const BYTE* const istart = (const BYTE*)src;
/* search */
{
size_t offset=999999;
- size_t matchLength = ZSTD_HC_HcFindBestMatch_selectMLS(ctx, ip, iend, &offset, maxSearches, mls);
+ size_t matchLength = ZSTD_HcFindBestMatch_selectMLS(ctx, ip, iend, &offset, maxSearches, mls);
if (matchLength < MINMATCH)
{
ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
}
-typedef size_t (*ZSTD_HC_blockCompressor) (ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-static ZSTD_HC_blockCompressor ZSTD_HC_selectBlockCompressor(ZSTD_HC_strategy strat)
+static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat)
{
switch(strat)
{
default :
- case ZSTD_HC_fast:
- return ZSTD_HC_compressBlock_fast;
- case ZSTD_HC_greedy:
- return ZSTD_HC_compressBlock_greedy;
- case ZSTD_HC_lazy:
- return ZSTD_HC_compressBlock_lazy;
- case ZSTD_HC_lazy2:
- return ZSTD_HC_compressBlock_lazy2;
- case ZSTD_HC_btlazy2:
- return ZSTD_HC_compressBlock_btlazy2;
+ case ZSTD_fast:
+ return ZSTD_compressBlock_fast;
+ case ZSTD_greedy:
+ return ZSTD_compressBlock_greedy;
+ case ZSTD_lazy:
+ return ZSTD_compressBlock_lazy;
+ case ZSTD_lazy2:
+ return ZSTD_compressBlock_lazy2;
+ case ZSTD_btlazy2:
+ return ZSTD_compressBlock_btlazy2;
}
}
-size_t ZSTD_HC_compressBlock(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+size_t ZSTD_compressBlock(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- ZSTD_HC_blockCompressor blockCompressor = ZSTD_HC_selectBlockCompressor(ctx->params.strategy);
+ ZSTD_blockCompressor blockCompressor = ZSTD_selectBlockCompressor(ctx->params.strategy);
return blockCompressor(ctx, dst, maxDstSize, src, srcSize);
}
-static size_t ZSTD_HC_compress_generic (ZSTD_HC_CCtx* ctxPtr,
+static size_t ZSTD_compress_generic (ZSTD_CCtx* ctxPtr,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- const ZSTD_HC_blockCompressor blockCompressor = ZSTD_HC_selectBlockCompressor(ctxPtr->params.strategy);
+ const ZSTD_blockCompressor blockCompressor = ZSTD_selectBlockCompressor(ctxPtr->params.strategy);
while (remaining)
{
}
-size_t ZSTD_HC_compressContinue (ZSTD_HC_CCtx* ctxPtr,
+size_t ZSTD_compressContinue (ZSTD_CCtx* ctxPtr,
void* dst, size_t dstSize,
const void* src, size_t srcSize)
{
if (ip != ctxPtr->end)
{
if (ctxPtr->end != NULL)
- ZSTD_HC_resetCCtx_advanced(ctxPtr, ctxPtr->params, srcSize);
+ ZSTD_resetCCtx_advanced(ctxPtr, ctxPtr->params, srcSize);
ctxPtr->base = ip;
}
ctxPtr->end = ip + srcSize;
- return ZSTD_HC_compress_generic (ctxPtr, dst, dstSize, src, srcSize);
+ return ZSTD_compress_generic (ctxPtr, dst, dstSize, src, srcSize);
}
-size_t ZSTD_HC_compressBegin_advanced(ZSTD_HC_CCtx* ctx,
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* ctx,
void* dst, size_t maxDstSize,
- const ZSTD_HC_parameters params,
+ const ZSTD_parameters params,
U64 srcSizeHint)
{
size_t errorCode;
if (maxDstSize < 4) return ERROR(dstSize_tooSmall);
- errorCode = ZSTD_HC_resetCCtx_advanced(ctx, params, srcSizeHint);
+ errorCode = ZSTD_resetCCtx_advanced(ctx, params, srcSizeHint);
if (ZSTD_isError(errorCode)) return errorCode;
MEM_writeLE32(dst, ZSTD_magicNumber); /* Write Header */
return 4;
}
-size_t ZSTD_HC_compressBegin(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, int compressionLevel, U64 srcSizeHint)
+size_t ZSTD_compressBegin(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, int compressionLevel, U64 srcSizeHint)
{
int tableID = ((srcSizeHint-1) > 128 KB); /* intentional underflow for 0 */
if (compressionLevel<=0) compressionLevel = 1;
- if (compressionLevel > ZSTD_HC_MAX_CLEVEL) compressionLevel = ZSTD_HC_MAX_CLEVEL;
- return ZSTD_HC_compressBegin_advanced(ctx, dst, maxDstSize, ZSTD_HC_defaultParameters[tableID][compressionLevel], srcSizeHint);
+ if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
+ return ZSTD_compressBegin_advanced(ctx, dst, maxDstSize, ZSTD_defaultParameters[tableID][compressionLevel], srcSizeHint);
}
-size_t ZSTD_HC_compressEnd(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize)
+size_t ZSTD_compressEnd(ZSTD_CCtx* ctx, void* dst, size_t maxDstSize)
{
BYTE* op = (BYTE*)dst;
return 3;
}
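/* Illustrative sketch of the streaming compression entry points above (an example, not a
   prescribed pattern). ZSTD_compressContinue() resets its history when input is not contiguous,
   so successive chunks of one contiguous buffer are fed here; the chunk size is an arbitrary
   choice and the CCtx allocation is assumed to succeed :

       size_t compressBuffer(void* dst, size_t dstCapacity, const char* src, size_t srcSize)
       {
           ZSTD_CCtx* const cctx = ZSTD_createCCtx();
           char* op = (char*)dst;
           size_t pos = 0;
           size_t r = ZSTD_compressBegin(cctx, op, dstCapacity, 1, srcSize);   // 4-byte frame header
           while (!ZSTD_isError(r) && (pos < srcSize))
           {
               size_t const chunk = (srcSize-pos < 65536) ? srcSize-pos : 65536;   // 64 KB chunks (arbitrary)
               op += r; dstCapacity -= r;
               r = ZSTD_compressContinue(cctx, op, dstCapacity, src+pos, chunk);
               pos += chunk;
           }
           if (!ZSTD_isError(r)) { op += r; dstCapacity -= r; r = ZSTD_compressEnd(cctx, op, dstCapacity); }   // 3-byte end mark
           ZSTD_freeCCtx(cctx);
           return ZSTD_isError(r) ? r : (size_t)(op + r - (char*)dst);
       }
*/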
-size_t ZSTD_HC_compress_advanced (ZSTD_HC_CCtx* ctx,
+size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize,
- ZSTD_HC_parameters params)
+ ZSTD_parameters params)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
/* correct params, to use less memory */
{
U32 srcLog = ZSTD_highbit((U32)srcSize-1) + 1;
- U32 contentBtPlus = (ctx->params.strategy == ZSTD_HC_btlazy2);
+ U32 contentBtPlus = (ctx->params.strategy == ZSTD_btlazy2);
if (params.windowLog > srcLog) params.windowLog = srcLog;
if (params.contentLog > srcLog+contentBtPlus) params.contentLog = srcLog+contentBtPlus;
}
/* Header */
- oSize = ZSTD_HC_compressBegin_advanced(ctx, dst, maxDstSize, params, srcSize);
+ oSize = ZSTD_compressBegin_advanced(ctx, dst, maxDstSize, params, srcSize);
if(ZSTD_isError(oSize)) return oSize;
op += oSize;
maxDstSize -= oSize;
/* body (compression) */
ctx->base = (const BYTE*)src;
- oSize = ZSTD_HC_compress_generic (ctx, op, maxDstSize, src, srcSize);
+ oSize = ZSTD_compress_generic (ctx, op, maxDstSize, src, srcSize);
if(ZSTD_isError(oSize)) return oSize;
op += oSize;
maxDstSize -= oSize;
/* Close frame */
- oSize = ZSTD_HC_compressEnd(ctx, op, maxDstSize);
+ oSize = ZSTD_compressEnd(ctx, op, maxDstSize);
if(ZSTD_isError(oSize)) return oSize;
op += oSize;
return (op - ostart);
}
-size_t ZSTD_HC_compressCCtx (ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
{
const int tableID = (srcSize > 128 KB);
- if (compressionLevel<=1) return ZSTD_compress(dst, maxDstSize, src, srcSize); /* fast mode */
- if (compressionLevel > ZSTD_HC_MAX_CLEVEL) compressionLevel = ZSTD_HC_MAX_CLEVEL;
- return ZSTD_HC_compress_advanced(ctx, dst, maxDstSize, src, srcSize, ZSTD_HC_defaultParameters[tableID][compressionLevel]);
+ //if (compressionLevel<=1) return ZSTD_compress(dst, maxDstSize, src, srcSize); /* fast mode */
+ if (compressionLevel < 1) compressionLevel = 1;
+ if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
+ return ZSTD_compress_advanced(ctx, dst, maxDstSize, src, srcSize, ZSTD_defaultParameters[tableID][compressionLevel]);
}
-size_t ZSTD_HC_compress(void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compress(void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
{
size_t result;
- ZSTD_HC_CCtx ctxBody;
+ ZSTD_CCtx ctxBody;
memset(&ctxBody, 0, sizeof(ctxBody));
- result = ZSTD_HC_compressCCtx(&ctxBody, dst, maxDstSize, src, srcSize, compressionLevel);
+ result = ZSTD_compressCCtx(&ctxBody, dst, maxDstSize, src, srcSize, compressionLevel);
free(ctxBody.workSpace);
return result;
}
--- /dev/null
+/*
+ zstd - standard compression library
+ Copyright (C) 2014-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - zstd source repository : https://github.com/Cyan4973/zstd
+ - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+* MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio
+* Reducing memory usage can improve speed, due to cache effects
+*/
+#define ZSTD_MEMORY_USAGE 16
+
+/*!
+ * HEAPMODE :
+ * Selects how default compression functions allocate memory for their hash table :
+ * on the stack (0, fastest), or on the heap (1, requires malloc()).
+ * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif /* ZSTD_HEAPMODE */
+
+/*!
+* LEGACY_SUPPORT :
+* the decompressor can decode older formats (from Zstd 0.1 onward)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 1
+#endif
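/* Illustrative note : ZSTD_HEAPMODE and ZSTD_LEGACY_SUPPORT are wrapped in #ifndef, so they can
   be overridden from the compiler command line without editing this file, e.g. :
       cc -c -DZSTD_HEAPMODE=0 -DZSTD_LEGACY_SUPPORT=0 <this file>
   ("<this file>" stands for whatever name this translation unit has in the build). */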
+
+
+/* *******************************************************
+* Includes
+*********************************************************/
+#include <stdlib.h> /* calloc */
+#include <string.h> /* memcpy, memmove */
+#include <stdio.h> /* debug : printf */
+#include "mem.h" /* low level memory routines */
+#include "zstd_static.h"
+#include "zstd_internal.h"
+#include "fse_static.h"
+#include "huff0.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
+# include "zstd_legacy.h"
+#endif
+
+
+/* *******************************************************
+* Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+# include <immintrin.h> /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#else
+# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+#endif
+
+
+/* *******************************************************
+* Constants
+*********************************************************/
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB) /* define, for static allocation */
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+static const U32 g_maxDistance = 4 * BLOCKSIZE;
+static const U32 g_maxLimit = 1 GB;
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define LitFSELog 11
+#define MLFSELog 10
+#define LLFSELog 10
+#define OffFSELog 9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7 /* to remove */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/* *******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/* **************************************
+* Local structures
+****************************************/
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+ ssPtr->offset = ssPtr->offsetStart;
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->litLength = ssPtr->litLengthStart;
+ ssPtr->matchLength = ssPtr->matchLengthStart;
+ ssPtr->dumps = ssPtr->dumpsStart;
+}
+
+
+/* *************************************
+* Error Management
+***************************************/
+/*! ZSTD_isError
+* tells if a return value is an error code */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName
+* provides error code string (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* *************************************
+* Tool functions
+***************************************/
+unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; }
+
+
+/* *******************************************************
+* Compression
+*********************************************************/
+size_t ZSTD_compressBound(size_t srcSize) /* maximum compressed size */
+{
+ return FSE_compressBound(srcSize) + 12;
+}
+
+
+size_t ZSTD_noCompressBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE* const)dst;
+
+ if (srcSize + ZSTD_blockHeaderSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(ostart + ZSTD_blockHeaderSize, src, srcSize);
+
+ /* Build header */
+ ostart[0] = (BYTE)(srcSize>>16);
+ ostart[1] = (BYTE)(srcSize>>8);
+ ostart[2] = (BYTE) srcSize;
+ ostart[0] += (BYTE)(bt_raw<<6); /* is a raw (uncompressed) block */
+
+ return ZSTD_blockHeaderSize+srcSize;
+}
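/* Illustrative worked example of the 3-byte block header written above, and parsed back by
   ZSTD_getcBlockSize() in the decompression section (assuming bt_raw == 1 in blockType_t).
   For a raw block of srcSize = 0x12345 bytes :
       byte0 = (bt_raw<<6) | (0x12345>>16)  = 0x40 | 0x01 = 0x41
       byte1 = (0x12345>>8) & 0xFF          = 0x23
       byte2 =  0x12345     & 0xFF          = 0x45
   The decoder then recovers blockType = byte0>>6 = bt_raw, and
   cSize = ((byte0 & 7)<<16) + (byte1<<8) + byte2 = 0x12345. */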
+
+
+static size_t ZSTD_compressRawLiteralsBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE* const)dst;
+
+ if (srcSize + 3 > maxDstSize) return ERROR(dstSize_tooSmall);
+
+ MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RAW);
+ memcpy(ostart + 3, src, srcSize);
+ return srcSize + 3;
+}
+
+static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE* const)dst;
+
+ (void)maxDstSize;
+ MEM_writeLE32(dst, ((U32)srcSize << 2) | IS_RLE); /* note : maxDstSize > litHeaderSize > 4 */
+ ostart[3] = *(const BYTE*)src;
+ return 4;
+}
+
+size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 1; }
+
+static size_t ZSTD_compressLiterals (void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ const size_t minGain = ZSTD_minGain(srcSize);
+ BYTE* const ostart = (BYTE*)dst;
+ size_t hsize;
+ static const size_t litHeaderSize = 5;
+
+ if (maxDstSize < litHeaderSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
+
+ hsize = HUF_compress(ostart+litHeaderSize, maxDstSize-litHeaderSize, src, srcSize);
+
+ if ((hsize==0) || (hsize >= srcSize - minGain)) return ZSTD_compressRawLiteralsBlock(dst, maxDstSize, src, srcSize);
+ if (hsize==1) return ZSTD_compressRleLiteralsBlock(dst, maxDstSize, src, srcSize);
+
+ /* Build header */
+ {
+ ostart[0] = (BYTE)(srcSize << 2); /* is a block, is compressed */
+ ostart[1] = (BYTE)(srcSize >> 6);
+ ostart[2] = (BYTE)(srcSize >>14);
+ ostart[2] += (BYTE)(hsize << 5);
+ ostart[3] = (BYTE)(hsize >> 3);
+ ostart[4] = (BYTE)(hsize >>11);
+ }
+
+ return hsize+litHeaderSize;
+}
+
+
+size_t ZSTD_compressSequences(BYTE* dst, size_t maxDstSize,
+ const seqStore_t* seqStorePtr,
+ size_t srcSize)
+{
+ U32 count[MaxSeq+1];
+ S16 norm[MaxSeq+1];
+ size_t mostFrequent;
+ U32 max = 255;
+ U32 tableLog = 11;
+ U32 CTable_LitLength [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL )];
+ U32 CTable_OffsetBits [FSE_CTABLE_SIZE_U32(OffFSELog,MaxOff)];
+ U32 CTable_MatchLength[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML )];
+ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
+ const BYTE* const op_lit_start = seqStorePtr->litStart;
+ const BYTE* const llTable = seqStorePtr->litLengthStart;
+ const BYTE* const llPtr = seqStorePtr->litLength;
+ const BYTE* const mlTable = seqStorePtr->matchLengthStart;
+ const U32* const offsetTable = seqStorePtr->offsetStart;
+ BYTE* const offCodeTable = seqStorePtr->offCodeStart;
+ BYTE* op = dst;
+ BYTE* const oend = dst + maxDstSize;
+ const size_t nbSeq = llPtr - llTable;
+ const size_t minGain = ZSTD_minGain(srcSize);
+ const size_t maxCSize = srcSize - minGain;
+ BYTE* seqHead;
+
+
+ /* Compress literals */
+ {
+ size_t cSize;
+ size_t litSize = seqStorePtr->lit - op_lit_start;
+
+ if (litSize <= LITERAL_NOENTROPY)
+ cSize = ZSTD_compressRawLiteralsBlock(op, maxDstSize, op_lit_start, litSize);
+ else
+ cSize = ZSTD_compressLiterals(op, maxDstSize, op_lit_start, litSize);
+ if (ZSTD_isError(cSize)) return cSize;
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ if ((oend-op) < MIN_SEQUENCES_SIZE)
+ return ERROR(dstSize_tooSmall);
+ MEM_writeLE16(op, (U16)nbSeq); op+=2;
+ seqHead = op;
+
+ /* dumps : contains too large lengths */
+ {
+ size_t dumpsLength = seqStorePtr->dumps - seqStorePtr->dumpsStart;
+ if (dumpsLength < 512)
+ {
+ op[0] = (BYTE)(dumpsLength >> 8);
+ op[1] = (BYTE)(dumpsLength);
+ op += 2;
+ }
+ else
+ {
+ op[0] = 2;
+ op[1] = (BYTE)(dumpsLength>>8);
+ op[2] = (BYTE)(dumpsLength);
+ op += 3;
+ }
+ if ((size_t)(oend-op) < dumpsLength+6) return ERROR(dstSize_tooSmall);
+ memcpy(op, seqStorePtr->dumpsStart, dumpsLength);
+ op += dumpsLength;
+ }
+
+ /* CTable for Literal Lengths */
+ max = MaxLL;
+ mostFrequent = FSE_countFast(count, &max, seqStorePtr->litLengthStart, nbSeq);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2))
+ {
+ *op++ = *(seqStorePtr->litLengthStart);
+ FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
+ LLtype = bt_rle;
+ }
+ else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (LLbits-1))))
+ {
+ FSE_buildCTable_raw(CTable_LitLength, LLbits);
+ LLtype = bt_raw;
+ }
+ else
+ {
+ size_t NCountSize;
+ tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
+ FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
+ NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize;
+ FSE_buildCTable(CTable_LitLength, norm, max, tableLog);
+ LLtype = bt_compressed;
+ }
+
+ /* CTable for Offsets codes */
+ {
+ /* create Offset codes */
+ size_t i;
+ max = MaxOff;
+ for (i=0; i<nbSeq; i++)
+ {
+ offCodeTable[i] = (BYTE)ZSTD_highbit(offsetTable[i]) + 1;
+ if (offsetTable[i]==0) offCodeTable[i]=0;
+ }
+ mostFrequent = FSE_countFast(count, &max, offCodeTable, nbSeq);
+ }
+ if ((mostFrequent == nbSeq) && (nbSeq > 2))
+ {
+ *op++ = *offCodeTable;
+ FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
+ Offtype = bt_rle;
+ }
+ else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (Offbits-1))))
+ {
+ FSE_buildCTable_raw(CTable_OffsetBits, Offbits);
+ Offtype = bt_raw;
+ }
+ else
+ {
+ size_t NCountSize;
+ tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
+ FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
+ NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize;
+ FSE_buildCTable(CTable_OffsetBits, norm, max, tableLog);
+ Offtype = bt_compressed;
+ }
+
+ /* CTable for MatchLengths */
+ max = MaxML;
+ mostFrequent = FSE_countFast(count, &max, seqStorePtr->matchLengthStart, nbSeq);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2))
+ {
+ *op++ = *seqStorePtr->matchLengthStart;
+ FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
+ MLtype = bt_rle;
+ }
+ else if ((nbSeq < 64) || (mostFrequent < (nbSeq >> (MLbits-1))))
+ {
+ FSE_buildCTable_raw(CTable_MatchLength, MLbits);
+ MLtype = bt_raw;
+ }
+ else
+ {
+ size_t NCountSize;
+ tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
+ FSE_normalizeCount(norm, tableLog, count, nbSeq, max);
+ NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize;
+ FSE_buildCTable(CTable_MatchLength, norm, max, tableLog);
+ MLtype = bt_compressed;
+ }
+
+ seqHead[0] += (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+
+ /* Encoding Sequences */
+ {
+ size_t streamSize, errorCode;
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+ int i;
+
+ errorCode = BIT_initCStream(&blockStream, op, oend-op);
+ if (ERR_isError(errorCode)) return ERROR(dstSize_tooSmall); /* not enough space remaining */
+ FSE_initCState(&stateMatchLength, CTable_MatchLength);
+ FSE_initCState(&stateOffsetBits, CTable_OffsetBits);
+ FSE_initCState(&stateLitLength, CTable_LitLength);
+
+ for (i=(int)nbSeq-1; i>=0; i--)
+ {
+ BYTE matchLength = mlTable[i];
+ U32 offset = offsetTable[i];
+ BYTE offCode = offCodeTable[i]; /* 32b*/ /* 64b*/
+ U32 nbBits = (offCode-1) * (!!offCode);
+ BYTE litLength = llTable[i]; /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, matchLength); /* 17 */ /* 17 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* 7 */
+ BIT_addBits(&blockStream, offset, nbBits); /* 32 */ /* 42 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* 7 */
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, offCode); /* 16 */ /* 51 */
+ FSE_encodeSymbol(&blockStream, &stateLitLength, litLength); /* 26 */ /* 61 */
+ BIT_flushBits(&blockStream); /* 7 */ /* 7 */
+ }
+
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ streamSize = BIT_closeCStream(&blockStream);
+ if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
+ op += streamSize;
+ }
+
+ /* check compressibility */
+ if ((size_t)(op-dst) >= maxCSize) return 0;
+
+ return op - dst;
+}
+
+
+
+
+/* *************************************************************
+* Decompression section
+***************************************************************/
+struct ZSTD_DCtx_s
+{
+ U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+ U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+ U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+ void* previousDstEnd;
+ void* base;
+ size_t expected;
+ blockType_t bType;
+ U32 phase;
+ const BYTE* litPtr;
+ size_t litBufSize;
+ size_t litSize;
+ BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+}; /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
+
+
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+ const BYTE* const in = (const BYTE* const)src;
+ BYTE headerFlags;
+ U32 cSize;
+
+ if (srcSize < 3) return ERROR(srcSize_wrong);
+
+ headerFlags = *in;
+ cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+ bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+ bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+ if (bpPtr->blockType == bt_end) return 0;
+ if (bpPtr->blockType == bt_rle) return 1;
+ return cSize;
+}
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+ memcpy(dst, src, srcSize);
+ return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+ @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+
+ const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+ if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+ if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+ if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+ *maxDstSizePtr = litSize;
+ return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+ @return : nb of bytes read from src (< srcSize )*/
+size_t ZSTD_decodeLiteralsBlock(void* ctx,
+ const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* const istart = (const BYTE*) src;
+
+ /* any compressed block with literals segment must be at least this size */
+ if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+ switch(*istart & 3)
+ {
+ /* compressed */
+ case 0:
+ {
+ size_t litSize = BLOCKSIZE;
+ const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litBufSize = BLOCKSIZE+8;
+ dctx->litSize = litSize;
+ return readSize; /* works if it's an error too */
+ }
+ case IS_RAW:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
+ {
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litBufSize = BLOCKSIZE+8;
+ dctx->litSize = litSize;
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+3;
+ dctx->litBufSize = srcSize-3;
+ dctx->litSize = litSize;
+ return litSize+3; }
+ case IS_RLE:
+ {
+ const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ memset(dctx->litBuffer, istart[3], litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litBufSize = BLOCKSIZE+8;
+ dctx->litSize = litSize;
+ return 4;
+ }
+ default:
+ return ERROR(corruption_detected); /* forbidden nominal case */
+ }
+}
+
+
+size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+ FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE* const)src;
+ const BYTE* ip = istart;
+ const BYTE* const iend = istart + srcSize;
+ U32 LLtype, Offtype, MLtype;
+ U32 LLlog, Offlog, MLlog;
+ size_t dumpsLength;
+
+ /* check */
+ if (srcSize < 5) return ERROR(srcSize_wrong);
+
+ /* SeqHead */
+ *nbSeq = MEM_readLE16(ip); ip+=2;
+ LLtype = *ip >> 6;
+ Offtype = (*ip >> 4) & 3;
+ MLtype = (*ip >> 2) & 3;
+ if (*ip & 2)
+ {
+ dumpsLength = ip[2];
+ dumpsLength += ip[1] << 8;
+ ip += 3;
+ }
+ else
+ {
+ dumpsLength = ip[1];
+ dumpsLength += (ip[0] & 1) << 8;
+ ip += 2;
+ }
+ *dumpsPtr = ip;
+ ip += dumpsLength;
+ *dumpsLengthPtr = dumpsLength;
+
+ /* check */
+ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+ /* sequences */
+ {
+ S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
+ size_t headerSize;
+
+ /* Build DTables */
+ switch(LLtype)
+ {
+ U32 max;
+ case bt_rle :
+ LLlog = 0;
+ FSE_buildDTable_rle(DTableLL, *ip++); break;
+ case bt_raw :
+ LLlog = LLbits;
+ FSE_buildDTable_raw(DTableLL, LLbits); break;
+ default :
+ max = MaxLL;
+ headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (LLlog > LLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableLL, norm, max, LLlog);
+ }
+
+ switch(Offtype)
+ {
+ U32 max;
+ case bt_rle :
+ Offlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+ break;
+ case bt_raw :
+ Offlog = Offbits;
+ FSE_buildDTable_raw(DTableOffb, Offbits); break;
+ default :
+ max = MaxOff;
+ headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (Offlog > OffFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableOffb, norm, max, Offlog);
+ }
+
+ switch(MLtype)
+ {
+ U32 max;
+ case bt_rle :
+ MLlog = 0;
+ if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+ FSE_buildDTable_rle(DTableML, *ip++); break;
+ case bt_raw :
+ MLlog = MLbits;
+ FSE_buildDTable_raw(DTableML, MLbits); break;
+ default :
+ max = MaxML;
+ headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+ if (FSE_isError(headerSize)) return ERROR(GENERIC);
+ if (MLlog > MLFSELog) return ERROR(corruption_detected);
+ ip += headerSize;
+ FSE_buildDTable(DTableML, norm, max, MLlog);
+ }
+ }
+
+ return ip-istart;
+}
+
+
+typedef struct {
+ size_t litLength;
+ size_t offset;
+ size_t matchLength;
+} seq_t;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ FSE_DState_t stateLL;
+ FSE_DState_t stateOffb;
+ FSE_DState_t stateML;
+ size_t prevOffset;
+ const BYTE* dumps;
+ const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+ size_t litLength;
+ size_t prevOffset;
+ size_t offset;
+ size_t matchLength;
+ const BYTE* dumps = seqState->dumps;
+ const BYTE* const de = seqState->dumpsEnd;
+
+ /* Literal length */
+ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+ prevOffset = litLength ? seq->offset : seqState->prevOffset;
+ seqState->prevOffset = seq->offset;
+ if (litLength == MaxLL)
+ {
+ U32 add = *dumps++;
+ if (add < 255) litLength += add;
+ else
+ {
+ litLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+
+ /* Offset */
+ {
+ static const U32 offsetPrefix[MaxOff+1] = {
+ 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+ 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+ 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+ U32 offsetCode, nbBits;
+ offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ nbBits = offsetCode - 1;
+ if (offsetCode==0) nbBits = 0; /* cmove */
+ offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+ if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+ if (offsetCode==0) offset = prevOffset; /* cmove */
+ }
+
+ /* MatchLength */
+ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+ if (matchLength == MaxML)
+ {
+ U32 add = *dumps++;
+ if (add < 255) matchLength += add;
+ else
+ {
+ matchLength = MEM_readLE32(dumps) & 0xFFFFFF; /* no pb : dumps is always followed by seq tables > 1 byte */
+ dumps += 3;
+ }
+ if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ }
+ matchLength += MINMATCH;
+
+ /* save result */
+ seq->litLength = litLength;
+ seq->offset = offset;
+ seq->matchLength = matchLength;
+ seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+ seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit_8,
+ BYTE* const base, BYTE* const oend)
+{
+ static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
+ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
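    /* Illustrative note : for overlapping matches (offset < 8), the first 8 output bytes are
       produced with byte and 4-byte copies, and the two tables above then re-position `match`
       so that the remaining distance (op - match) becomes a multiple of the original offset
       that is >= 8, allowing the rest of the match to be copied with 8-byte wildcopy steps. */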
+ const BYTE* const ostart = op;
+ BYTE* const oLitEnd = op + sequence.litLength;
+ BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_8 = oend-8;
+ const BYTE* const litEnd = *litPtr + sequence.litLength;
+
+ /* check */
+ if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
+ if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
+ if (litEnd > litLimit_8) return ERROR(corruption_detected); /* risk read beyond lit buffer */
+
+ /* copy Literals */
+ ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+ op = oLitEnd;
+ *litPtr = litEnd; /* update for next sequence */
+
+ /* copy Match */
+ {
+ const BYTE* match = op - sequence.offset;
+
+ /* check */
+ //if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) */
+ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */
+ if (match < base) return ERROR(corruption_detected);
+
+ /* close range match, overlap */
+ if (sequence.offset < 8)
+ {
+ const int dec64 = dec64table[sequence.offset];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[sequence.offset];
+ ZSTD_copy4(op+4, match);
+ match -= dec64;
+ }
+ else
+ {
+ ZSTD_copy8(op, match);
+ }
+ op += 8; match += 8;
+
+ if (oMatchEnd > oend-12)
+ {
+ if (op < oend_8)
+ {
+ ZSTD_wildcopy(op, match, oend_8 - op);
+ match += oend_8 - op;
+ op = oend_8;
+ }
+ while (op < oMatchEnd) *op++ = *match++;
+ }
+ else
+ {
+ ZSTD_wildcopy(op, match, sequence.matchLength-8); /* works even if matchLength < 8 */
+ }
+ }
+
+ return oMatchEnd - ostart;
+}
+
+static size_t ZSTD_decompressSequences(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t errorCode, dumpsLength;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litLimit_8 = litPtr + dctx->litBufSize - 8;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ int nbSeq;
+ const BYTE* dumps;
+ U32* DTableLL = dctx->LLTable;
+ U32* DTableML = dctx->MLTable;
+ U32* DTableOffb = dctx->OffTable;
+ BYTE* const base = (BYTE*) (dctx->base);
+
+ /* Build Decoding Tables */
+ errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+ DTableLL, DTableML, DTableOffb,
+ ip, iend-ip);
+ if (ZSTD_isError(errorCode)) return errorCode;
+ ip += errorCode;
+
+ /* Regen sequences */
+ {
+ seq_t sequence;
+ seqState_t seqState;
+
+ memset(&sequence, 0, sizeof(sequence));
+ sequence.offset = 4;
+ seqState.dumps = dumps;
+ seqState.dumpsEnd = dumps + dumpsLength;
+ seqState.prevOffset = 4;
+ errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+ if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+ FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+ FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+ FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
+ {
+ size_t oneSeqSize;
+ nbSeq--;
+ ZSTD_decodeSequence(&sequence, &seqState);
+ oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litLimit_8, base, oend);
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+
+ /* check if reached exact end */
+ if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
+ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
+
+ /* last literal segment */
+ {
+ size_t lastLLSize = litEnd - litPtr;
+ if (litPtr > litEnd) return ERROR(corruption_detected);
+ if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+ if (op != litPtr) memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+ void* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize)
+{
+ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+
+ /* Decode literals sub-block */
+ size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+
+ return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* iend = ip + srcSize;
+ BYTE* const ostart = (BYTE* const)dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + maxDstSize;
+ size_t remainingSize = srcSize;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
+ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ magicNumber = MEM_readLE32(src);
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
+ if (ZSTD_isLegacy(magicNumber))
+ return ZSTD_decompressLegacy(dst, maxDstSize, src, srcSize, magicNumber);
+#endif
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t decodedSize=0;
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
+ if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
+ break;
+ case bt_raw :
+ decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet supported */
+ break;
+ case bt_end :
+ /* end of frame */
+ if (remainingSize) return ERROR(srcSize_wrong);
+ break;
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ if (cBlockSize == 0) break; /* bt_end */
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ op += decodedSize;
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
+ }
+
+ return op-ostart;
+}
+
+size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ ZSTD_DCtx ctx;
+ ctx.base = dst;
+ return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+
+/* ******************************
+* Streaming Decompression API
+********************************/
+
+size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+ dctx->expected = ZSTD_frameHeaderSize;
+ dctx->phase = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->base = NULL;
+ return 0;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+ if (dctx==NULL) return NULL;
+ ZSTD_resetDCtx(dctx);
+ return dctx;
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ free(dctx);
+ return 0;
+}
+
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+ return dctx->expected;
+}
+
+size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+ /* Sanity check */
+ if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+ if (dst != ctx->previousDstEnd) /* not contiguous */
+ ctx->base = dst;
+
+ /* Decompress : frame header */
+ if (ctx->phase == 0)
+ {
+ /* Check frame magic header */
+ U32 magicNumber = MEM_readLE32(src);
+ if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ return 0;
+ }
+
+ /* Decompress : block header */
+ if (ctx->phase == 1)
+ {
+ blockProperties_t bp;
+ size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(blockSize)) return blockSize;
+ if (bp.blockType == bt_end)
+ {
+ ctx->expected = 0;
+ ctx->phase = 0;
+ }
+ else
+ {
+ ctx->expected = blockSize;
+ ctx->bType = bp.blockType;
+ ctx->phase = 2;
+ }
+
+ return 0;
+ }
+
+ /* Decompress : block content */
+ {
+ size_t rSize;
+ switch(ctx->bType)
+ {
+ case bt_compressed:
+ rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+ break;
+ case bt_raw :
+ rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+ break;
+ case bt_rle :
+ return ERROR(GENERIC); /* not yet handled */
+ break;
+ case bt_end : /* should never happen (filtered at phase 1) */
+ rSize = 0;
+ break;
+ default:
+ return ERROR(GENERIC);
+ }
+ ctx->phase = 1;
+ ctx->expected = ZSTD_blockHeaderSize;
+ ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+ return rSize;
+ }
+
+}
+
+
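/* Editor's sketch (not part of the patch) : expected driving loop for the
   streaming decompression API above. readNextChunk() is a hypothetical helper
   which fills 'buf' with exactly 'n' bytes of compressed input; error handling
   is abbreviated. */
extern void readNextChunk(void* buf, size_t n);   /* hypothetical input callback */

static size_t exampleDecompressStream(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstCapacity;
    void* const srcBuf = malloc(128*1024 + 16);   /* one block (128 KB max in this version) + headers */
    if (srcBuf == NULL) return ERROR(GENERIC);
    ZSTD_resetDCtx(dctx);
    while (1)
    {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);   /* exact nb of bytes to provide next */
        size_t decoded;
        if (toRead == 0) break;                                     /* frame fully decoded */
        readNextChunk(srcBuf, toRead);
        decoded = ZSTD_decompressContinue(dctx, op, oend-op, srcBuf, toRead);
        if (ZSTD_isError(decoded)) { free(srcBuf); return decoded; }
        op += decoded;   /* header steps return 0 ; block steps return nb of decoded bytes */
    }
    free(srcBuf);
    return (size_t)(op - (BYTE*)dst);
}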
* Includes
***************************************/
#include "zstd.h"
+#include "mem.h"
+
+
+/* *************************************
+* Types
+***************************************/
+/** strategies, ordered from fastest to strongest */
+typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
+
+typedef struct
+{
+ U32 windowLog; /* largest match distance : impacts decompression buffer size */
+ U32 contentLog; /* full search segment : larger == more compression, slower, more memory (unused by the fast strategy) */
+ U32 hashLog; /* dispatch table : larger == more memory, faster */
+ U32 searchLog; /* nb of searches : larger == more compression, slower */
+ U32 searchLength; /* size of matches : larger == faster decompression */
+ ZSTD_strategy strategy;
+} ZSTD_parameters;
+
+
+/* *************************************
+* Advanced function
+***************************************/
+/** ZSTD_compress_advanced
+* Same as ZSTD_compressCCtx(), with fine-tuned control over each compression parameter */
+size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+ void* dst, size_t maxDstSize,
+ const void* src, size_t srcSize,
+ ZSTD_parameters params);
+
+/** ZSTD_validateParams
+ corrects params values to remain within the authorized range
+ srcSizeHint is optional; select 0 if not known */
+void ZSTD_validateParams(ZSTD_parameters* params, U64 srcSizeHint);
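/* Editor's sketch (not part of the patch) : one plausible way to drive the
   advanced API declared above. All field values below are illustrative only;
   ZSTD_validateParams() clamps them into the authorized ranges before use. */
static size_t exampleCompressAdvanced(ZSTD_CCtx* cctx,
                                      void* dst, size_t maxDstSize,
                                      const void* src, size_t srcSize)
{
    ZSTD_parameters params;
    params.windowLog    = 21;           /* up to 2 MB of match distance */
    params.contentLog   = 20;
    params.hashLog      = 20;
    params.searchLog    = 3;
    params.searchLength = 5;
    params.strategy     = ZSTD_lazy2;
    ZSTD_validateParams(&params, srcSize);   /* clamp values; shrink tables for small inputs */
    return ZSTD_compress_advanced(cctx, dst, maxDstSize, src, srcSize, params);
}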
/* *************************************
* Streaming functions
***************************************/
-size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, void* dst, size_t maxDstSize);
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, void* dst, size_t maxDstSize, int compressionLevel, U64 srcSizeHint);
size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t maxDstSize);
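/* Editor's sketch (not part of the patch) : expected calling sequence for the
   streaming compression entry points above, mirroring the FIO_compressFilename()
   update later in this patch. writeOut() is a hypothetical output callback, and
   outBuffSize is assumed >= ZSTD_compressBound(chunkSize). */
extern void writeOut(const void* data, size_t size);   /* hypothetical sink */

static size_t exampleCompressStream(ZSTD_CCtx* cctx, void* outBuff, size_t outBuffSize,
                                    const char* src, size_t srcSize, int cLevel)
{
    size_t const chunkSize = 128*1024;
    size_t cSize = ZSTD_compressBegin(cctx, outBuff, outBuffSize, cLevel, srcSize);  /* frame header */
    if (ZSTD_isError(cSize)) return cSize;
    writeOut(outBuff, cSize);
    while (srcSize > 0)
    {
        size_t const readSize = (srcSize < chunkSize) ? srcSize : chunkSize;
        cSize = ZSTD_compressContinue(cctx, outBuff, outBuffSize, src, readSize);
        if (ZSTD_isError(cSize)) return cSize;
        writeOut(outBuff, cSize);
        src += readSize; srcSize -= readSize;
    }
    cSize = ZSTD_compressEnd(cctx, outBuff, outBuffSize);   /* end-of-frame mark */
    if (ZSTD_isError(cSize)) return cSize;
    writeOut(outBuff, cSize);
    return 0;
}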
#define ZSTD_magicNumber 0xFD2FB523 /* v0.3 (current)*/
+/* *************************************
+* Pre-defined compression levels
+***************************************/
+#define ZSTD_MAX_CLEVEL 20
+#define ZSTD_WINDOWLOG_MAX 26
+#define ZSTD_WINDOWLOG_MIN 18
+#define ZSTD_CONTENTLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CONTENTLOG_MIN 4
+#define ZSTD_HASHLOG_MAX 28
+#define ZSTD_HASHLOG_MIN 4
+#define ZSTD_SEARCHLOG_MAX (ZSTD_CONTENTLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_SEARCHLENGTH_MAX 7
+#define ZSTD_SEARCHLENGTH_MIN 4
+
+static const ZSTD_parameters ZSTD_defaultParameters[2][ZSTD_MAX_CLEVEL+1] = {
+{ /* for <= 128 KB */
+ /* W, C, H, S, L, strat */
+ { 17, 12, 12, 1, 4, ZSTD_fast }, /* level 0 - never used */
+ { 17, 12, 13, 1, 6, ZSTD_fast }, /* level 1 */
+ { 17, 15, 16, 1, 5, ZSTD_fast }, /* level 2 */
+ { 17, 16, 17, 1, 5, ZSTD_fast }, /* level 3 */
+ { 17, 13, 15, 2, 4, ZSTD_greedy }, /* level 4 */
+ { 17, 15, 17, 3, 4, ZSTD_greedy }, /* level 5 */
+ { 17, 14, 17, 3, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 16, 17, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 17, 16, 17, 4, 4, ZSTD_lazy2 }, /* level 8 */
+ { 17, 17, 16, 5, 4, ZSTD_lazy2 }, /* level 9 */
+ { 17, 17, 16, 6, 4, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 16, 7, 4, ZSTD_lazy2 }, /* level 11 */
+ { 17, 17, 16, 8, 4, ZSTD_lazy2 }, /* level 12 */
+ { 17, 18, 16, 4, 4, ZSTD_btlazy2 }, /* level 13 */
+ { 17, 18, 16, 5, 4, ZSTD_btlazy2 }, /* level 14 */
+ { 17, 18, 16, 6, 4, ZSTD_btlazy2 }, /* level 15 */
+ { 17, 18, 16, 7, 4, ZSTD_btlazy2 }, /* level 16 */
+ { 17, 18, 16, 8, 4, ZSTD_btlazy2 }, /* level 17 */
+ { 17, 18, 16, 9, 4, ZSTD_btlazy2 }, /* level 18 */
+ { 17, 18, 16, 10, 4, ZSTD_btlazy2 }, /* level 19 */
+ { 17, 18, 18, 12, 4, ZSTD_btlazy2 }, /* level 20 */
+},
+{ /* for > 128 KB */
+ /* W, C, H, S, L, strat */
+ { 18, 12, 12, 1, 4, ZSTD_fast }, /* level 0 - never used */
+ { 18, 14, 14, 1, 7, ZSTD_fast }, /* level 1 - in fact redirected towards zstd fast */
+ { 19, 15, 16, 1, 6, ZSTD_fast }, /* level 2 */
+ { 20, 18, 20, 1, 6, ZSTD_fast }, /* level 3 */
+ { 21, 19, 21, 1, 6, ZSTD_fast }, /* level 4 */
+ { 20, 13, 18, 5, 5, ZSTD_greedy }, /* level 5 */
+ { 20, 17, 19, 3, 5, ZSTD_greedy }, /* level 6 */
+ { 21, 17, 20, 3, 5, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 20, 3, 5, ZSTD_lazy }, /* level 8 */
+ { 21, 20, 20, 3, 5, ZSTD_lazy2 }, /* level 9 */
+ { 21, 19, 20, 4, 5, ZSTD_lazy2 }, /* level 10 */
+ { 22, 20, 22, 4, 5, ZSTD_lazy2 }, /* level 11 */
+ { 22, 20, 22, 5, 5, ZSTD_lazy2 }, /* level 12 */
+ { 22, 21, 22, 5, 5, ZSTD_lazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, ZSTD_lazy2 }, /* level 14 */
+ { 23, 23, 23, 5, 5, ZSTD_lazy2 }, /* level 15 */
+ { 23, 21, 22, 5, 5, ZSTD_btlazy2 }, /* level 16 */
+ { 23, 24, 23, 4, 5, ZSTD_btlazy2 }, /* level 17 */
+ { 25, 24, 23, 5, 5, ZSTD_btlazy2 }, /* level 18 */
+ { 25, 26, 23, 5, 5, ZSTD_btlazy2 }, /* level 19 */
+ { 26, 27, 24, 6, 5, ZSTD_btlazy2 }, /* level 20 */
+}
+};
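/* Editor's sketch (not part of the patch) : how a compression level is presumably
   turned into a ZSTD_parameters entry using the tables above (the actual lookup
   lives in zstd_compress.c, which is not shown in this hunk). */
static ZSTD_parameters exampleLevelToParams(int compressionLevel, U64 srcSizeHint)
{
    const int tableID = (srcSizeHint > 128*1024);        /* second table for inputs > 128 KB */
    ZSTD_parameters params;
    if (compressionLevel < 1) compressionLevel = 1;      /* level 0 entries are never used */
    if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
    params = ZSTD_defaultParameters[tableID][compressionLevel];
    ZSTD_validateParams(&params, srcSizeHint);           /* shrink window for small inputs */
    return params;
}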
+
+
/* *************************************
* Error management
***************************************/
+++ /dev/null
-/*
- zstdhc - high compression variant
- Header File
- Copyright (C) 2015, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - zstd source repository : http://www.zstd.net
-*/
-#pragma once
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-* Includes
-***************************************/
-#include <stddef.h> /* size_t */
-
-
-/* *************************************
-* Simple function
-***************************************/
-/**
-ZSTD_HC_compress() :
- Compresses 'srcSize' bytes from buffer 'src' into buffer 'dst', of maximum size 'dstSize'.
- Destination buffer must be already allocated.
- Compression runs faster if maxDstSize >= ZSTD_compressBound(srcSize).
- @return : the number of bytes written into buffer 'dst'
- or an error code if it fails (which can be tested using ZSTD_isError())
-*/
-size_t ZSTD_HC_compress(void* dst, size_t maxDstSize,
- const void* src, size_t srcSize,
- int compressionLevel);
-
-
-/* *************************************
-* Advanced functions
-***************************************/
-typedef struct ZSTD_HC_CCtx_s ZSTD_HC_CCtx; /* incomplete type */
-ZSTD_HC_CCtx* ZSTD_HC_createCCtx(void);
-size_t ZSTD_HC_freeCCtx(ZSTD_HC_CCtx* cctx);
-
-/**
-ZSTD_HC_compressCCtx() :
- Same as ZSTD_compress(), but requires a ZSTD_HC_CCtx working space already allocated
-*/
-size_t ZSTD_HC_compressCCtx(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel);
-
-
-#if defined (__cplusplus)
-}
-#endif
+++ /dev/null
-/*
- zstdhc - high compression variant
- Header File - Experimental API, static linking only
- Copyright (C) 2015, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - zstd source repository : http://www.zstd.net
-*/
-#pragma once
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* *************************************
-* Includes
-***************************************/
-#include "mem.h"
-#include "zstdhc.h"
-
-
-/* *************************************
-* Types
-***************************************/
-/** from faster to stronger */
-typedef enum { ZSTD_HC_fast, ZSTD_HC_greedy, ZSTD_HC_lazy, ZSTD_HC_lazy2, ZSTD_HC_btlazy2 } ZSTD_HC_strategy;
-
-typedef struct
-{
- U32 windowLog; /* largest match distance : impact decompression buffer size */
- U32 contentLog; /* full search segment : larger == more compression, slower, more memory (useless for fast) */
- U32 hashLog; /* dispatch table : larger == more memory, faster*/
- U32 searchLog; /* nb of searches : larger == more compression, slower*/
- U32 searchLength; /* size of matches : larger == faster decompression */
- ZSTD_HC_strategy strategy;
-} ZSTD_HC_parameters;
-
-/* parameters boundaries */
-#define ZSTD_HC_WINDOWLOG_MAX 26
-#define ZSTD_HC_WINDOWLOG_MIN 18
-#define ZSTD_HC_CONTENTLOG_MAX (ZSTD_HC_WINDOWLOG_MAX+1)
-#define ZSTD_HC_CONTENTLOG_MIN 4
-#define ZSTD_HC_HASHLOG_MAX 28
-#define ZSTD_HC_HASHLOG_MIN 4
-#define ZSTD_HC_SEARCHLOG_MAX (ZSTD_HC_CONTENTLOG_MAX-1)
-#define ZSTD_HC_SEARCHLOG_MIN 1
-#define ZSTD_HC_SEARCHLENGTH_MAX 7
-#define ZSTD_HC_SEARCHLENGTH_MIN 4
-
-
-/* *************************************
-* Advanced function
-***************************************/
-/** ZSTD_HC_compress_advanced
-* Same as ZSTD_HC_compressCCtx(), with fine-tune control of each compression parameter */
-size_t ZSTD_HC_compress_advanced (ZSTD_HC_CCtx* ctx,
- void* dst, size_t maxDstSize,
- const void* src, size_t srcSize,
- ZSTD_HC_parameters params);
-
-/** ZSTD_HC_validateParams
- correct params value to remain within authorized range
- srcSizeHint value is optional, select 0 if not known */
-void ZSTD_HC_validateParams(ZSTD_HC_parameters* params, U64 srcSizeHint);
-
-
-/* *************************************
-* Streaming functions
-***************************************/
-size_t ZSTD_HC_compressBegin(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, int compressionLevel, U64 srcSizeHint);
-size_t ZSTD_HC_compressContinue(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-size_t ZSTD_HC_compressEnd(ZSTD_HC_CCtx* ctx, void* dst, size_t maxDstSize);
-
-
-/* *************************************
-* Pre-defined compression levels
-***************************************/
-#define ZSTD_HC_MAX_CLEVEL 20
-static const ZSTD_HC_parameters ZSTD_HC_defaultParameters[2][ZSTD_HC_MAX_CLEVEL+1] = {
-{ /* for <= 128 KB */
- /* W, C, H, S, L, strat */
- { 17, 12, 12, 1, 4, ZSTD_HC_fast }, /* level 0 - never used */
- { 17, 12, 13, 1, 6, ZSTD_HC_fast }, /* level 1 */
- { 17, 15, 16, 1, 5, ZSTD_HC_fast }, /* level 2 */
- { 17, 16, 17, 1, 5, ZSTD_HC_fast }, /* level 3 */
- { 17, 13, 15, 2, 4, ZSTD_HC_greedy }, /* level 4 */
- { 17, 15, 17, 3, 4, ZSTD_HC_greedy }, /* level 5 */
- { 17, 14, 17, 3, 4, ZSTD_HC_lazy }, /* level 6 */
- { 17, 16, 17, 4, 4, ZSTD_HC_lazy }, /* level 7 */
- { 17, 16, 17, 4, 4, ZSTD_HC_lazy2 }, /* level 8 */
- { 17, 17, 16, 5, 4, ZSTD_HC_lazy2 }, /* level 9 */
- { 17, 17, 16, 6, 4, ZSTD_HC_lazy2 }, /* level 10 */
- { 17, 17, 16, 7, 4, ZSTD_HC_lazy2 }, /* level 11 */
- { 17, 17, 16, 8, 4, ZSTD_HC_lazy2 }, /* level 12 */
- { 17, 18, 16, 4, 4, ZSTD_HC_btlazy2 }, /* level 13 */
- { 17, 18, 16, 5, 4, ZSTD_HC_btlazy2 }, /* level 14 */
- { 17, 18, 16, 6, 4, ZSTD_HC_btlazy2 }, /* level 15 */
- { 17, 18, 16, 7, 4, ZSTD_HC_btlazy2 }, /* level 16 */
- { 17, 18, 16, 8, 4, ZSTD_HC_btlazy2 }, /* level 17 */
- { 17, 18, 16, 9, 4, ZSTD_HC_btlazy2 }, /* level 18 */
- { 17, 18, 16, 10, 4, ZSTD_HC_btlazy2 }, /* level 19 */
- { 17, 18, 18, 12, 4, ZSTD_HC_btlazy2 }, /* level 20 */
-},
-{ /* for > 128 KB */
- /* W, C, H, S, L, strat */
- { 18, 12, 12, 1, 4, ZSTD_HC_fast }, /* level 0 - never used */
- { 18, 14, 14, 1, 7, ZSTD_HC_fast }, /* level 1 - in fact redirected towards zstd fast */
- { 19, 15, 16, 1, 6, ZSTD_HC_fast }, /* level 2 */
- { 20, 18, 20, 1, 6, ZSTD_HC_fast }, /* level 3 */
- { 21, 19, 21, 1, 6, ZSTD_HC_fast }, /* level 4 */
- { 20, 13, 18, 5, 5, ZSTD_HC_greedy }, /* level 5 */
- { 20, 17, 19, 3, 5, ZSTD_HC_greedy }, /* level 6 */
- { 21, 17, 20, 3, 5, ZSTD_HC_lazy }, /* level 7 */
- { 21, 19, 20, 3, 5, ZSTD_HC_lazy }, /* level 8 */
- { 21, 20, 20, 3, 5, ZSTD_HC_lazy2 }, /* level 9 */
- { 21, 19, 20, 4, 5, ZSTD_HC_lazy2 }, /* level 10 */
- { 22, 20, 22, 4, 5, ZSTD_HC_lazy2 }, /* level 11 */
- { 22, 20, 22, 5, 5, ZSTD_HC_lazy2 }, /* level 12 */
- { 22, 21, 22, 5, 5, ZSTD_HC_lazy2 }, /* level 13 */
- { 22, 22, 23, 5, 5, ZSTD_HC_lazy2 }, /* level 14 */
- { 23, 23, 23, 5, 5, ZSTD_HC_lazy2 }, /* level 15 */
- { 23, 21, 22, 5, 5, ZSTD_HC_btlazy2 }, /* level 16 */
- { 23, 24, 23, 4, 5, ZSTD_HC_btlazy2 }, /* level 17 */
- { 25, 24, 23, 5, 5, ZSTD_HC_btlazy2 }, /* level 18 */
- { 25, 26, 23, 5, 5, ZSTD_HC_btlazy2 }, /* level 19 */
- { 26, 27, 24, 6, 5, ZSTD_HC_btlazy2 }, /* level 20 */
-}
-};
-
-
-#if defined (__cplusplus)
-}
-#endif
all: zstd zstd32 fullbench fullbench32 fuzzer fuzzer32 paramgrill datagen
-zstd: $(ZSTDDIR)/zstd.c $(ZSTDDIR)/zstdhc.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+zstd: $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
xxhash.c bench.c fileio.c zstdcli.c legacy/fileio_legacy.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
-zstd32: $(ZSTDDIR)/zstd.c $(ZSTDDIR)/zstdhc.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+zstd32: $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
xxhash.c bench.c fileio.c zstdcli.c legacy/fileio_legacy.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)
-fullbench : $(ZSTDDIR)/zstd.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+fullbench : $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
datagen.c fullbench.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
-fullbench32: $(ZSTDDIR)/zstd.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+fullbench32: $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
datagen.c fullbench.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)
-fuzzer : $(ZSTDDIR)/zstd.c $(ZSTDDIR)/zstdhc.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+fuzzer : $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
datagen.c xxhash.c fuzzer.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
-fuzzer32: $(ZSTDDIR)/zstd.c $(ZSTDDIR)/zstdhc.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+fuzzer32: $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
datagen.c xxhash.c fuzzer.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)
-paramgrill : $(ZSTDDIR)/zstdhc.c $(ZSTDDIR)/zstd.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
+paramgrill : $(ZSTDDIR)/zstd_compress.c $(ZSTDDIR)/zstd_decompress.c $(ZSTDDIR)/fse.c $(ZSTDDIR)/huff0.c \
$(ZSTDDIR)/legacy/zstd_v01.c $(ZSTDDIR)/legacy/zstd_v02.c \
datagen.c xxhash.c paramgrill.c
$(CC) $(FLAGS) $^ -lm -o $@$(EXT)
#include "mem.h"
#include "zstd.h"
-#include "zstdhc.h"
#include "xxhash.h"
typedef size_t (*compressor_t) (void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel);
-static size_t local_compress_fast (void* dst, size_t maxDstSize, const void* src, size_t srcSize, int compressionLevel)
-{
- (void)compressionLevel;
- return ZSTD_compress(dst, maxDstSize, src, srcSize);
-}
-
#define MIN(a,b) ((a)<(b) ? (a) : (b))
static int BMK_benchMem(void* srcBuffer, size_t srcSize, const char* fileName, int cLevel)
const size_t maxCompressedSize = (size_t)nbBlocks * ZSTD_compressBound(blockSize);
void* const compressedBuffer = malloc(maxCompressedSize);
void* const resultBuffer = malloc(srcSize);
- const compressor_t compressor = (cLevel <= 1) ? local_compress_fast : ZSTD_HC_compress;
+ const compressor_t compressor = ZSTD_compress;
U64 crcOrig;
/* init */
return (size_t)(requiredMem - step);
}
-static int BMK_benchOneFile(char* inFileName, int cLevel)
+static int BMK_benchOneFile(const char* inFileName, int cLevel)
{
FILE* inFile;
U64 inFileSize;
}
-int BMK_benchFiles(char** fileNamesTable, unsigned nbFiles, unsigned cLevel)
+int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles, unsigned cLevel)
{
double compressibility = (double)g_compressibilityDefault / 100;
#include "mem.h"
#include "fileio.h"
#include "zstd_static.h"
-#include "zstdhc_static.h"
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
# include "zstd_legacy.h" /* legacy */
}
-typedef void* (*FIO_createC) (void);
-static void* local_ZSTD_createCCtx(void) { return (void*) ZSTD_createCCtx(); }
-static void* local_ZSTD_HC_createCCtx(void) { return (void*) ZSTD_HC_createCCtx(); }
-
-typedef size_t (*FIO_initC) (void* ctx, void* dst, size_t maxDstSize, int cLevel, U64 srcSizeHint);
-static size_t local_ZSTD_compressBegin (void* ctx, void* dst, size_t maxDstSize, int cLevel, U64 srcSizeHint)
-{
- (void)cLevel; (void)srcSizeHint;
- return ZSTD_compressBegin((ZSTD_CCtx*)ctx, dst, maxDstSize);
-}
-static size_t local_ZSTD_HC_compressBegin (void* ctx, void* dst, size_t maxDstSize, int cLevel, U64 srcSizeHint)
-{
- return ZSTD_HC_compressBegin((ZSTD_HC_CCtx*)ctx, dst, maxDstSize, cLevel, srcSizeHint);
-}
-
-typedef size_t (*FIO_continueC) (void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
-static size_t local_ZSTD_compressContinue (void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
- return ZSTD_compressContinue((ZSTD_CCtx*)ctx, dst, maxDstSize, src, srcSize);
-}
-static size_t local_ZSTD_HC_compressContinue (void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
- return ZSTD_HC_compressContinue((ZSTD_HC_CCtx*)ctx, dst, maxDstSize, src, srcSize);
-}
-
-typedef size_t (*FIO_endC) (void* ctx, void* dst, size_t maxDstSize);
-static size_t local_ZSTD_compressEnd (void* ctx, void* dst, size_t maxDstSize)
-{
- return ZSTD_compressEnd((ZSTD_CCtx*)ctx, dst, maxDstSize);
-}
-static size_t local_ZSTD_HC_compressEnd (void* ctx, void* dst, size_t maxDstSize)
-{
- return ZSTD_HC_compressEnd((ZSTD_HC_CCtx*)ctx, dst, maxDstSize);
-}
-
-typedef void (*FIO_freeC) (void* ctx);
-static void local_ZSTD_freeCCtx(void* ctx) { ZSTD_freeCCtx((ZSTD_CCtx*)ctx); }
-static void local_ZSTD_HC_freeCCtx(void* ctx) { ZSTD_HC_freeCCtx((ZSTD_HC_CCtx*)ctx); }
-
-
unsigned long long FIO_compressFilename(const char* output_filename, const char* input_filename, int cLevel)
{
U64 filesize = 0;
FILE* finput;
FILE* foutput;
size_t sizeCheck, cSize;
- void* ctx;
- FIO_createC createC=NULL;
- FIO_initC initC=NULL;
- FIO_continueC continueC = NULL;
- FIO_endC endC = NULL;
- FIO_freeC freeC = NULL;
+ ZSTD_CCtx* ctx;
- /* Init */
- if (cLevel <= 1)
- {
- createC = local_ZSTD_createCCtx;
- initC = local_ZSTD_compressBegin;
- continueC = local_ZSTD_compressContinue;
- endC = local_ZSTD_compressEnd;
- freeC = local_ZSTD_freeCCtx;
- }
- else
- {
- createC = local_ZSTD_HC_createCCtx;
- initC = local_ZSTD_HC_compressBegin;
- continueC = local_ZSTD_HC_compressContinue;
- endC = local_ZSTD_HC_compressEnd;
- freeC = local_ZSTD_HC_freeCCtx;
- }
+ /* init */
FIO_getFileHandles(&finput, &foutput, input_filename, output_filename);
filesize = FIO_getFileSize(input_filename);
/* Allocate Memory */
- ctx = createC();
+ ctx = ZSTD_createCCtx();
inBuff = (BYTE*)malloc(inBuffSize);
outBuff = (BYTE*)malloc(outBuffSize);
if (!inBuff || !outBuff || !ctx) EXM_THROW(21, "Allocation error : not enough memory");
inEnd = inBuff + inBuffSize;
/* Write Frame Header */
- cSize = initC(ctx, outBuff, outBuffSize, cLevel, filesize);
+ cSize = ZSTD_compressBegin(ctx, outBuff, outBuffSize, cLevel, filesize);
if (ZSTD_isError(cSize)) EXM_THROW(22, "Compression error : cannot create frame header");
sizeCheck = fwrite(outBuff, 1, cSize, foutput);
DISPLAYUPDATE(2, "\rRead : %u MB ", (U32)(filesize>>20));
/* Compress Block */
- cSize = continueC(ctx, outBuff, outBuffSize, inSlot, inSize);
+ cSize = ZSTD_compressContinue(ctx, outBuff, outBuffSize, inSlot, inSize);
if (ZSTD_isError(cSize))
EXM_THROW(24, "Compression error : %s ", ZSTD_getErrorName(cSize));
}
/* End of Frame */
- cSize = endC(ctx, outBuff, outBuffSize);
+ cSize = ZSTD_compressEnd(ctx, outBuff, outBuffSize);
if (ZSTD_isError(cSize)) EXM_THROW(26, "Compression error : cannot create frame end");
sizeCheck = fwrite(outBuff, 1, cSize, foutput);
/* clean */
free(inBuff);
free(outBuff);
- freeC(ctx);
+ ZSTD_freeCCtx(ctx);
fclose(finput);
if (fclose(foutput)) EXM_THROW(28, "Write error : cannot properly close %s", output_filename);
size_t local_ZSTD_compress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
{
(void)buff2;
- return ZSTD_compress(dst, dstSize, src, srcSize);
+ return ZSTD_compress(dst, dstSize, src, srcSize, 1);
}
size_t local_ZSTD_decompress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
switch(benchNb)
{
case 11:
- g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize);
+ g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, 1);
break;
case 31: /* ZSTD_decodeLiteralsBlock */
{
blockProperties_t bp;
- g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize);
+ g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
ZSTD_getcBlockSize(dstBuff+4, dstBuffSize, &bp); // Get first block type
if (bp.blockType != bt_compressed)
{
const BYTE* ip = dstBuff;
const BYTE* iend;
size_t blockSize;
- ZSTD_compress(dstBuff, dstBuffSize, src, srcSize);
+ ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
ip += 4; // skip the magic number
blockSize = ZSTD_getcBlockSize(ip, dstBuffSize, &bp); // Get first block type
if (bp.blockType != bt_compressed)
case 102: /* local_decodeLiteralsForward */
{
blockProperties_t bp;
- ZSTD_compress(dstBuff, dstBuffSize, src, srcSize);
+ ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
g_cSize = ZSTD_getcBlockSize(dstBuff+7, dstBuffSize, &bp);
memcpy(buff2, dstBuff+10, g_cSize);
//srcSize = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize); // real speed
#include <sys/timeb.h> /* timeb */
#include <string.h> /* strcmp */
#include "zstd_static.h"
-#include "zstdhc_static.h"
#include "datagen.h" /* RDG_genBuffer */
#include "xxhash.h" /* XXH64 */
#include "mem.h"
/* Basic tests */
DISPLAYLEVEL(4, "test%3i : compress %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
- result = ZSTD_compress(compressedBuffer, ZSTD_compressBound(COMPRESSIBLE_NOISE_LENGTH), CNBuffer, COMPRESSIBLE_NOISE_LENGTH);
+ result = ZSTD_compress(compressedBuffer, ZSTD_compressBound(COMPRESSIBLE_NOISE_LENGTH), CNBuffer, COMPRESSIBLE_NOISE_LENGTH, 1);
if (ZSTD_isError(result)) goto _output_error;
cSize = result;
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
sampleSize += 256 KB - 1;
RDG_genBuffer((char*)CNBuffer+sampleSize, 96 KB, compressibility, 0., randState);
sampleSize += 96 KB;
- cSize = ZSTD_compress(compressedBuffer, ZSTD_compressBound(sampleSize), CNBuffer, sampleSize);
+ cSize = ZSTD_compress(compressedBuffer, ZSTD_compressBound(sampleSize), CNBuffer, sampleSize, 1);
if (ZSTD_isError(cSize)) goto _output_error;
result = ZSTD_decompress(decodedBuffer, sampleSize, compressedBuffer, cSize);
if (ZSTD_isError(result)) goto _output_error;
U32 testNb = 0;
U32 coreSeed = seed, lseed = 0;
ZSTD_CCtx* ctx;
- ZSTD_HC_CCtx* hcctx;
+ ZSTD_CCtx* hcctx;
/* allocation */
ctx = ZSTD_createCCtx();
- hcctx = ZSTD_HC_createCCtx();
+ hcctx = ZSTD_createCCtx();
cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
cNoiseBuffer[1] = (BYTE*)malloc (srcBufferSize);
cNoiseBuffer[2] = (BYTE*)malloc (srcBufferSize);
#define MAX(a,b) ((a)>(b)?(a):(b))
cLevelMod = MAX(1, 38 - (int)(MAX(9, sampleSizeLog) * 2)); /* use high compression levels with small samples, for speed */
cLevel = (FUZ_rand(&lseed) % cLevelMod) +1;
- cSize = ZSTD_HC_compressCCtx(hcctx, cBuffer, cBufferSize, srcBuffer + sampleStart, sampleSize, cLevel);
- CHECK(ZSTD_isError(cSize), "ZSTD_HC_compressCCtx failed");
+ cSize = ZSTD_compressCCtx(hcctx, cBuffer, cBufferSize, srcBuffer + sampleStart, sampleSize, cLevel);
+ CHECK(ZSTD_isError(cSize), "ZSTD_compressCCtx failed");
/* compression failure test : too small dest buffer */
if (cSize > 3)
static const U32 endMark = 0x4DC2B1A9;
U32 endCheck;
memcpy(dstBuffer+tooSmallSize, &endMark, 4);
- errorCode = ZSTD_HC_compressCCtx(hcctx, dstBuffer, tooSmallSize, srcBuffer + sampleStart, sampleSize, cLevel);
- CHECK(!ZSTD_isError(errorCode), "ZSTD_HC_compressCCtx should have failed ! (buffer too small : %u < %u)", (U32)tooSmallSize, (U32)cSize);
+ errorCode = ZSTD_compressCCtx(hcctx, dstBuffer, tooSmallSize, srcBuffer + sampleStart, sampleSize, cLevel);
+ CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (U32)tooSmallSize, (U32)cSize);
memcpy(&endCheck, dstBuffer+tooSmallSize, 4);
- CHECK(endCheck != endMark, "ZSTD_HC_compressCCtx : dst buffer overflow");
+ CHECK(endCheck != endMark, "ZSTD_compressCCtx : dst buffer overflow");
}
/* successful decompression tests */
_cleanup:
ZSTD_freeCCtx(ctx);
- ZSTD_HC_freeCCtx(hcctx);
+ ZSTD_freeCCtx(hcctx);
free(cNoiseBuffer[0]);
free(cNoiseBuffer[1]);
free(cNoiseBuffer[2]);
#endif
#include "mem.h"
-#include "zstdhc_static.h"
-#include "zstd.h"
+#include "zstd_static.h"
#include "datagen.h"
#include "xxhash.h"
static U32 g_singleRun = 0;
static U32 g_target = 0;
static U32 g_noSeed = 0;
-static const ZSTD_HC_parameters* g_seedParams = ZSTD_HC_defaultParameters[0];
-static ZSTD_HC_parameters g_params = { 0, 0, 0, 0, 0, ZSTD_HC_greedy };
+static const ZSTD_parameters* g_seedParams = ZSTD_defaultParameters[0];
+static ZSTD_parameters g_params = { 0, 0, 0, 0, 0, ZSTD_greedy };
void BMK_SetNbIterations(int nbLoops)
{
static size_t BMK_benchParam(BMK_result_t* resultPtr,
const void* srcBuffer, size_t srcSize,
- ZSTD_HC_CCtx* ctx,
- const ZSTD_HC_parameters params)
+ ZSTD_CCtx* ctx,
+ const ZSTD_parameters params)
{
const size_t blockSize = g_blockSize ? g_blockSize : srcSize;
const U32 nbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize);
U32 Hlog = params.hashLog;
U32 Slog = params.searchLog;
U32 Slength = params.searchLength;
- ZSTD_HC_strategy strat = params.strategy;
+ ZSTD_strategy strat = params.strategy;
char name[30] = { 0 };
U64 crcOrig;
while (BMK_GetMilliSpan(milliTime) < TIMELOOP)
{
for (blockNb=0; blockNb<nbBlocks; blockNb++)
- blockTable[blockNb].cSize = ZSTD_HC_compress_advanced(ctx,
+ blockTable[blockNb].cSize = ZSTD_compress_advanced(ctx,
blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize,
params);
}
-const char* g_stratName[] = { "ZSTD_HC_fast ",
- "ZSTD_HC_greedy ",
- "ZSTD_HC_lazy ",
- "ZSTD_HC_lazy2 ",
- "ZSTD_HC_btlazy2" };
+const char* g_stratName[] = { "ZSTD_fast ",
+ "ZSTD_greedy ",
+ "ZSTD_lazy ",
+ "ZSTD_lazy2 ",
+ "ZSTD_btlazy2" };
-static void BMK_printWinner(FILE* f, U32 cLevel, BMK_result_t result, ZSTD_HC_parameters params, size_t srcSize)
+static void BMK_printWinner(FILE* f, U32 cLevel, BMK_result_t result, ZSTD_parameters params, size_t srcSize)
{
DISPLAY("\r%79s\r", "");
fprintf(f," {%3u,%3u,%3u,%3u,%3u, %s }, ",
}
-static U32 g_cSpeedTarget[ZSTD_HC_MAX_CLEVEL+1] = { 0 };
+static U32 g_cSpeedTarget[ZSTD_MAX_CLEVEL+1] = { 0 };
typedef struct {
BMK_result_t result;
- ZSTD_HC_parameters params;
+ ZSTD_parameters params;
} winnerInfo_t;
static void BMK_printWinners2(FILE* f, const winnerInfo_t* winners, size_t srcSize)
int cLevel;
fprintf(f, "\n /* Selected configurations : */ \n");
- fprintf(f, "#define ZSTD_HC_MAX_CLEVEL %2u \n", ZSTD_HC_MAX_CLEVEL);
- fprintf(f, "static const ZSTD_HC_parameters ZSTD_HC_defaultParameters[ZSTD_HC_MAX_CLEVEL+1] = {\n");
+ fprintf(f, "#define ZSTD_MAX_CLEVEL %2u \n", ZSTD_MAX_CLEVEL);
+ fprintf(f, "static const ZSTD_parameters ZSTD_defaultParameters[ZSTD_MAX_CLEVEL+1] = {\n");
fprintf(f, " /* W, C, H, S, L, strat */ \n");
- for (cLevel=0; cLevel <= ZSTD_HC_MAX_CLEVEL; cLevel++)
+ for (cLevel=0; cLevel <= ZSTD_MAX_CLEVEL; cLevel++)
BMK_printWinner(f, cLevel, winners[cLevel].result, winners[cLevel].params, srcSize);
}
}
-static int BMK_seed(winnerInfo_t* winners, const ZSTD_HC_parameters params,
+static int BMK_seed(winnerInfo_t* winners, const ZSTD_parameters params,
const void* srcBuffer, size_t srcSize,
- ZSTD_HC_CCtx* ctx)
+ ZSTD_CCtx* ctx)
{
BMK_result_t testResult;
int better = 0;
BMK_benchParam(&testResult, srcBuffer, srcSize, ctx, params);
- for (cLevel = 1; cLevel <= ZSTD_HC_MAX_CLEVEL; cLevel++)
+ for (cLevel = 1; cLevel <= ZSTD_MAX_CLEVEL; cLevel++)
{
if (testResult.cSpeed < g_cSpeedTarget[cLevel])
continue; /* not fast enough for this level */
double O_DMemUsed_note = O_ratioNote * ( 40 + 9*cLevel) - log((double)O_DMemUsed);
size_t W_CMemUsed = (1 << params.windowLog) + 4 * (1 << params.hashLog) +
- ((params.strategy==ZSTD_HC_fast) ? 0 : 4 * (1 << params.contentLog));
+ ((params.strategy==ZSTD_fast) ? 0 : 4 * (1 << params.contentLog));
size_t O_CMemUsed = (1 << winners[cLevel].params.windowLog) + 4 * (1 << winners[cLevel].params.hashLog) +
- ((winners[cLevel].params.strategy==ZSTD_HC_fast) ? 0 : 4 * (1 << winners[cLevel].params.contentLog));
+ ((winners[cLevel].params.strategy==ZSTD_fast) ? 0 : 4 * (1 << winners[cLevel].params.contentLog));
double W_CMemUsed_note = W_ratioNote * ( 50 + 13*cLevel) - log((double)W_CMemUsed);
double O_CMemUsed_note = O_ratioNote * ( 50 + 13*cLevel) - log((double)O_CMemUsed);
/* nullified useless params, to ensure count stats */
-static ZSTD_HC_parameters* sanitizeParams(ZSTD_HC_parameters params)
+static ZSTD_parameters* sanitizeParams(ZSTD_parameters params)
{
g_params = params;
- if (params.strategy == ZSTD_HC_fast)
+ if (params.strategy == ZSTD_fast)
{
g_params.contentLog = 0;
g_params.searchLog = 0;
#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
static void playAround(FILE* f, winnerInfo_t* winners,
- ZSTD_HC_parameters params,
+ ZSTD_parameters params,
const void* srcBuffer, size_t srcSize,
- ZSTD_HC_CCtx* ctx)
+ ZSTD_CCtx* ctx)
{
int nbVariations = 0;
const int startTime = BMK_GetMilliStart();
while (BMK_GetMilliSpan(startTime) < g_maxVariationTime)
{
- ZSTD_HC_parameters p = params;
+ ZSTD_parameters p = params;
U32 nbChanges = (FUZ_rand(&g_rand) & 3) + 1;
if (nbVariations++ > g_maxNbVariations) break;
case 9:
p.searchLength--; break;
case 10:
- p.strategy = (ZSTD_HC_strategy)(((U32)p.strategy)+1); break;
+ p.strategy = (ZSTD_strategy)(((U32)p.strategy)+1); break;
case 11:
- p.strategy = (ZSTD_HC_strategy)(((U32)p.strategy)-1); break;
+ p.strategy = (ZSTD_strategy)(((U32)p.strategy)-1); break;
}
}
/* validate new conf */
{
- ZSTD_HC_parameters saved = p;
- ZSTD_HC_validateParams(&p, g_blockSize ? g_blockSize : srcSize);
+ ZSTD_parameters saved = p;
+ ZSTD_validateParams(&p, g_blockSize ? g_blockSize : srcSize);
if (memcmp(&p, &saved, sizeof(p))) continue; /* p was invalid */
}
static void BMK_selectRandomStart(
FILE* f, winnerInfo_t* winners,
const void* srcBuffer, size_t srcSize,
- ZSTD_HC_CCtx* ctx)
+ ZSTD_CCtx* ctx)
{
- U32 id = (FUZ_rand(&g_rand) % (ZSTD_HC_MAX_CLEVEL+1));
+ U32 id = (FUZ_rand(&g_rand) % (ZSTD_MAX_CLEVEL+1));
if ((id==0) || (winners[id].params.windowLog==0))
{
/* totally random entry */
- ZSTD_HC_parameters p;
- p.contentLog = FUZ_rand(&g_rand) % (ZSTD_HC_CONTENTLOG_MAX+1 - ZSTD_HC_CONTENTLOG_MIN) + ZSTD_HC_CONTENTLOG_MIN;
- p.hashLog = FUZ_rand(&g_rand) % (ZSTD_HC_HASHLOG_MAX+1 - ZSTD_HC_HASHLOG_MIN) + ZSTD_HC_HASHLOG_MIN;
- p.searchLog = FUZ_rand(&g_rand) % (ZSTD_HC_SEARCHLOG_MAX+1 - ZSTD_HC_SEARCHLOG_MIN) + ZSTD_HC_SEARCHLOG_MIN;
- p.windowLog = FUZ_rand(&g_rand) % (ZSTD_HC_WINDOWLOG_MAX+1 - ZSTD_HC_WINDOWLOG_MIN) + ZSTD_HC_WINDOWLOG_MIN;
- p.searchLength=FUZ_rand(&g_rand) % (ZSTD_HC_SEARCHLENGTH_MAX+1 - ZSTD_HC_SEARCHLENGTH_MIN) + ZSTD_HC_SEARCHLENGTH_MIN;
- p.strategy = (ZSTD_HC_strategy) (FUZ_rand(&g_rand) % (ZSTD_HC_btlazy2+1));
+ ZSTD_parameters p;
+ p.contentLog = FUZ_rand(&g_rand) % (ZSTD_CONTENTLOG_MAX+1 - ZSTD_CONTENTLOG_MIN) + ZSTD_CONTENTLOG_MIN;
+ p.hashLog = FUZ_rand(&g_rand) % (ZSTD_HASHLOG_MAX+1 - ZSTD_HASHLOG_MIN) + ZSTD_HASHLOG_MIN;
+ p.searchLog = FUZ_rand(&g_rand) % (ZSTD_SEARCHLOG_MAX+1 - ZSTD_SEARCHLOG_MIN) + ZSTD_SEARCHLOG_MIN;
+ p.windowLog = FUZ_rand(&g_rand) % (ZSTD_WINDOWLOG_MAX+1 - ZSTD_WINDOWLOG_MIN) + ZSTD_WINDOWLOG_MIN;
+ p.searchLength=FUZ_rand(&g_rand) % (ZSTD_SEARCHLENGTH_MAX+1 - ZSTD_SEARCHLENGTH_MIN) + ZSTD_SEARCHLENGTH_MIN;
+ p.strategy = (ZSTD_strategy) (FUZ_rand(&g_rand) % (ZSTD_btlazy2+1));
playAround(f, winners, p, srcBuffer, srcSize, ctx);
}
else
static void BMK_benchMem(void* srcBuffer, size_t srcSize)
{
- ZSTD_HC_CCtx* ctx = ZSTD_HC_createCCtx();
- ZSTD_HC_parameters params;
- winnerInfo_t winners[ZSTD_HC_MAX_CLEVEL+1];
+ ZSTD_CCtx* ctx = ZSTD_createCCtx();
+ ZSTD_parameters params;
+ winnerInfo_t winners[ZSTD_MAX_CLEVEL+1];
int i;
const char* rfName = "grillResults.txt";
FILE* f;
if (g_singleRun)
{
BMK_result_t testResult;
- ZSTD_HC_validateParams(&g_params, blockSize);
+ ZSTD_validateParams(&g_params, blockSize);
BMK_benchParam(&testResult, srcBuffer, srcSize, ctx, g_params);
DISPLAY("\n");
return;
params.contentLog = 1;
params.searchLog = 1;
params.searchLength = 7;
- params.strategy = ZSTD_HC_fast;
- ZSTD_HC_validateParams(&params, blockSize);
+ params.strategy = ZSTD_fast;
+ ZSTD_validateParams(&params, blockSize);
BMK_benchParam(&testResult, srcBuffer, srcSize, ctx, params);
g_cSpeedTarget[1] = (testResult.cSpeed * 15) >> 4;
}
/* establish speed objectives (relative to level 1) */
- for (i=2; i<=ZSTD_HC_MAX_CLEVEL; i++)
+ for (i=2; i<=ZSTD_MAX_CLEVEL; i++)
g_cSpeedTarget[i] = (g_cSpeedTarget[i-1] * 25) >> 5;
/* populate initial solution */
{
const int tableID = (blockSize > 128 KB);
- const int maxSeeds = g_noSeed ? 1 : ZSTD_HC_MAX_CLEVEL;
- g_seedParams = ZSTD_HC_defaultParameters[tableID];
+ const int maxSeeds = g_noSeed ? 1 : ZSTD_MAX_CLEVEL;
+ g_seedParams = ZSTD_defaultParameters[tableID];
for (i=1; i<=maxSeeds; i++)
{
- const U32 btPlus = (params.strategy == ZSTD_HC_btlazy2);
+ const U32 btPlus = (params.strategy == ZSTD_btlazy2);
params = g_seedParams[i];
params.windowLog = MIN(srcLog, params.windowLog);
params.contentLog = MIN(params.windowLog+btPlus, params.contentLog);
/* clean up*/
fclose(f);
- ZSTD_HC_freeCCtx(ctx);
+ ZSTD_freeCCtx(ctx);
}
g_params.searchLength *= 10, g_params.searchLength += *argument++ - '0';
continue;
case 't': /* strategy */
- g_params.strategy = (ZSTD_HC_strategy)0;
+ g_params.strategy = (ZSTD_strategy)0;
argument++;
while ((*argument>= '0') && (*argument<='9'))
{
- g_params.strategy = (ZSTD_HC_strategy)((U32)g_params.strategy *10);
- g_params.strategy = (ZSTD_HC_strategy)((U32)g_params.strategy + *argument++ - '0');
+ g_params.strategy = (ZSTD_strategy)((U32)g_params.strategy *10);
+ g_params.strategy = (ZSTD_strategy)((U32)g_params.strategy + *argument++ - '0');
}
continue;
case 'L':
while ((*argument>= '0') && (*argument<='9'))
cLevel *= 10, cLevel += *argument++ - '0';
if (cLevel < 1) cLevel = 1;
- if (cLevel > ZSTD_HC_MAX_CLEVEL) cLevel = ZSTD_HC_MAX_CLEVEL;
+ if (cLevel > ZSTD_MAX_CLEVEL) cLevel = ZSTD_MAX_CLEVEL;
g_params = g_seedParams[cLevel];
continue;
}