if (cctx->streamStage == zcss_init) {
/* transparent reset */
+ const void* const prefix = cctx->prefix;
+ size_t const prefixSize = cctx->prefixSize;
ZSTD_parameters params = cctx->requestedParams;
if (cctx->compressionLevel != ZSTD_CLEVEL_CUSTOM)
params.cParams = ZSTD_getCParams(cctx->compressionLevel,
- cctx->pledgedSrcSizePlusOne-1, 0 /* dictSize */);
+ cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
+ cctx->prefix = NULL; cctx->prefixSize = 0; /* single usage */
+ assert(prefix==NULL || cctx->cdict==NULL); /* only one can be set */
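+ /* note : prefix is captured and cleared up-front,
+  * so that both the MT and single-thread paths below consume it exactly once */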
#ifdef ZSTD_MULTITHREAD
if (cctx->nbThreads > 1) {
- DEBUGLOG(4, "call ZSTDMT_initCStream_internal");
- CHECK_F( ZSTDMT_initCStream_internal(cctx->mtctx, NULL, 0, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
+ DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", cctx->nbThreads);
+ CHECK_F( ZSTDMT_initCStream_internal(cctx->mtctx, prefix, prefixSize, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
cctx->streamStage = zcss_load;
} else
#endif
{
- const void* const prefix = cctx->prefix;
- size_t const prefixSize = cctx->prefixSize;
- cctx->prefix = NULL; cctx->prefixSize = 0; /* single usage */
CHECK_F( ZSTD_resetCStream_internal(cctx, prefix, prefixSize, cctx->dictMode, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
} }
#ifdef ZSTD_MULTITHREAD
if (cctx->nbThreads > 1) {
size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
- DEBUGLOG(4, "ZSTDMT_compressStream_generic : %u", (U32)flushMin);
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic : %u", (U32)flushMin);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
ZSTD_startNewCompression(cctx);
const BYTE* const istart = (const BYTE* const)src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip = istart;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
seq.offset = offset;
}
- seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
+ seq.matchLength = ML_base[mlCode]
+ + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);
- seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
- if (MEM_32bits() ||
- (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);
+ seq.litLength = LL_base[llCode]
+ + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
+ if ( MEM_32bits()
+ || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) )
+ BIT_reloadDStream(&seqState->DStream);
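+ /* note : reloading here presumably guarantees enough bits remain
+  * for the 3 ANS state updates below (up to LLFSELog+MLFSELog+OffFSELog bits) */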
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
/* ANS state update */
FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix -> go into extDict */
- if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ if (sequence.offset > (size_t)(oLitEnd - vBase))
+ return ERROR(corruption_detected);
match = dictEnd + (match - base);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
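+ /* match lies entirely within the external dictionary : a single copy suffices */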
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
int nbSeq;
+ DEBUGLOG(5, "ZSTD_decompressSequences");
/* Build Decoding Tables */
{ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders: size=%u, nbSeq=%i",
+ (U32)seqHSize, nbSeq);
if (ZSTD_isError(seqHSize)) return seqHSize;
ip += seqHSize;
}
nbSeq--;
{ seq_t const sequence = ZSTD_decodeSequence(&seqState);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
} }
/* check if reached exact end */
+ DEBUGLOG(5, "after decode loop, remaining nbSeq : %i", nbSeq);
if (nbSeq) return ERROR(corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
const void* src, size_t srcSize)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal");
if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
* or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
+ DEBUGLOG(5, "ZSTD_decompressContinue");
/* Sanity check */
if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */
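+ /* ZSTD_checkContinuity() detects a non-contiguous dst and, when needed,
+  * re-bases the previous output as an external dictionary segment */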
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
}
case ZSTDds_decompressLastBlock:
case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "case ZSTDds_decompressBlock");
{ size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
+ DEBUGLOG(5, "case bt_compressed");
rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
break;
case bt_raw :
} }
/* Consume header (see ZSTDds_decodeFrameHeader) */
- DEBUGLOG(5, "Consume header");
+ DEBUGLOG(4, "Consume header");
CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
}
/* control buffer memory usage */
- DEBUGLOG(5, "Control max buffer memory usage");
+ DEBUGLOG(4, "Control max buffer memory usage");
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
zds->blockSize = blockSize;
if ((zds->inBuffSize < blockSize) || (zds->outBuffSize < neededOutSize)) {
size_t const bufferSize = blockSize + neededOutSize;
- DEBUGLOG(5, "inBuff : from %u to %u",
+ DEBUGLOG(4, "inBuff : from %u to %u",
(U32)zds->inBuffSize, (U32)blockSize);
- DEBUGLOG(5, "outBuff : from %u to %u",
+ DEBUGLOG(4, "outBuff : from %u to %u",
(U32)zds->outBuffSize, (U32)neededOutSize);
if (zds->staticSize) { /* static DCtx */
- DEBUGLOG(5, "staticSize : %u", (U32)zds->staticSize);
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
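+ /* note : a static DCtx cannot allocate : working buffers must fit
+  * within the memory block provided at creation time */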
if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
return ERROR(memory_allocation);
/* multi-fragment decompression test */
if (!dictSize /* don't reset if dictionary : could be different */ && (FUZ_rand(&lseed) & 1)) {
+ DISPLAYLEVEL(5, "resetting DCtx (dict:%08X) \n", (U32)(size_t)dict);
CHECK_Z( ZSTD_resetDStream(zd) );
} else {
+ DISPLAYLEVEL(5, "using dict of size %u \n", (U32)dictSize);
CHECK_Z( ZSTD_initDStream_usingDict(zd, dict, dictSize) );
}
{ size_t decompressionResult = 1;
size_t const dstBuffSize = MIN(dstBufferSize - totalGenSize, randomDstSize);
inBuff.size = inBuff.pos + readCSrcSize;
outBuff.size = outBuff.pos + dstBuffSize;
- DISPLAYLEVEL(5, "ZSTD_decompressStream input %u bytes \n", (U32)readCSrcSize);
+ DISPLAYLEVEL(5, "ZSTD_decompressStream input %u bytes (pos:%u/%u)\n",
+ (U32)readCSrcSize, (U32)inBuff.pos, (U32)cSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
DISPLAYLEVEL(5, "inBuff.pos = %u \n", (U32)readCSrcSize);