litSize = 0;
seqCount = 0;
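+ /* Greedily fold sequences into the current sub-block: litSize accumulates
+  * the literals covered so far (all remaining literals once the last
+  * sequence is reached), and the loop re-estimates the compressed size
+  * after each appended sequence. */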
while (sp + seqCount < send) {
- // TODO this is crude estimate for now...
- // Ask Yann, Nick for feedback.
const seqDef* const sequence = sp + seqCount;
const U32 lastSequence = sequence+1 == send;
litSize = lastSequence ? (size_t)(lend-lp) : litSize + sequence->litLength;
seqCount++;
+ /* I think there is an optimization opportunity here.
+ * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+ * since it recalculates the estimate from scratch.
+ * For example, it would recount the literal distribution and symbol codes every time.
+ */
cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
entropy, entropyMetadata,
workspace, wkspSize, writeEntropy);
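+ /* A hypothetical sketch of the optimization described above (names are
+  * assumed, not existing zstd API): keep running symbol histograms and
+  * update them for the newly appended sequence only, e.g.
+  *     countLL[llCodePtr[seqCount-1]]++;
+  *     countML[mlCodePtr[seqCount-1]]++;
+  *     countOf[ofCodePtr[seqCount-1]]++;
+  *     cBlockSizeEstimate = ZSTD_estimateSubBlockSizeFromCounts(countLL, countML, countOf, litSize, seqCount);
+  * so each iteration would cost O(1) instead of rescanning all seqCount
+  * sequences and recounting the literal distribution from scratch. */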
size_t const rBufSize = size;
void* rBuf = malloc(rBufSize);
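+ /* Assumption, not stated in this change: with very small targetCBlockSize
+  * values, per-sub-block header overhead may push the compressed size past
+  * ZSTD_compressBound(), so the bound below is doubled to keep the
+  * fuzzer's output buffer safe. */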
- size_t cBufSize = ZSTD_compressBound(size);
+ size_t cBufSize = ZSTD_compressBound(size) * 2;
void* cBuf;
/* Half of the time fuzz with a 1 byte smaller output size.
 * This will still succeed because we force the checksum to be disabled,
 * giving us 4 bytes of overhead. */
{
size_t const rBufSize = size;
void* rBuf = malloc(rBufSize);
- size_t cBufSize = ZSTD_compressBound(size);
+ size_t cBufSize = ZSTD_compressBound(size) * 2;
void* cBuf;
/* Give a random portion of src data to the producer, to use for
 * parameter generation. The rest will be used for (de)compression */
if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
setRand(cctx, ZSTD_c_srcSizeHint, ZSTD_SRCSIZEHINT_MIN, 2 * srcSize, producer);
}
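+ /* Fuzzing targetCBlockSize exercises the super-block path, which splits
+  * compressed blocks into sub-blocks of roughly the requested size. */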
+ if (FUZZ_dataProducer_uint32Range(producer, 0, 1) == 0) {
+ setRand(cctx, ZSTD_c_targetCBlockSize, ZSTD_TARGETCBLOCKSIZE_MIN, ZSTD_TARGETCBLOCKSIZE_MAX, producer);
+ }
}
FUZZ_dict_t FUZZ_train(void const* src, size_t srcSize, FUZZ_dataProducer_t *producer)