const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
HUF_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
- { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
+ { BYTE* op = ostart;
+ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
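/*
 * [Standalone illustration, not part of the diff above] The hunk above shows the
 * pattern this whole patch applies: a variable needed for only one step (`op`) is
 * moved out of function scope and declared at the top of a small brace block.
 * Since the code base stays C90-compatible, declarations may only appear at the
 * start of a block, so narrowing a scope means opening a new block rather than
 * declaring mid-function. Minimal sketch with a hypothetical helper:
 */
#include <string.h>

static size_t copyPrefix(char* dst, size_t dstCapacity, const char* src)
{
    if (dstCapacity == 0) return 0;
    {   size_t len = strlen(src);        /* `len` lives only inside this block */
        if (len >= dstCapacity) len = dstCapacity - 1;
        memcpy(dst, src, len);
        dst[len] = '\0';
        return len;
    }
}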
{ BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
- size_t maxBits, hSize, newSize;
+ size_t hSize, newSize;
const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
size_t optSize = ((size_t) ~0) - 1;
/* Search until size increases */
for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
- maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
- if (ERR_isError(maxBits)) continue;
- if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
+ { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
+ if (ERR_isError(maxBits)) continue;
+
+ if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
- hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
+ hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
+ }
if (ERR_isError(hSize)) continue;
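/*
 * [Standalone illustration, not zstd code] Shape of the search loop above: probe
 * increasing table-log guesses, skip guesses that error out, remember the best
 * (smallest) encoded size, and stop as soon as the size starts growing again,
 * assuming a roughly unimodal cost curve. Toy version with a made-up cost model:
 */
#include <stddef.h>

static size_t toyCost(unsigned logGuess)            /* hypothetical cost model */
{
    return (logGuess < 6) ? (size_t)(100 - 10*logGuess)
                          : (size_t)(40 + 15*(logGuess - 6));  /* minimum at 6 */
}

static unsigned toySearchBestLog(unsigned minLog, unsigned maxLog)
{
    unsigned logGuess, bestLog = maxLog;
    size_t bestSize = (size_t)-1;
    for (logGuess = minLog; logGuess <= maxLog; logGuess++) {
        size_t const curSize = toyCost(logGuess);
        if (curSize > bestSize) break;              /* size increases : stop searching */
        bestSize = curSize;
        bestLog = logGuess;
    }
    return bestLog;
}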
size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal(
¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
- int resizeWorkspace;
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
{ /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
- resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
- int i;
/* End signal */
if (sequence.offset == 0)
break;
/* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
+ int i;
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
{ U32 const storeEnd = cur + 2;
U32 storeStart = storeEnd;
U32 stretchPos = cur;
- ZSTD_optimal_t nextStretch;
DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
last_pos, cur); (void)last_pos;
storeStart = storeEnd;
}
while (1) {
- nextStretch = opt[stretchPos];
+ ZSTD_optimal_t nextStretch = opt[stretchPos];
opt[storeStart].litlen = nextStretch.litlen;
DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)",
opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off);
static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
- unsigned u;
DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
if (!bufPool) return; /* compatibility with free on NULL */
if (bufPool->buffers) {
+ unsigned u;
for (u=0; u<bufPool->totalBuffers; u++) {
DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start);
ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem);
/* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
- int cid;
if (!pool) return;
ZSTD_pthread_mutex_destroy(&pool->poolMutex);
if (pool->cctxs) {
+ int cid;
for (cid=0; cid<pool->totalCCtx; cid++)
ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */
ZSTD_customFree(pool->cctxs, pool->cMem);
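/*
 * [Standalone illustration, hypothetical types] Teardown pattern shared by the two
 * pool destructors above: accept NULL like free() does, release each element, then
 * the element array, then the pool object itself.
 */
#include <stdlib.h>

typedef struct { void** items; size_t nbItems; } ToyPool;

static void ToyPool_free(ToyPool* pool)
{
    if (pool == NULL) return;                  /* compatibility with free on NULL */
    if (pool->items != NULL) {
        size_t i;
        for (i = 0; i < pool->nbItems; i++)
            free(pool->items[i]);              /* each slot may itself be NULL */
        free(pool->items);
    }
    free(pool);
}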
BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p,
unsigned nbLoops)
{
- size_t dstSize = 0;
nbLoops += !nbLoops; /* minimum nbLoops is 1 */
/* init */
} }
/* benchmark */
- { UTIL_time_t const clockStart = UTIL_getTime();
+ { size_t dstSize = 0;
+ UTIL_time_t const clockStart = UTIL_getTime();
unsigned loopNb, blockNb;
if (p.initFn != NULL) p.initFn(p.initPayload);
for (loopNb = 0; loopNb < nbLoops; loopNb++) {
int closeDstFile = 0;
int result;
int transferStat = 0;
- FILE *dstFile;
int dstFd = -1;
assert(AIO_ReadPool_getFile(ress.readCtx) != NULL);
closeDstFile = 1;
DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s \n", dstFileName);
- dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions);
- if (dstFile==NULL) return 1; /* could not open dstFileName */
- dstFd = fileno(dstFile);
- AIO_WritePool_setFile(ress.writeCtx, dstFile);
+ { FILE *dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions);
+ if (dstFile==NULL) return 1; /* could not open dstFileName */
+ dstFd = fileno(dstFile);
+ AIO_WritePool_setFile(ress.writeCtx, dstFile);
+ }
/* Must only be added after FIO_openDstFile() succeeds.
* Otherwise we may delete the destination file if it already exists,
* and the user presses Ctrl-C when asked if they wish to overwrite.
*/
size_t nbFiles = 0;
char* buf;
size_t bufSize;
- size_t pos = 0;
stat_t statbuf;
if (!UTIL_stat(inputFileName, &statbuf) || !UTIL_isRegularFileStat(&statbuf))
{ const char** filenamesTable = (const char**) malloc(nbFiles * sizeof(*filenamesTable));
CONTROL(filenamesTable != NULL);
- { size_t fnb;
- for (fnb = 0, pos = 0; fnb < nbFiles; fnb++) {
+ { size_t fnb, pos = 0;
+ for (fnb = 0; fnb < nbFiles; fnb++) {
filenamesTable[fnb] = buf+pos;
pos += strlen(buf+pos)+1; /* +1 for the finishing `\0` */
- } }
+ }
assert(pos <= bufSize);
+ }
return UTIL_assembleFileNamesTable(filenamesTable, nbFiles, buf);
}
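/*
 * [Standalone illustration, hypothetical helper] The loop above indexes a buffer
 * that holds nbFiles strings packed back to back, each terminated by '\0': the
 * k-th pointer is the byte just past the previous string's terminator.
 */
#include <stddef.h>
#include <string.h>

static void indexPackedStrings(const char* buf, size_t bufSize,
                               const char** table, size_t nbStrings)
{
    size_t n, pos = 0;
    for (n = 0; n < nbStrings && pos < bufSize; n++) {
        table[n] = buf + pos;
        pos += strlen(buf + pos) + 1;   /* skip the string and its '\0' */
    }
}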
static size_t getTotalTableSize(FileNamesTable* table)
{
- size_t fnb = 0, totalSize = 0;
+ size_t fnb, totalSize = 0;
for(fnb = 0 ; fnb < table->tableSize && table->fileNames[fnb] ; ++fnb) {
totalSize += strlen(table->fileNames[fnb]) + 1; /* +1 to add '\0' at the end of each fileName */
}