As suggested in #1441.
Generally, `U32` and `unsigned` are the same thing,
except when they are not...
Case in point: 32-bit compilation for MIPS, where `uint32_t == unsigned long`.
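For illustration, a minimal sketch of how the mismatch surfaces (which typedef `<stdint.h>` picks for `uint32_t` is platform-dependent):

```c
#include <stdio.h>
#include <stdint.h>

typedef uint32_t U32;   /* as in mem.h */

int main(void)
{
    U32 x = 123;
    unsigned* p = &x;   /* compiles cleanly where uint32_t == unsigned,
                         * but warns (incompatible pointer types) where
                         * uint32_t == unsigned long, e.g. 32-bit MIPS */
    printf("%u\n", *p);
    return 0;
}
```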
The vast majority of the transformation consists in converting `U32` into `unsigned`.
In rare cases, it's the other way around (typically for internal code, such as seeds).
Among the issues this patch solves:
- some parameters were declared with type `unsigned` in their *.h header,
  but with type `U32` in their *.c implementation;
- some parameters have type `unsigned*`,
  but the caller used a pointer to `U32` instead (see the sketch below).
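Both patterns stay invisible on platforms where `U32 == unsigned`; here is a minimal sketch, with a hypothetical `FOO_getCount()` standing in for the affected prototypes:

```c
#include <stddef.h>
#include <stdint.h>

typedef uint32_t U32;   /* as in mem.h */

/* declared with `unsigned` in the header ... */
size_t FOO_getCount(unsigned* countPtr);

/* ... but defined with `U32` in the implementation :
 * wherever U32 != unsigned, the two prototypes conflict,
 * so the bug only surfaces on those platforms. */
size_t FOO_getCount(U32* countPtr) { *countPtr = 7; return 0; }

int main(void)
{
    U32 count;   /* caller passes U32* where unsigned* is expected */
    return (int)FOO_getCount(&count);
}
```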
These fixes are useful.
However, the bulk of the changes concerns `%u` formatting,
which requires an `unsigned` argument,
but generally received `U32` values instead,
often just for brevity (`U32` is shorter than `unsigned`).
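The recurring fix is just an explicit cast at the call site; a minimal sketch of the pattern:

```c
#include <stdio.h>
#include <stdint.h>

typedef uint32_t U32;   /* as in mem.h */

int main(void)
{
    U32 const srcSize = 1024;
    /* before : printf("%u \n", srcSize);   assumes U32 == unsigned */
    printf("%u \n", (unsigned)srcSize);  /* after : correct on every platform */
    return 0;
}
```

This is why the diff below is dominated by mechanical `(U32)` -> `(unsigned)` cast changes inside `DEBUGLOG()` and `DISPLAYLEVEL()` statements.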
These changes are generally minor, or even annoying.
As a consequence, the amount of code changed is larger than I would expect for such a patch.
Testing is also a pain:
it requires manually modifying `mem.h`,
in order to lie about `U32`
and force it to be, typically, an `unsigned long`.
On a 64-bit system, this breaks the equivalence `unsigned == U32`.
Unfortunately, it also breaks a few `static_assert()` which control structure sizes,
so it additionally requires modifying `debug.h` to make `static_assert()` a no-op.
And then all of these changes must be reverted.
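For reference, a minimal sketch of what the lie looks like on a 64-bit (LP64) host; the real test edits `mem.h` and `debug.h` in place:

```c
#include <stdio.h>

typedef unsigned long U32;   /* the lie : on LP64, U32 is now 64 bits wide
                              * and a distinct type from unsigned */

/* in debug.h, static_assert() gets neutralized for the duration of the test :
 *    #define static_assert(c, msg)
 */

int main(void)
{
    printf("sizeof(U32) = %u, sizeof(unsigned) = %u \n",
           (unsigned)sizeof(U32), (unsigned)sizeof(unsigned));
    /* prints 8 vs 4 : the equivalence is broken, and any static_assert()
     * controlling a structure size built on U32 would now fire */
    return 0;
}
```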
So it's inconvenient, and as a consequence,
this property is currently not checked during CI tests.
Therefore, these problems can emerge again in the future.
I wonder if it is worth enforcing the distinction `U32 != unsigned` in CI tests.
It would be another coding restriction, adding more frustration during merge tests,
since most platforms don't need the distinction (hence contributors will not see it locally),
and while this can matter in theory, the number of platforms impacted seems minimal.
Thoughts?
DISPLAYLEVEL(4, "Compressible data Generator \n");
if (probaU32!=COMPRESSIBILITY_DEFAULT)
DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
- DISPLAYLEVEL(3, "Seed = %u \n", seed);
+ DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed);
RDG_genStdout(size, (double)probaU32/100, litProba, seed);
DISPLAYLEVEL(1, "\n");
size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs,
int compressionLevel,
int checksumFlag,
- U32 maxFrameSize)
+ unsigned maxFrameSize)
{
zcs->framelog.size = 0;
zcs->frameCSize = 0;
* Performs a binary search to find the last frame with a decompressed offset
* <= pos
* @return : the frame's index */
-U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long pos)
+unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long pos)
{
U32 lo = 0;
U32 hi = (U32)zs->seekTable.tableLen;
return lo;
}
-U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
+unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
{
assert(zs->seekTable.tableLen <= UINT_MAX);
- return (U32)zs->seekTable.tableLen;
+ return (unsigned)zs->seekTable.tableLen;
}
-unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return zs->seekTable.entries[frameIndex].cOffset;
}
-unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return zs->seekTable.entries[frameIndex].dOffset;
}
-size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].cOffset -
zs->seekTable.entries[frameIndex].cOffset;
}
-size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].dOffset -
zs->seekTable.entries[frameIndex].dOffset;
return len;
}
-size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex)
+size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) {
return ERROR(frameIndex_tooLarge);
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
/*! BIT_readBitsFast() :
 * unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);
assert(nbBits >= 1);
const U32 tableLog = MEM_read16(ptr);
statePtr->value = (ptrdiff_t)1<<tableLog;
statePtr->stateTable = u16ptr+2;
- statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
statePtr->stateLog = tableLog;
}
}
}
-MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
{
FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
- const U32* count, U32 maxSymbolValue, U32 maxNbBits,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
void* workSpace, size_t wkspSize);
/*! HUF_readStats() :
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
- U32 count[FSE_MAX_SYMBOL_VALUE+1];
+ unsigned count[FSE_MAX_SYMBOL_VALUE+1];
S16 norm[FSE_MAX_SYMBOL_VALUE+1];
FSE_CTable* CTable = (FSE_CTable*)workSpace;
size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
- U32 maxSymbolValue = HUF_TABLELOG_MAX;
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
- U32 count[HUF_TABLELOG_MAX+1];
+ unsigned count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
/* init conditions */
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
{
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
}
-size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 current;
} rankPos;
-static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
{
rankPos rank[32];
U32 n;
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
nodeElt* const huffNode0 = (nodeElt*)workSpace;
nodeElt* const huffNode = huffNode0+1;
* @return : maxNbBits
* Note : count is used before tree is written, so they can safely overlap
*/
-size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
{
huffNodeTable nodeTable;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
}
typedef struct {
- U32 count[HUF_SYMBOLVALUE_MAX + 1];
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
huffNodeTable nodeTable;
} HUF_compress_tables_t;
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- { CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
- maxSymbolValue, huffLog,
- table->nodeTable, sizeof(table->nodeTable)) );
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ table->nodeTable, sizeof(table->nodeTable));
+ CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */
memset(table->CTable + (maxSymbolValue + 1), 0,
/* opt parser space */
if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
- ms->opt.litFreq = (U32*)ptr;
+ ms->opt.litFreq = (unsigned*)ptr;
ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
- (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
zc->blockSize = blockSize;
XXH64_reset(&zc->xxhState, 0);
ZSTD_buffered_policy_e zbuff)
{
- DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
return ZSTD_resetCCtx_byAttachingCDict(
assert(!ZSTD_isError(NCountCost));
assert(compressedCost < ERROR(maxCode));
DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
- (U32)basicCost, (U32)repeatCost, (U32)compressedCost);
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
if (basicCost <= repeatCost && basicCost <= compressedCost) {
DEBUGLOG(5, "Selected set_basic");
assert(isDefaultAllowed);
MEM_STATIC size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
- U32* count, U32 max,
+ unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
U32 const ofBits = ofCode;
U32 const mlBits = ML_bits[mlCode];
DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
- sequences[n].litLength,
- sequences[n].matchLength + MINMATCH,
- sequences[n].offset);
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].matchLength + MINMATCH,
+ (unsigned)sequences[n].offset);
/* 32b*/ /* 64b*/
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
- U32 count[MaxSeq+1];
+ unsigned count[MaxSeq+1];
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
- { U32 max = MaxLL;
+ { unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
op += countSize;
} }
/* build CTable for Offsets */
- { U32 max = MaxOff;
+ { unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
op += countSize;
} }
/* build CTable for MatchLengths */
- { U32 max = MaxML;
+ { unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
size_t cSize;
DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
- (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+ (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= 31);
- DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
assert(dstCapacity >= cSize);
dstCapacity -= cSize;
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
- (U32)cSize);
+ (unsigned)cSize);
} }
if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
- !params.fParams.noDictIDFlag, dictID, dictIDSizeCode);
+ !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
if (params.format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
- cctx->stage, (U32)srcSize);
+ cctx->stage, (unsigned)srcSize);
if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
if (frame && (cctx->stage==ZSTDcs_init)) {
}
}
- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
- (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+ (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
return ERROR(srcSize_wrong);
}
}
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
- DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
- DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
if (dstCapacity<4) return ERROR(dstSize_tooSmall);
- DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
}
DEBUGLOG(4, "end of frame : controlling src size");
if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
- (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+ (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
return ERROR(srcSize_wrong);
} }
return cSize + endResult;
const void* dict,size_t dictSize,
ZSTD_CCtx_params params)
{
- DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize);
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
CHECK_F( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) );
const void* src, size_t srcSize,
int compressionLevel)
{
- DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
assert(cctx != NULL);
return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}
size_t dictSize, ZSTD_compressionParameters cParams,
ZSTD_dictLoadMethod_e dictLoadMethod)
{
- DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
- DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
}
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType);
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(cParams));
cdict->matchState.cParams = cParams;
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
- DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
{ ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
void* ptr;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
- (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
if (dictLoadMethod == ZSTD_dlm_byCopy) {
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
{
ZSTD_CCtx_params params = zcs->requestedParams;
- DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
params.fParams.contentSizeFlag = 1;
return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict && dictSize >= 8) {
- DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
+ DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
if (zcs->staticSize) { /* static CCtx : never uses malloc */
/* incompatible with internal cdict creation */
return ERROR(memory_allocation);
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
- (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
+ (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
CHECK_F( ZSTD_checkCParams(params.cParams) );
if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
U32 someMoreWork = 1;
/* check expectations */
- DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
assert(zcs->outBuff != NULL);
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd(zcs,
op, oend-op, ip, iend-ip);
- DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (U32)cSize);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
if (ZSTD_isError(cSize)) return cSize;
ip = iend;
op += cSize;
if (zcs->inBuffTarget > zcs->inBuffSize)
zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
- (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
size_t const flushed = ZSTD_limitCopy(op, oend-op,
zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
- (U32)toFlush, (U32)(oend-op), (U32)flushed);
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
op += flushed;
zcs->outBuffFlushedSize += flushed;
if (toFlush!=flushed) {
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
- DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (U32)endOp);
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
if (output->pos > output->size) return ERROR(GENERIC);
if (input->pos > input->size) return ERROR(GENERIC);
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
- DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
return toFlush;
}
}
typedef struct {
/* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
- U32* litFreq; /* table of literals statistics, of size 256 */
- U32* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
- U32* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
- U32* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
- blockEndIdx, maxDist);
+ (unsigned)blockEndIdx, (unsigned)maxDist);
if (blockEndIdx > maxDist + loadedDictEnd) {
U32 const newLowLimit = blockEndIdx - maxDist;
if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
if (window->dictLimit < window->lowLimit) {
DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
- window->dictLimit, window->lowLimit);
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
window->dictLimit = window->lowLimit;
}
if (loadedDictEndPtr)
/* ZSTD_downscaleStat() :
* reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
* return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(U32* table, U32 lastEltIndex, int malus)
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
{
U32 s, sum=0;
- DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", lastEltIndex+1);
+ DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
for (s=0; s<lastEltIndex+1; s++) {
table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
U32 const offCode = opt[storePos].off;
U32 const advance = llen + mlen;
DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
- anchor - istart, llen, mlen);
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert(storePos == storeEnd); /* must be last sequence */
/* used in 2-pass strategy */
-static U32 ZSTD_upscaleStat(U32* table, U32 lastEltIndex, int bonus)
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
{
U32 s, sum=0;
assert(ZSTD_FREQ_DIV+bonus >= 0);
{ U32 const magicNumber = MEM_readLE32(src);
DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
- (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize))
* or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
if (srcSize != dctx->expected)
return ERROR(srcSize_wrong); /* not allowed */
return ERROR(corruption_detected);
}
if (ZSTD_isError(rSize)) return rSize;
- DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
dctx->decodedSize += rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
- DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
if (dctx->decodedSize != dctx->fParams.frameContentSize) {
return ERROR(corruption_detected);
assert(srcSize == 4); /* guaranteed by dctx->expected */
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
- DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
if (check32 != h32) return ERROR(checksum_wrong);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
}
{ short offcodeNCount[MaxOff+1];
- U32 offcodeMaxValue = MaxOff, offcodeLog;
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
* @return : nb bytes read from src,
* or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
- symbolEncodingType_e type, U32 max, U32 maxLog,
+ symbolEncodingType_e type, unsigned max, U32 maxLog,
const void* src, size_t srcSize,
const U32* baseValue, const U32* nbAdditionalBits,
const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
}
return 0;
case set_compressed :
- { U32 tableLog;
+ { unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
if (FSE_isError(headerSize)) return ERROR(corruption_detected);
/*-*************************************
* Constants
***************************************/
-#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define DEFAULT_SPLITPOINT 1.0
/*-*************************************
if (totalSamplesSize < MAX(d, sizeof(U64)) ||
totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
- (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
+ (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
return 0;
}
/* Check if there are at least 5 training samples */
/* Zero the context */
memset(ctx, 0, sizeof(*ctx));
DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
- (U32)trainingSamplesSize);
+ (unsigned)trainingSamplesSize);
DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
- (U32)testSamplesSize);
+ (unsigned)testSamplesSize);
ctx->samples = samples;
ctx->samplesSizes = samplesSizes;
ctx->nbSamples = nbSamples;
/* Divide the data up into epochs of equal size.
* We will select at least one segment from each epoch.
*/
- const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
- const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+ const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
+ const unsigned epochSize = (U32)(ctx->suffixSize / epochs);
size_t epoch;
- DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
- epochSize);
+ DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+ epochs, epochSize);
/* Loop through the epochs until there are no more segments or the dictionary
* is full.
*/
memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
DISPLAYUPDATE(
2, "\r%u%% ",
- (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+ (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
}
DISPLAYLEVEL(2, "\r%79s\r", "");
return tail;
samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
if (!ZSTD_isError(dictionarySize)) {
DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
- (U32)dictionarySize);
+ (unsigned)dictionarySize);
}
COVER_ctx_destroy(&ctx);
COVER_map_destroy(&activeDmers);
}
/* Print status */
LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
- (U32)((iteration * 100) / kIterations));
+ (unsigned)((iteration * 100) / kIterations));
++iteration;
}
COVER_best_wait(&best);
/*-*************************************
* Constants
***************************************/
-#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define FASTCOVER_MAX_F 31
#define FASTCOVER_MAX_ACCEL 10
#define DEFAULT_SPLITPOINT 0.75
if (totalSamplesSize < MAX(d, sizeof(U64)) ||
totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
- (U32)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
+ (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
return 0;
}
/* Zero the context */
memset(ctx, 0, sizeof(*ctx));
DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
- (U32)trainingSamplesSize);
+ (unsigned)trainingSamplesSize);
DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
- (U32)testSamplesSize);
+ (unsigned)testSamplesSize);
ctx->samples = samples;
ctx->samplesSizes = samplesSizes;
/* Divide the data up into epochs of equal size.
* We will select at least one segment from each epoch.
*/
- const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
- const U32 epochSize = (U32)(ctx->nbDmers / epochs);
+ const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
+ const unsigned epochSize = (U32)(ctx->nbDmers / epochs);
size_t epoch;
- DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
- epochSize);
+ DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+ epochs, epochSize);
/* Loop through the epochs until there are no more segments or the dictionary
* is full.
*/
memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
DISPLAYUPDATE(
2, "\r%u%% ",
- (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+ (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
}
DISPLAYLEVEL(2, "\r%79s\r", "");
return tail;
samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
if (!ZSTD_isError(dictionarySize)) {
DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
- (U32)dictionarySize);
+ (unsigned)dictionarySize);
}
FASTCOVER_ctx_destroy(&ctx);
free(segmentFreqs);
}
/* Print status */
LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
- (U32)((iteration * 100) / kIterations));
+ (unsigned)((iteration * 100) / kIterations));
++iteration;
}
COVER_best_wait(&best);
U32 refinedEnd = end;
DISPLAYLEVEL(4, "\n");
- DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (U32)(end-start), MINMATCHLENGTH, (U32)pos);
+ DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
DISPLAYLEVEL(4, "\n");
for (mml = MINMATCHLENGTH ; ; mml++) {
savings[i] = savings[i-1] + (lengthList[i] * (i-3));
DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
- (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
+ (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
solution.pos = (U32)pos;
solution.length = (U32)maxLength;
static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
const size_t* fileSizes, unsigned nbFiles,
- U32 minRatio, U32 notificationLevel)
+ unsigned minRatio, U32 notificationLevel)
{
int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
int* const suffix = suffix0+1;
memset(doneMarks, 0, bufferSize+16);
/* limit sample set size (divsufsort limitation)*/
- if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20));
+ if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
/* sort */
- DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20));
+ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
{ int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
}
#define MAXREPOFFSET 1024
static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
- U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
+ unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
const void* src, size_t srcSize,
U32 notificationLevel)
{
}
cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
- if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+ if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
if (cSize) { /* if == 0; block is not compressible */
const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
* rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
* necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
*/
-static void ZDICT_flatLit(U32* countLit)
+static void ZDICT_flatLit(unsigned* countLit)
{
int u;
for (u=1; u<256; u++) countLit[u] = 2;
const void* dictBuffer, size_t dictBufferSize,
unsigned notificationLevel)
{
- U32 countLit[256];
+ unsigned countLit[256];
HUF_CREATE_STATIC_CTABLE(hufTable, 255);
- U32 offcodeCount[OFFCODE_MAX+1];
+ unsigned offcodeCount[OFFCODE_MAX+1];
short offcodeNCount[OFFCODE_MAX+1];
U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
- U32 matchLengthCount[MaxML+1];
+ unsigned matchLengthCount[MaxML+1];
short matchLengthNCount[MaxML+1];
- U32 litLengthCount[MaxLL+1];
+ unsigned litLengthCount[MaxLL+1];
short litLengthNCount[MaxLL+1];
U32 repOffset[MAXREPOFFSET];
offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
/* display best matches */
if (params.zParams.notificationLevel>= 3) {
- U32 const nb = MIN(25, dictList[0].pos);
- U32 const dictContentSize = ZDICT_dictSize(dictList);
- U32 u;
- DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize);
+ unsigned const nb = MIN(25, dictList[0].pos);
+ unsigned const dictContentSize = ZDICT_dictSize(dictList);
+ unsigned u;
+ DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
for (u=1; u<nb; u++) {
- U32 const pos = dictList[u].pos;
- U32 const length = dictList[u].length;
+ unsigned const pos = dictList[u].pos;
+ unsigned const length = dictList[u].length;
U32 const printedLength = MIN(40, length);
if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
free(dictList);
return ERROR(GENERIC); /* should never happen */
}
DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
- u, length, pos, dictList[u].savings);
+ u, length, pos, (unsigned)dictList[u].savings);
ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
DISPLAYLEVEL(3, "| \n");
} }
/* create dictionary */
- { U32 dictContentSize = ZDICT_dictSize(dictList);
+ { unsigned dictContentSize = ZDICT_dictSize(dictList);
if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
if (dictContentSize < targetDictSize/4) {
- DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize);
+ DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
if (samplesBuffSize < 10 * targetDictSize)
- DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
+ DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
if (minRep > MINRATIO) {
DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
}
if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
- U32 proposedSelectivity = selectivity-1;
+ unsigned proposedSelectivity = selectivity-1;
while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
- DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize);
+ DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
}
bitD->bitsConsumed += nbBits;
}
-MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
{
size_t value = BITv05_lookBits(bitD, nbBits);
BITv05_skipBits(bitD, nbBits);
/*!BITv05_readBitsFast :
 * unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
{
size_t value = BITv05_lookBitsFast(bitD, nbBits);
BITv05_skipBits(bitD, nbBits);
/* **************************************************************
* Complex types
****************************************************************/
-typedef U32 DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
+typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
/* **************************************************************
}
}
-size_t HUFv05_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
void* dtPtr = DTable;
HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
- HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
+ HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */
if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */
size_t HUFv05_decompress1X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const U32* DTable)
+ const unsigned* DTable)
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
size_t HUFv05_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const U32* DTable)
+ const unsigned* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
- U32 LLlog, Offlog, MLlog;
+ unsigned LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
- { U32 max = MaxLL;
+ { unsigned max = MaxLL;
headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
- { U32 max = MaxOff;
+ { unsigned max = MaxOff;
headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
- { U32 max = MaxML;
+ { unsigned max = MaxML;
headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq=0;
const BYTE* dumps = NULL;
- U32* DTableLL = dctx->LLTable;
- U32* DTableML = dctx->MLTable;
- U32* DTableOffb = dctx->OffTable;
+ unsigned* DTableLL = dctx->LLTable;
+ unsigned* DTableML = dctx->MLTable;
+ unsigned* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
{
size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
short offcodeNCount[MaxOff+1];
- U32 offcodeMaxValue=MaxOff, offcodeLog;
+ unsigned offcodeMaxValue=MaxOff, offcodeLog;
short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
short litlengthNCount[MaxLL+1];
if ((p.errorFn != NULL) && (p.errorFn(res))) {
RETURN_QUIET_ERROR(BMK_runOutcome_error(res),
"Function benchmark failed on block %u (of size %u) with error %i",
- blockNb, (U32)p.srcSizes[blockNb], (int)res);
+ blockNb, (unsigned)p.srcSizes[blockNb], (int)res);
}
dstSize += res;
} }
dctxprep.dictBufferSize = dictBufferSize;
DISPLAYLEVEL(2, "\r%70s\r", ""); /* blank line */
- DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize);
+ DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
while (!(compressionCompleted && decompressionCompleted)) {
if (!compressionCompleted) {
{ int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s\r",
marks[markNb], displayName,
- (U32)srcSize, (U32)cSize,
+ (unsigned)srcSize, (unsigned)cSize,
ratioAccuracy, ratio,
benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT);
}
{ int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r",
marks[markNb], displayName,
- (U32)srcSize, (U32)benchResult.cSize,
+ (unsigned)srcSize, (unsigned)benchResult.cSize,
ratioAccuracy, ratio,
benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT,
(double)benchResult.dSpeed / MB_UNIT);
displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) {
if (((const BYTE*)srcBuffer)[u] != resultBuffer[u]) {
- U32 segNb, bNb, pos;
+ unsigned segNb, bNb, pos;
size_t bacc = 0;
- DISPLAY("Decoding error at pos %u ", (U32)u);
+ DISPLAY("Decoding error at pos %u ", (unsigned)u);
for (segNb = 0; segNb < nbBlocks; segNb++) {
if (bacc + srcSizes[segNb] > u) break;
bacc += srcSizes[segNb];
if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */
DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
- (U32)benchedSize, adv->nbSeconds, (U32)(adv->blockSize>>10));
+ (unsigned)benchedSize, adv->nbSeconds, (unsigned)(adv->blockSize>>10));
return BMK_benchMemAdvanced(srcBuffer, benchedSize,
NULL, 0,
if (dictBuffer==NULL) {
free(fileSizes);
RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)",
- (U32)dictBufferSize);
+ (unsigned)dictBufferSize);
}
{ int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize,
benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
if (benchedSize < totalSizeToLoad)
- DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20));
+ DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
srcBuffer = benchedSize ? malloc(benchedSize) : NULL;
if (!srcBuffer) {
}
-static U32 RDG_rand15Bits (unsigned* seedPtr)
+static U32 RDG_rand15Bits (U32* seedPtr)
{
return RDG_rand(seedPtr) & 0x7FFF;
}
-static U32 RDG_randLength(unsigned* seedPtr)
+static U32 RDG_randLength(U32* seedPtr)
{
if (RDG_rand(seedPtr) & 7) return (RDG_rand(seedPtr) & 0xF); /* small length */
return (RDG_rand(seedPtr) & 0x1FF) + 0xF;
}
-static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, unsigned* seedPtr)
+static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, U32* seedPtr)
{
BYTE* const buffPtr = (BYTE*)buffer;
U32 const matchProba32 = (U32)(32768 * matchProba);
void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed)
{
+ U32 seed32 = seed;
BYTE ldt[LTSIZE];
memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */
if (litProba<=0.0) litProba = matchProba / 4.5;
RDG_fillLiteralDistrib(ldt, litProba);
- RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed);
+ RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed32);
}
void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed)
{
+ U32 seed32 = seed;
size_t const stdBlockSize = 128 KB;
size_t const stdDictSize = 32 KB;
BYTE* const buff = (BYTE*)malloc(stdDictSize + stdBlockSize);
SET_BINARY_MODE(stdout);
/* Generate initial dict */
- RDG_genBlock(buff, stdDictSize, 0, matchProba, ldt, &seed);
+ RDG_genBlock(buff, stdDictSize, 0, matchProba, ldt, &seed32);
/* Generate compressible data */
while (total < size) {
size_t const genBlockSize = (size_t) (MIN (stdBlockSize, size-total));
- RDG_genBlock(buff, stdDictSize+stdBlockSize, stdDictSize, matchProba, ldt, &seed);
+ RDG_genBlock(buff, stdDictSize+stdBlockSize, stdDictSize, matchProba, ldt, &seed32);
total += genBlockSize;
{ size_t const unused = fwrite(buff, 1, genBlockSize, stdout); (void)unused; }
/* update dict */
}
DISPLAYLEVEL(2, "\r%79s\r", "");
*bufferSizePtr = pos;
- DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10))
+ DISPLAYLEVEL(4, "loaded : %u KB \n", (unsigned)(pos >> 10))
return nbLoadedChunks;
}
fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX);
fs.nbSamples += nbSamples;
}
- DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10));
+ DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (unsigned)(fs.totalSizeToLoad >> 10));
return fs;
}
goto _cleanup;
}
/* save dict */
- DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (U32)dictSize, dictFileName);
+ DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (unsigned)dictSize, dictFileName);
DiB_saveDict(dictFileName, dictBuffer, dictSize);
}
void FIO_setChecksumFlag(unsigned checksumFlag) { g_checksumFlag = checksumFlag; }
static U32 g_removeSrcFile = 0;
void FIO_setRemoveSrcFile(unsigned flag) { g_removeSrcFile = (flag>0); }
-static U32 g_memLimit = 0;
+static unsigned g_memLimit = 0;
void FIO_setMemLimit(unsigned memLimit) { g_memLimit = memLimit; }
-static U32 g_nbWorkers = 1;
+static unsigned g_nbWorkers = 1;
void FIO_setNbWorkers(unsigned nbWorkers) {
#ifndef ZSTD_MULTITHREAD
if (nbWorkers > 0) DISPLAYLEVEL(2, "Note : multi-threading is disabled \n");
g_blockSize = blockSize;
}
#define FIO_OVERLAP_LOG_NOTSET 9999
-static U32 g_overlapLog = FIO_OVERLAP_LOG_NOTSET;
+static unsigned g_overlapLog = FIO_OVERLAP_LOG_NOTSET;
void FIO_setOverlapLog(unsigned overlapLog){
if (overlapLog && g_nbWorkers==0)
DISPLAYLEVEL(2, "Setting overlapLog is useless in single-thread mode \n");
}
if (srcFileSize == UTIL_FILESIZE_UNKNOWN)
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
- (U32)(inFileSize>>20),
+ (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
else
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
- (U32)(inFileSize>>20), (U32)(srcFileSize>>20),
+ (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
}
} }
if (srcFileSize == UTIL_FILESIZE_UNKNOWN)
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
- (U32)(inFileSize>>20),
+ (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
else
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
- (U32)(inFileSize>>20), (U32)(srcFileSize>>20),
+ (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
if (ret == LZMA_STREAM_END) break;
}
outFileSize += outSize;
if (srcFileSize == UTIL_FILESIZE_UNKNOWN) {
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
- (U32)(inFileSize>>20),
+ (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100)
} else {
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
- (U32)(inFileSize>>20), (U32)(srcFileSize>>20),
+ (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100);
}
/* Fill input Buffer */
size_t const inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile);
ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 };
- DISPLAYLEVEL(6, "fread %u bytes from source \n", (U32)inSize);
+ DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize);
*readsize += inSize;
if ((inSize == 0) || (*readsize == fileSize))
/* Write compressed stream */
DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n",
- (U32)directive, (U32)inBuff.pos, (U32)inBuff.size, (U32)outBuff.pos);
+ (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos);
if (outBuff.pos) {
size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile);
if (sizeCheck != outBuff.pos)
if (g_displayLevel >= 3) {
DISPLAYUPDATE(3, "\r(L%i) Buffered :%4u MB - Consumed :%4u MB - Compressed :%4u MB => %.2f%% ",
compressionLevel,
- (U32)((zfp.ingested - zfp.consumed) >> 20),
- (U32)(zfp.consumed >> 20),
- (U32)(zfp.produced >> 20),
+ (unsigned)((zfp.ingested - zfp.consumed) >> 20),
+ (unsigned)(zfp.consumed >> 20),
+ (unsigned)(zfp.produced >> 20),
cShare );
} else { /* summarized notifications if == 2; */
- DISPLAYLEVEL(2, "\rRead : %u ", (U32)(zfp.consumed >> 20));
+ DISPLAYLEVEL(2, "\rRead : %u ", (unsigned)(zfp.consumed >> 20));
if (fileSize != UTIL_FILESIZE_UNKNOWN)
- DISPLAYLEVEL(2, "/ %u ", (U32)(fileSize >> 20));
+ DISPLAYLEVEL(2, "/ %u ", (unsigned)(fileSize >> 20));
DISPLAYLEVEL(2, "MB ==> %2.f%% ", cShare);
DELAY_NEXT_UPDATE();
}
assert(inputPresented > 0);
DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
- (U32)newlyIngested, (U32)newlyConsumed,
- (U32)newlyFlushed, (U32)newlyProduced);
+ (unsigned)newlyIngested, (unsigned)newlyConsumed,
+ (unsigned)newlyFlushed, (unsigned)newlyProduced);
if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */
&& (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */
&& (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */
U64 readsize = 0;
U64 compressedfilesize = 0;
U64 const fileSize = UTIL_getFileSize(srcFileName);
- DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (U32)fileSize);
+ DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (unsigned)fileSize);
/* compression format selection */
switch (g_compressionType) {
err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded);
if (err == 0) {
unsigned long long const windowSize = header.windowSize;
- U32 const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0);
+ unsigned const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0);
assert(g_memLimit > 0);
DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u\n",
srcFileName, windowSize, g_memLimit);
if (windowLog <= ZSTD_WINDOWLOG_MAX) {
- U32 const windowMB = (U32)((windowSize >> 20) + ((windowSize & ((1 MB) - 1)) != 0));
+ unsigned const windowMB = (unsigned)((windowSize >> 20) + ((windowSize & ((1 MB) - 1)) != 0));
assert(windowSize < (U64)(1ULL << 52)); /* ensure no overflow for windowMB */
DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB\n",
srcFileName, windowLog, windowMB);
storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, storedSkips);
frameSize += outBuff.pos;
DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ",
- srcFileName, (U32)((alreadyDecoded+frameSize)>>20) );
+ srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) );
if (inBuff.pos > 0) {
memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos);
total.numSkippableFrames + total.numActualFrames,
total.numSkippableFrames,
compressedSizeUnit, unitStr,
- checkString, total.nbFiles);
+ checkString, (unsigned)total.nbFiles);
} else {
DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n",
total.numSkippableFrames + total.numActualFrames,
total.numSkippableFrames,
compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr,
- ratio, checkString, total.nbFiles);
+ ratio, checkString, (unsigned)total.nbFiles);
} }
return error;
}
DISPLAYLEVEL(4, "Compressible data Generator \n");
if (probaU32!=COMPRESSIBILITY_DEFAULT)
DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
- DISPLAYLEVEL(3, "Seed = %u \n", seed);
+ DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed);
RDG_genStdout(size, (double)probaU32/100, litProba, seed);
DISPLAYLEVEL(1, "\n");
#define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h"
-// Direct access to internal compression functions is required
+/* Direct access to internal compression functions is required */
#include "zstd_compress.c"
#define XXH_STATIC_LINKING_ONLY
/*-*******************************************************
* Random function
*********************************************************/
-static unsigned RAND(unsigned* src)
+static U32 RAND(U32* src)
{
#define RAND_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static const U32 prime1 = 2654435761U;
}
}
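RAND() here, and FUZ_rand() further down, are the same xxhash-style mixer.
A self-contained sketch of the pattern, reconstructed from the fragments
visible in this patch (update order and rotation amount as they appear
elsewhere in these files; treat details as illustrative) :

    #include <stdint.h>

    #define ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))

    static uint32_t toyRand(uint32_t* state)
    {
        static const uint32_t prime1 = 2654435761U;
        static const uint32_t prime2 = 2246822519U;
        uint32_t rand32 = *state;
        rand32 *= prime1;
        rand32 += prime2;
        rand32 = ROTL32(rand32, 13);
        *state = rand32;        /* the state must stay a fixed-width 32-bit type */
        return rand32 >> 5;     /* top bits are better mixed */
    }

The state must keep a fixed-width type, which is why these signatures move
in the opposite direction from the display-oriented changes.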
- DISPLAYLEVEL(3, " frame content size:\t%u\n", (U32)fh.contentSize);
+ DISPLAYLEVEL(3, " frame content size:\t%u\n", (unsigned)fh.contentSize);
DISPLAYLEVEL(3, " frame window size:\t%u\n", fh.windowSize);
DISPLAYLEVEL(3, " content size flag:\t%d\n", contentSizeFlag);
DISPLAYLEVEL(3, " single segment flag:\t%d\n", singleSegment);
/* RLE literals */
BYTE const symb = (BYTE) (RAND(seed) % 256);
- DISPLAYLEVEL(4, " rle literals: 0x%02x\n", (U32)symb);
+ DISPLAYLEVEL(4, " rle literals: 0x%02x\n", (unsigned)symb);
memset(LITERAL_BUFFER, symb, litSize);
op[0] = symb;
BYTE* op = ostart;
unsigned huffLog = 11;
- U32 maxSymbolValue = 255;
+ unsigned maxSymbolValue = 255;
- U32 count[HUF_SYMBOLVALUE_MAX+1];
+ unsigned count[HUF_SYMBOLVALUE_MAX+1];
/* Scan input and build symbol stats */
{ size_t const largest = HIST_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, WKSP, sizeof(WKSP));
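The count/max changes just above are not cosmetic : HIST_count_wksp takes
unsigned* for both (signature assumed from the call), and a U32* argument is
a hard error wherever U32 != unsigned. A hypothetical illustration :

    typedef unsigned long U32_alt;         /* stand-in for U32 on an unsigned-long ABI */

    static void takesUnsignedPtr(unsigned* maxSymbolValuePtr)
    {
        *maxSymbolValuePtr = 255;
    }

    int main(void)
    {
        U32_alt max = 255;
        /* takesUnsignedPtr(&max);    constraint violation : unsigned long* vs unsigned*
         *                            (and casting it away would be undefined behavior) */
        unsigned maxOk = 255;
        takesUnsignedPtr(&maxOk);     /* the form this patch converges on */
        (void)max;
        return 0;
    }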
op += compressedSize;
compressedSize += hufHeaderSize;
- DISPLAYLEVEL(5, " regenerated size: %u\n", (U32)litSize);
- DISPLAYLEVEL(5, " compressed size: %u\n", (U32)compressedSize);
+ DISPLAYLEVEL(5, " regenerated size: %u\n", (unsigned)litSize);
+ DISPLAYLEVEL(5, " compressed size: %u\n", (unsigned)compressedSize);
if (compressedSize >= litSize) {
DISPLAYLEVEL(5, " trying again\n");
/* if we have to try again, reset the stats so we don't accidentally
excessMatch = remainingMatch - numSequences * MIN_SEQ_LEN;
}
- DISPLAYLEVEL(5, " total match lengths: %u\n", (U32)remainingMatch);
+ DISPLAYLEVEL(5, " total match lengths: %u\n", (unsigned)remainingMatch);
for (i = 0; i < numSequences; i++) {
/* Generate match and literal lengths by exponential distribution to
* ensure nice numbers */
frame->stats.rep[0] = offset;
}
- DISPLAYLEVEL(6, " LL: %5u OF: %5u ML: %5u", literalLen, offset, matchLen);
+ DISPLAYLEVEL(6, " LL: %5u OF: %5u ML: %5u",
+ (unsigned)literalLen, (unsigned)offset, (unsigned)matchLen);
DISPLAYLEVEL(7, " srcPos: %8u seqNb: %3u",
- (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart), i);
+ (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart), (unsigned)i);
DISPLAYLEVEL(6, "\n");
if (offsetCode < 3) {
- DISPLAYLEVEL(7, " repeat offset: %d\n", repIndex);
+ DISPLAYLEVEL(7, " repeat offset: %d\n", (int)repIndex);
}
/* use libzstd sequence handling */
ZSTD_storeSeq(seqStore, literalLen, literals, offsetCode,
memcpy(srcPtr, literals, literalsSize);
srcPtr += literalsSize;
- DISPLAYLEVEL(6, " excess literals: %5u", (U32)literalsSize);
- DISPLAYLEVEL(7, " srcPos: %8u", (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart));
+ DISPLAYLEVEL(6, " excess literals: %5u", (unsigned)literalsSize);
+ DISPLAYLEVEL(7, " srcPos: %8u", (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart));
DISPLAYLEVEL(6, "\n");
return numSequences;
size_t nbSeq)
{
/* This code is mostly copied from ZSTD_compressSequences in zstd_compress.c */
- U32 count[MaxSeq+1];
+ unsigned count[MaxSeq+1];
S16 norm[MaxSeq+1];
FSE_CTable* CTable_LitLength = frame->stats.litlengthCTable;
FSE_CTable* CTable_OffsetBits = frame->stats.offcodeCTable;
ZSTD_seqToCodes(seqStorePtr);
/* CTable for Literal Lengths */
- { U32 max = MaxLL;
+ { unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) {
/* CTable for Offsets */
/* see Literal Lengths for descriptions of mode choices */
- { U32 max = MaxOff;
+ { unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) {
/* CTable for MatchLengths */
/* see Literal Lengths for descriptions of mode choices */
- { U32 max = MaxML;
+ { unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) {
initSymbolSet(ofCodeTable, nbSeq, frame->stats.offsetSymbolSet, 28);
initSymbolSet(mlCodeTable, nbSeq, frame->stats.matchlengthSymbolSet, 52);
- DISPLAYLEVEL(5, " LL type: %d OF type: %d ML type: %d\n", LLtype, Offtype, MLtype);
+ DISPLAYLEVEL(5, " LL type: %d OF type: %d ML type: %d\n", (unsigned)LLtype, (unsigned)Offtype, (unsigned)MLtype);
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
literalsSize = writeLiteralsBlock(seed, frame, contentSize);
- DISPLAYLEVEL(4, " literals size: %u\n", (U32)literalsSize);
+ DISPLAYLEVEL(4, " literals size: %u\n", (unsigned)literalsSize);
nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize, info);
- DISPLAYLEVEL(4, " number of sequences: %u\n", (U32)nbSeq);
+ DISPLAYLEVEL(4, " number of sequences: %u\n", (unsigned)nbSeq);
return (BYTE*)frame->data - blockStart;
}
BYTE *op = header + 3;
DISPLAYLEVEL(4, " block:\n");
- DISPLAYLEVEL(4, " block content size: %u\n", (U32)contentSize);
+ DISPLAYLEVEL(4, " block content size: %u\n", (unsigned)contentSize);
DISPLAYLEVEL(4, " last block: %s\n", lastBlock ? "yes" : "no");
if (blockTypeDesc == 0) {
frame->src = (BYTE*)frame->src + contentSize;
DISPLAYLEVEL(4, " block type: %s\n", BLOCK_TYPES[blockType]);
- DISPLAYLEVEL(4, " block size field: %u\n", (U32)blockSize);
+ DISPLAYLEVEL(4, " block size field: %u\n", (unsigned)blockSize);
header[0] = (BYTE) ((lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff);
MEM_writeLE16(header + 1, (U16) (blockSize >> 5));
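The 3 bytes written above form the little-endian block header :
bit 0 = Last_Block, bits 1-2 = Block_Type, bits 3-23 = Block_Size.
A decoding counterpart, as a sanity check of the packing :

    #include <stdint.h>

    static void readBlockHeader(const uint8_t header[3],
                                int* lastBlock, int* blockType, uint32_t* blockSize)
    {
        uint32_t const h = (uint32_t)header[0]
                         | ((uint32_t)header[1] << 8)
                         | ((uint32_t)header[2] << 16);
        *lastBlock = (int)(h & 1);
        *blockType = (int)((h >> 1) & 3);
        *blockSize = h >> 3;          /* 21-bit size field */
    }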
{
/* write checksum so implementations can verify their output */
U64 digest = XXH64(frame->srcStart, (BYTE*)frame->src-(BYTE*)frame->srcStart, 0);
- DISPLAYLEVEL(3, " checksum: %08x\n", (U32)digest);
+ DISPLAYLEVEL(3, " checksum: %08x\n", (unsigned)digest);
MEM_writeLE32(frame->data, (U32)digest);
frame->data = (BYTE*)frame->data + 4;
}
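Per the frame format, the content checksum is the low 32 bits of
XXH64(content, size, 0), stored little-endian, which is exactly what the
block above writes. A verifier sketch (XXH64 from xxhash.h) :

    static int contentChecksumOk(const void* content, size_t size,
                                 const unsigned char storedLE[4])
    {
        unsigned const expected = (unsigned)XXH64(content, size, 0);  /* low 32 bits */
        unsigned const stored = (unsigned)storedLE[0]
                              | ((unsigned)storedLE[1] << 8)
                              | ((unsigned)storedLE[2] << 16)
                              | ((unsigned)storedLE[3] << 24);
        return expected == stored;
    }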
size_t blockContentSize;
int blockWritten = 0;
BYTE* op;
- DISPLAYLEVEL(4, "block seed: %u\n", seed);
+ DISPLAYLEVEL(4, "block seed: %u\n", (unsigned)seed);
initFrame(frame);
op = (BYTE*)frame->data;
DISPLAYLEVEL(5, " can't compress block : try again \n");
} else {
blockWritten = 1;
- DISPLAYLEVEL(4, " block size: %u \n", (U32)cSize);
+ DISPLAYLEVEL(4, " block size: %u \n", (unsigned)cSize);
frame->src = (BYTE*)frame->src + blockContentSize;
}
}
static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
{
/* generate a complete frame */
- DISPLAYLEVEL(3, "frame seed: %u\n", seed);
+ DISPLAYLEVEL(3, "frame seed: %u\n", (unsigned)seed);
initFrame(fr);
writeFrameHeader(&seed, fr, info);
{ size_t const r = testDecodeRawBlock(&fr);
if (ZSTD_isError(r)) {
- DISPLAY("Error in block mode on test seed %u: %s\n", seedCopy,
- ZSTD_getErrorName(r));
+ DISPLAY("Error in block mode on test seed %u: %s\n",
+ (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
{ size_t const r = testDecodeWithDict(*seed, gt_block);
if (ZSTD_isError(r)) {
DISPLAY("Error in block mode with dictionary on test seed %u: %s\n",
- seedCopy, ZSTD_getErrorName(r));
+ (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
{ size_t const r = testDecodeSimple(&fr);
if (ZSTD_isError(r)) {
DISPLAY("Error in simple mode on test seed %u: %s\n",
- seedCopy, ZSTD_getErrorName(r));
+ (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
{ size_t const r = testDecodeStreaming(&fr);
if (ZSTD_isError(r)) {
DISPLAY("Error in streaming mode on test seed %u: %s\n",
- seedCopy, ZSTD_getErrorName(r));
+ (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
{ size_t const r = testDecodeWithDict(*seed, gt_frame); /* avoid big dictionaries */
if (ZSTD_isError(r)) {
DISPLAY("Error in dictionary mode on test seed %u: %s\n",
- seedCopy, ZSTD_getErrorName(r));
+ (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
if (numFiles == 0 && !testDurationS) numFiles = 1;
- DISPLAY("seed: %u\n", seed);
+ DISPLAY("seed: %u\n", (unsigned)seed);
for (fnum = 0; fnum < numFiles || UTIL_clockSpanMicro(startClock) < maxClockSpan; fnum++) {
if (fnum < numFiles)
{
frame_t fr;
- DISPLAY("seed: %u\n", seed);
+ DISPLAY("seed: %u\n", (unsigned)seed);
{ dictInfo const info = initDictInfo(0, 0, NULL, 0);
if (genType == gt_frame) {
char outPath[MAX_PATH];
unsigned fnum;
- DISPLAY("seed: %u\n", seed);
+ DISPLAY("seed: %u\n", (unsigned)seed);
for (fnum = 0; fnum < numFiles; fnum++) {
frame_t fr;
/*_************************************
* Benchmark Parameters
**************************************/
-static U32 g_nbIterations = NBLOOPS;
+static unsigned g_nbIterations = NBLOOPS;
static double g_compressibility = COMPRESSIBILITY_DEFAULT;
-static void BMK_SetNbIterations(U32 nbLoops)
+static void BMK_SetNbIterations(unsigned nbLoops)
{
g_nbIterations = nbLoops;
DISPLAY("- %i iterations -\n", g_nbIterations);
/*_*******************************************************
* Bench functions
*********************************************************/
-static size_t benchMem(U32 benchNb,
+static size_t benchMem(unsigned benchNb,
const void* src, size_t srcSize,
int cLevel, ZSTD_compressionParameters cparams)
{
benchedSize = (size_t)inFileSize;
if ((U64)benchedSize < inFileSize) {
DISPLAY("Not enough memory for '%s' full size; testing %u MB only... \n",
- inFileName, (U32)(benchedSize>>20));
+ inFileName, (unsigned)(benchedSize>>20));
} }
/* Alloc */
#define MB *(1U<<20)
#define GB *(1U<<30)
-static const U32 FUZ_compressibility_default = 50;
-static const U32 nbTestsDefault = 30000;
+static const int FUZ_compressibility_default = 50;
+static const int nbTestsDefault = 30000;
/*-************************************
#define MAX(a,b) ((a)>(b)?(a):(b))
#define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-static unsigned FUZ_rand(unsigned* src)
+static U32 FUZ_rand(U32* src)
{
static const U32 prime1 = 2654435761U;
static const U32 prime2 = 2246822519U;
return rand32 >> 5;
}
-static unsigned FUZ_highbit32(U32 v32)
+static U32 FUZ_highbit32(U32 v32)
{
unsigned nbBits = 0;
if (v32==0) return 0;
void* const ptr = malloc(size);
if (ptr==NULL) return NULL;
DISPLAYLEVEL(4, "allocating %u KB => effectively %u KB \n",
- (U32)(size >> 10), (U32)(malloc_size(ptr) >> 10)); /* OS-X specific */
+ (unsigned)(size >> 10), (unsigned)(malloc_size(ptr) >> 10)); /* OS-X specific */
mcPtr->totalMalloc += size;
mcPtr->currentMalloc += size;
if (mcPtr->currentMalloc > mcPtr->peakMalloc)
static void FUZ_freeDebug(void* counter, void* address)
{
mallocCounter_t* const mcPtr = (mallocCounter_t*)counter;
- DISPLAYLEVEL(4, "freeing %u KB \n", (U32)(malloc_size(address) >> 10));
+ DISPLAYLEVEL(4, "freeing %u KB \n", (unsigned)(malloc_size(address) >> 10));
mcPtr->nbFree += 1;
mcPtr->currentMalloc -= malloc_size(address); /* OS-X specific */
free(address);
static void FUZ_displayMallocStats(mallocCounter_t count)
{
DISPLAYLEVEL(3, "peak:%6u KB, nbMallocs:%2u, total:%6u KB \n",
- (U32)(count.peakMalloc >> 10),
+ (unsigned)(count.peakMalloc >> 10),
count.nbMalloc,
- (U32)(count.totalMalloc >> 10));
+ (unsigned)(count.totalMalloc >> 10));
}
static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsigned part,
/* advanced MT API test */
if (part <= 3)
- { U32 nbThreads;
+ { unsigned nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
/* advanced MT streaming API test */
if (part <= 4)
- { U32 nbThreads;
+ { unsigned nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
void* const compressedBuffer = malloc(compressedBufferSize);
void* const decodedBuffer = malloc(CNBuffSize);
int testResult = 0;
- U32 testNb=0;
+ unsigned testNb=0;
size_t cSize;
/* Create compressible noise */
RDG_genBuffer(CNBuffer, CNBuffSize, compressibility, 0., seed);
/* Basic tests */
- DISPLAYLEVEL(3, "test%3i : ZSTD_getErrorName : ", testNb++);
+ DISPLAYLEVEL(3, "test%3u : ZSTD_getErrorName : ", testNb++);
{ const char* errorString = ZSTD_getErrorName(0);
DISPLAYLEVEL(3, "OK : %s \n", errorString);
}
- DISPLAYLEVEL(3, "test%3i : ZSTD_getErrorName with wrong value : ", testNb++);
+ DISPLAYLEVEL(3, "test%3u : ZSTD_getErrorName with wrong value : ", testNb++);
{ const char* errorString = ZSTD_getErrorName(499);
DISPLAYLEVEL(3, "OK : %s \n", errorString);
}
- DISPLAYLEVEL(3, "test%3i : min compression level : ", testNb++);
+ DISPLAYLEVEL(3, "test%3u : min compression level : ", testNb++);
{ int const mcl = ZSTD_minCLevel();
DISPLAYLEVEL(3, "%i (OK) \n", mcl);
}
- DISPLAYLEVEL(3, "test%3i : compress %u bytes : ", testNb++, (U32)CNBuffSize);
+ DISPLAYLEVEL(3, "test%3u : compress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (cctx==NULL) goto _output_error;
CHECKPLUS(r, ZSTD_compressCCtx(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, 1),
cSize=r );
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : size of cctx for level 1 : ", testNb++);
{ size_t const cctxSize = ZSTD_sizeof_CCtx(cctx);
- DISPLAYLEVEL(3, "%u bytes \n", (U32)cctxSize);
+ DISPLAYLEVEL(3, "%u bytes \n", (unsigned)cctxSize);
}
ZSTD_freeCCtx(cctx);
}
}
DISPLAYLEVEL(3, "OK \n");
- DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize);
+ DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
{ U32 const maxNbAttempts = 1100; /* nb of usages before triggering size down is handled within zstd_compress.c.
* currently defined as 128x, but could be adjusted in the future.
* make this test long enough so that it's not too much tied to the current definition within zstd_compress.c */
- U32 u;
+ unsigned u;
for (u=0; u<maxNbAttempts; u++) {
CHECK_Z(ZSTD_compressCCtx(largeCCtx, compressedBuffer, compressedBufferSize, CNBuffer, 1, 1));
if (ZSTD_sizeof_CCtx(largeCCtx) < largeCCtxSize) break; /* sized down */
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL),
cSize=r );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n",
- (U32)cSize, (double)cSize/CNBuffSize*100);
+ (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : simple decompression test with static DCtx : ", testNb++);
{ size_t const r = ZSTD_decompressDCtx(staticDCtx,
}
DISPLAYLEVEL(3, "OK \n");
- DISPLAYLEVEL(3, "test%3i : compress %u bytes with 2 threads : ", testNb++, (U32)CNBuffSize);
+ DISPLAYLEVEL(3, "test%3u : compress %u bytes with 2 threads : ", testNb++, (unsigned)CNBuffSize);
CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
1),
cSize=r );
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : decompressed size test : ", testNb++);
{ unsigned long long const rSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
if (rSize != CNBuffSize) {
- DISPLAY("ZSTD_getFrameContentSize incorrect : %u != %u \n", (U32)rSize, (U32)CNBuffSize);
+ DISPLAY("ZSTD_getFrameContentSize incorrect : %u != %u \n", (unsigned)rSize, (unsigned)CNBuffSize);
goto _output_error;
} }
DISPLAYLEVEL(3, "OK \n");
- DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize);
+ DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
NULL, params, 3 /*overlapRLog*/),
cSize=r );
}
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
- DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize);
+ DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r);
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built with flat dictionary should be decompressible : ", testNb++);
CHECKPLUS(r, ZSTD_decompress_usingDict(dctx,
cSize += r);
if (cSize != cSizeOrig) goto _output_error; /* should be identical ==> same size */
}
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built with duplicated context should be decompressible : ", testNb++);
CHECKPLUS(r, ZSTD_decompress_usingDict(dctx,
{ ZSTD_DDict* const ddict = ZSTD_createDDict(CNBuffer, dictSize);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
if (r != CNBuffSize - dictSize) goto _output_error;
- DISPLAYLEVEL(3, "OK (size of DDict : %u) \n", (U32)ZSTD_sizeof_DDict(ddict));
+ DISPLAYLEVEL(3, "OK (size of DDict : %u) \n", (unsigned)ZSTD_sizeof_DDict(ddict));
ZSTD_freeDDict(ddict);
}
if (r != CNBuffSize - dictSize) goto _output_error;
}
free(ddictBuffer);
- DISPLAYLEVEL(3, "OK (size of static DDict : %u) \n", (U32)ddictBufferSize);
+ DISPLAYLEVEL(3, "OK (size of static DDict : %u) \n", (unsigned)ddictBufferSize);
}
DISPLAYLEVEL(3, "test%3i : check content size on duplicated context : ", testNb++);
{ size_t const sDictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity,
decodedBuffer, samplesSizes, nbSamples);
if (ZDICT_isError(sDictSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)sDictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)sDictSize);
}
DISPLAYLEVEL(3, "test%3i : dictBuilder : ", testNb++);
dictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity,
CNBuffer, samplesSizes, nbSamples);
if (ZDICT_isError(dictSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : Multithreaded COVER dictBuilder : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
&coverParams);
if (ZDICT_isError(dictSize)) goto _output_error;
}
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : Multithreaded FASTCOVER dictBuilder : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
&fastCoverParams);
if (ZDICT_isError(dictSize)) goto _output_error;
}
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, dictSize);
if (dictID==0) goto _output_error;
- DISPLAYLEVEL(3, "OK : %u \n", dictID);
+ DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
DISPLAYLEVEL(3, "test%3i : compress with dictionary : ", testNb++);
cSize = ZSTD_compress_usingDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
dictBuffer, dictSize, 4);
if (ZSTD_isError(cSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : retrieve dictID from dictionary : ", testNb++);
{ U32 const did = ZSTD_getDictID_fromDict(dictBuffer, dictSize);
DISPLAYLEVEL(3, "test%3i : estimate CDict size : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byRef);
- DISPLAYLEVEL(3, "OK : %u \n", (U32)estimatedSize);
+ DISPLAYLEVEL(3, "OK : %u \n", (unsigned)estimatedSize);
}
DISPLAYLEVEL(3, "test%3i : compress with CDict ", testNb++);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize,
ZSTD_dlm_byRef, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
- DISPLAYLEVEL(3, "(size : %u) : ", (U32)ZSTD_sizeof_CDict(cdict));
+ DISPLAYLEVEL(3, "(size : %u) : ", (unsigned)ZSTD_sizeof_CDict(cdict));
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict);
ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error;
}
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : retrieve dictID from frame : ", testNb++);
{ U32 const did = ZSTD_getDictID_fromFrame(compressedBuffer, cSize);
} }
free(cdictBuffer);
} }
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : ZSTD_compress_usingCDict_advanced, no contentSize, no dictID : ", testNb++);
{ ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ };
ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error;
}
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : try retrieving contentSize from frame : ", testNb++);
{ U64 const contentSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
dictBuffer, dictSize, p);
if (ZSTD_isError(cSize)) goto _output_error;
}
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built without dictID should be decompressible : ", testNb++);
{ ZSTD_DCtx* const dctx = ZSTD_createDCtx(); assert(dctx != NULL);
CNBuffer, samplesSizes, nbSamples,
params);
if (ZDICT_isError(dictSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, dictSize);
if (dictID==0) goto _output_error;
- DISPLAYLEVEL(3, "OK : %u \n", dictID);
+ DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
DISPLAYLEVEL(3, "test%3i : ZDICT_optimizeTrainFromBuffer_cover : ", testNb++);
memset(&params, 0, sizeof(params));
CNBuffer, samplesSizes,
nbSamples / 4, &params);
if (ZDICT_isError(optDictSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)optDictSize);
+ DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)optDictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, optDictSize);
if (dictID==0) goto _output_error;
- DISPLAYLEVEL(3, "OK : %u \n", dictID);
+ DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
ZSTD_freeCCtx(cctx);
free(dictBuffer);
cSize = compressedSize;
xxh64 = XXH64(compressedBuffer, compressedSize, 0);
}
- DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)cSize);
+ DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)cSize);
ZSTD_freeCCtx(cctx);
}
CHECK(result);
if (result != cSize) goto _output_error; /* must result in same compressed result, hence same size */
if (XXH64(compressedBuffer, result, 0) != xxh64) goto _output_error; /* must result in exactly same content, hence same hash */
- DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)result);
+ DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)result);
}
ZSTD_freeCCtx(cctx);
}
if (in.pos != in.size) goto _output_error;
cSize = out.pos;
}
- DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)cSize);
+ DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)cSize);
DISPLAYLEVEL(3, "test%3i : decompress normally (should fail) : ", testNb++);
{ size_t const decodeResult = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (result != 0) goto _output_error;
if (in.pos != in.size) goto _output_error;
if (out.pos != inputSize) goto _output_error;
- DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (U32)out.pos);
+ DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (unsigned)out.pos);
}
ZSTD_freeCCtx(cctx);
memset(CNBuffer, 0, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(ZEROESLENGTH), CNBuffer, ZEROESLENGTH, 1) );
cSize = r; }
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/ZEROESLENGTH*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/ZEROESLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress %u zeroes : ", testNb++, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, ZEROESLENGTH, compressedBuffer, cSize) );
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH),
CNBuffer, _3BYTESTESTLENGTH, 19) );
cSize = r; }
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/_3BYTESTESTLENGTH*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/_3BYTESTESTLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress lots 3-bytes sequence : ", testNb++);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, _3BYTESTESTLENGTH, compressedBuffer, cSize) );
if (cond) { \
DISPLAY("Error => "); \
DISPLAY(__VA_ARGS__); \
- DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
+ DISPLAY(" (seed %u, test nb %u) \n", (unsigned)seed, testNb); \
goto _output_error; \
} }
if (ZSTD_isError(err)) { \
DISPLAY("Error => %s : %s ", \
#f, ZSTD_getErrorName(err)); \
- DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
+ DISPLAY(" (seed %u, test nb %u) \n", (unsigned)seed, testNb); \
goto _output_error; \
} }
-static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests)
+static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests)
{
static const U32 maxSrcLog = 23;
static const U32 maxSampleLog = 22;
ZSTD_CCtx* const ctx = ZSTD_createCCtx();
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
U32 result = 0;
- U32 testNb = 0;
+ unsigned testNb = 0;
U32 coreSeed = seed;
UTIL_time_t const startClock = UTIL_getTime();
U64 const maxClockSpan = maxDurationS * SEC_TO_MICRO;
assert(cSize > 3);
{ const size_t missing = (FUZ_rand(&lseed) % (cSize-2)) + 1;
const size_t tooSmallSize = cSize - missing;
- const U32 endMark = 0x4DC2B1A9;
- memcpy(dstBuffer+tooSmallSize, &endMark, 4);
+ const unsigned endMark = 0x4DC2B1A9;
+ memcpy(dstBuffer+tooSmallSize, &endMark, sizeof(endMark));
DISPLAYLEVEL(5, "fuzzer t%u: compress into too small buffer of size %u (missing %u bytes) \n",
testNb, (unsigned)tooSmallSize, (unsigned)missing);
{ size_t const errorCode = ZSTD_compressCCtx(ctx, dstBuffer, tooSmallSize, sampleBuffer, sampleSize, cLevel);
- CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (U32)tooSmallSize, (U32)cSize); }
- { U32 endCheck; memcpy(&endCheck, dstBuffer+tooSmallSize, 4);
+ CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (unsigned)tooSmallSize, (unsigned)cSize); }
+ { unsigned endCheck; memcpy(&endCheck, dstBuffer+tooSmallSize, sizeof(endCheck));
CHECK(endCheck != endMark, "ZSTD_compressCCtx : dst buffer overflow (check.%08X != %08X.mark)", endCheck, endMark); }
} }
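The endMark pattern above is a classic canary : plant a sentinel just past
the writable region, call the function under test, then verify the sentinel.
Using sizeof(endMark) instead of a literal 4 keeps the write and the readback
in sync with the mark's type. The generic shape (sketch; the caller must own
sizeof(endMark) bytes past dstCapacity) :

    #include <string.h>

    static int dstOverflowDetected(unsigned char* dst, size_t dstCapacity)
    {
        unsigned const endMark = 0x4DC2B1A9;   /* arbitrary, unlikely bit pattern */
        unsigned endCheck;
        memcpy(dst + dstCapacity, &endMark, sizeof(endMark));
        /* ... run the function under test with (dst, dstCapacity) here ... */
        memcpy(&endCheck, dst + dstCapacity, sizeof(endCheck));
        return endCheck != endMark;            /* != means the callee wrote past dst */
    }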
DISPLAYLEVEL(5, "fuzzer t%u: simple decompression test \n", testNb);
{ size_t const margin = (FUZ_rand(&lseed) & 1) ? 0 : (FUZ_rand(&lseed) & 31) + 1;
size_t const dSize = ZSTD_decompress(dstBuffer, sampleSize + margin, cBuffer, cSize);
- CHECK(dSize != sampleSize, "ZSTD_decompress failed (%s) (srcSize : %u ; cSize : %u)", ZSTD_getErrorName(dSize), (U32)sampleSize, (U32)cSize);
+ CHECK(dSize != sampleSize, "ZSTD_decompress failed (%s) (srcSize : %u ; cSize : %u)", ZSTD_getErrorName(dSize), (unsigned)sampleSize, (unsigned)cSize);
{ U64 const crcDest = XXH64(dstBuffer, sampleSize, 0);
- CHECK(crcOrig != crcDest, "decompression result corrupted (pos %u / %u)", (U32)findDiff(sampleBuffer, dstBuffer, sampleSize), (U32)sampleSize);
+ CHECK(crcOrig != crcDest, "decompression result corrupted (pos %u / %u)", (unsigned)findDiff(sampleBuffer, dstBuffer, sampleSize), (unsigned)sampleSize);
} }
free(sampleBuffer); /* no longer useful after this point */
static const BYTE token = 0xA9;
dstBuffer[tooSmallSize] = token;
{ size_t const errorCode = ZSTD_decompress(dstBuffer, tooSmallSize, cBuffer, cSize);
- CHECK(!ZSTD_isError(errorCode), "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (U32)errorCode, (U32)tooSmallSize); }
+ CHECK(!ZSTD_isError(errorCode), "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (unsigned)errorCode, (unsigned)tooSmallSize); }
CHECK(dstBuffer[tooSmallSize] != token, "ZSTD_decompress : dst buffer overflow");
}
{ size_t const decompressResult = ZSTD_decompress(dstBuffer, sampleSize, cBuffer, cSize);
/* result *may* be an unlikely success, but even then, it must strictly respect dst buffer boundaries */
CHECK((!ZSTD_isError(decompressResult)) && (decompressResult>sampleSize),
- "ZSTD_decompress on noisy src : result is too large : %u > %u (dst buffer)", (U32)decompressResult, (U32)sampleSize);
+ "ZSTD_decompress on noisy src : result is too large : %u > %u (dst buffer)", (unsigned)decompressResult, (unsigned)sampleSize);
}
{ U32 endCheck; memcpy(&endCheck, dstBuffer+sampleSize, 4);
CHECK(endMark!=endCheck, "ZSTD_decompress on noisy src : dst buffer overflow");
dict = srcBuffer + (FUZ_rand(&lseed) % (srcBufferSize - dictSize));
DISPLAYLEVEL(6, "fuzzer t%u: Compressing up to <=%u bytes at level %i with dictionary size %u \n",
- testNb, (U32)maxTestSize, cLevel, (U32)dictSize);
+ testNb, (unsigned)maxTestSize, cLevel, (unsigned)dictSize);
if (FUZ_rand(&lseed) & 0xF) {
CHECK_Z ( ZSTD_compressBegin_usingDict(refCtx, dict, dictSize, cLevel) );
CHECK_Z(roundBuffSize);
CHECK((roundBuffSize > totalTestSize) && (zfh.frameContentSize!=ZSTD_CONTENTSIZE_UNKNOWN),
"ZSTD_decodingBufferSize_min() requires more memory (%u) than necessary (%u)",
- (U32)roundBuffSize, (U32)totalTestSize );
+ (unsigned)roundBuffSize, (unsigned)totalTestSize );
} }
if (dictSize<8) dictSize=0, dict=NULL; /* disable dictionary */
CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, dict, dictSize) );
CHECK (totalCSize != cSize, "compressed data should be fully read")
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
CHECK(crcOrig != crcDest, "streaming decompressed data corrupted (pos %u / %u)",
- (U32)findDiff(mirrorBuffer, dstBuffer, totalTestSize), (U32)totalTestSize);
+ (unsigned)findDiff(mirrorBuffer, dstBuffer, totalTestSize), (unsigned)totalTestSize);
}
} /* for ( ; (testNb <= nbTests) */
DISPLAY("\r%u fuzzer tests completed \n", testNb-1);
DISPLAY( " %s [args]\n", programName);
DISPLAY( "\n");
DISPLAY( "Arguments :\n");
- DISPLAY( " -i# : Nb of tests (default:%u) \n", nbTestsDefault);
+ DISPLAY( " -i# : Nb of tests (default:%i) \n", nbTestsDefault);
DISPLAY( " -s# : Select seed (default:prompt user)\n");
DISPLAY( " -t# : Select starting test number (default:0)\n");
- DISPLAY( " -P# : Select compressibility in %% (default:%u%%)\n", FUZ_compressibility_default);
+ DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", FUZ_compressibility_default);
DISPLAY( " -v : verbose\n");
DISPLAY( " -p : pause at the end\n");
DISPLAY( " -h : display help and exit\n");
int argNb;
int nbTests = nbTestsDefault;
int testNb = 0;
- U32 proba = FUZ_compressibility_default;
+ int proba = FUZ_compressibility_default;
int result = 0;
U32 mainPause = 0;
U32 maxDuration = 0;
seed = h % 10000;
}
- DISPLAY("Seed = %u\n", seed);
- if (proba!=FUZ_compressibility_default) DISPLAY("Compressibility : %u%%\n", proba);
+ DISPLAY("Seed = %u\n", (unsigned)seed);
+ if (proba!=FUZ_compressibility_default) DISPLAY("Compressibility : %i%%\n", proba);
if (memTestsOnly) {
g_displayLevel = MAX(3, g_displayLevel);
}
/* display of params */
-static void displayParamVal(FILE* f, varInds_t param, U32 value, int width)
+static void displayParamVal(FILE* f, varInds_t param, unsigned value, int width)
{
switch(param) {
case wlog_ind:
if(adjust != pc->vals[wlog_ind]) {
pc->vals[wlog_ind] = adjust;
DISPLAY("Warning: windowLog larger than src/block size, adjusted to %u\n",
- pc->vals[wlog_ind]);
+ (unsigned)pc->vals[wlog_ind]);
}
}
}
if(pc->vals[clog_ind] > maxclog) {
pc->vals[clog_ind] = maxclog;
DISPLAY("Warning: chainlog too much larger than windowLog size, adjusted to %u\n",
- pc->vals[clog_ind]);
+ (unsigned)pc->vals[clog_ind]);
}
}
if(pc->vals[wlog_ind] + 1 < pc->vals[hlog_ind]) {
pc->vals[hlog_ind] = pc->vals[wlog_ind] + 1;
DISPLAY("Warning: hashlog too much larger than windowLog size, adjusted to %u\n",
- pc->vals[hlog_ind]);
+ (unsigned)pc->vals[hlog_ind]);
}
}
if(pc->vals[slog_ind] > pc->vals[clog_ind]) {
pc->vals[clog_ind] = pc->vals[slog_ind];
DISPLAY("Warning: searchLog larger than chainLog, adjusted to %u\n",
- pc->vals[slog_ind]);
+ (unsigned)pc->vals[slog_ind]);
}
}
}
if (!first) { fprintf(f, ","); }
fprintf(f,"%s=", g_paramNames[v]);
- if (v == strt_ind) { fprintf(f,"%u", params.vals[v]); }
+ if (v == strt_ind) { fprintf(f,"%u", (unsigned)params.vals[v]); }
else { displayParamVal(f, v, params.vals[v], 0); }
first = 0;
}
varInds_t vi;
DISPLAYLEVEL(3, "\r testing :");
for (vi=0; vi < NUM_PARAMS; vi++) {
- DISPLAYLEVEL(3, "%3u,", cParams.vals[vi]);
+ DISPLAYLEVEL(3, "%3u,", (unsigned)cParams.vals[vi]);
}
DISPLAYLEVEL(3, "\b \r");
}
DISPLAYLEVEL(2, "optimizing for %lu Files", (unsigned long)nbFiles);
}
- if(target.cSpeed != 0) { DISPLAYLEVEL(2," - limit compression speed %u MB/s", target.cSpeed >> 20); }
- if(target.dSpeed != 0) { DISPLAYLEVEL(2, " - limit decompression speed %u MB/s", target.dSpeed >> 20); }
- if(target.cMem != (U32)-1) { DISPLAYLEVEL(2, " - limit memory %u MB", target.cMem >> 20); }
+ if(target.cSpeed != 0) { DISPLAYLEVEL(2," - limit compression speed %u MB/s", (unsigned)(target.cSpeed >> 20)); }
+ if(target.dSpeed != 0) { DISPLAYLEVEL(2, " - limit decompression speed %u MB/s", (unsigned)(target.dSpeed >> 20)); }
+ if(target.cMem != (U32)-1) { DISPLAYLEVEL(2, " - limit memory %u MB", (unsigned)(target.cMem >> 20)); }
DISPLAYLEVEL(2, "\n");
init_clockGranularity();
DISPLAY( " -S : Single run \n");
DISPLAY( " --zstd : Single run, parameter selection same as zstdcli \n");
DISPLAY( " -P# : generated sample compressibility (default : %.1f%%) \n", COMPRESSIBILITY_DEFAULT * 100);
- DISPLAY( " -t# : Caps runtime of operation in seconds (default : %u seconds (%.1f hours)) \n", g_timeLimit_s, (double)g_timeLimit_s / 3600);
+ DISPLAY( " -t# : Caps runtime of operation in seconds (default : %u seconds (%.1f hours)) \n",
+ (unsigned)g_timeLimit_s, (double)g_timeLimit_s / 3600);
DISPLAY( " -v : Prints Benchmarking output\n");
DISPLAY( " -D : Next argument dictionary file\n");
DISPLAY( " -s : Seperate Files\n");
case 'B':
argument++;
g_blockSize = readU32FromChar(&argument);
- DISPLAY("using %u KB block size \n", g_blockSize>>10);
+ DISPLAY("using %u KB block size \n", (unsigned)(g_blockSize>>10));
break;
/* caps runtime (in seconds) */
static const size_t kMatchBytes = 128;
#define SEQ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-static BYTE SEQ_randByte(U32* src)
+static BYTE SEQ_randByte(unsigned* src)
{
static const U32 prime1 = 2654435761U;
static const U32 prime2 = 2246822519U;
#define MB *(1U<<20)
#define GB *(1U<<30)
-static const U32 nbTestsDefault = 10000;
+static const int nbTestsDefault = 10000;
static const U32 g_cLevelMax_smallTests = 10;
#define COMPRESSIBLE_NOISE_LENGTH (10 MB)
#define FUZ_COMPRESSIBILITY_DEFAULT 50
@return : a 27-bit random value, from a 32-bit `seed`.
`seed` is also modified */
#define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-static unsigned int FUZ_rand(unsigned int* seedPtr)
+static U32 FUZ_rand(U32* seedPtr)
{
static const U32 prime2 = 2246822519U;
U32 rand32 = *seedPtr;
DISPLAY("Error => "); \
DISPLAY(__VA_ARGS__); \
DISPLAY(" (seed %u, test nb %u, line %u) \n", \
- seed, testNb, __LINE__); \
+ (unsigned)seed, testNb, __LINE__); \
goto _output_error; \
} }
void* decodedBuffer = malloc(decodedBufferSize);
size_t cSize;
int testResult = 0;
- U32 testNb = 1;
+ int testNb = 1;
U32 coreSeed = 0; /* this name to conform with CHECK_Z macro display */
ZSTD_CStream* zc = ZSTD_createCStream();
ZSTD_DStream* zd = ZSTD_createDStream();
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
- DISPLAYLEVEL(3, "OK (%u bytes)\n", (U32)outBuff.pos);
+ DISPLAYLEVEL(3, "OK (%u bytes)\n", (unsigned)outBuff.pos);
/* generate skippable frame */
MEM_writeLE32(compressedBuffer, ZSTD_MAGIC_SKIPPABLE_START);
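For context, a skippable frame is : a 4-byte LE magic in the range
ZSTD_MAGIC_SKIPPABLE_START + 0..15, a 4-byte LE payload size, then the
payload itself, which decoders ignore. A writer sketch (dst, payload and
payloadSize are hypothetical names, not from this patch) :

    MEM_writeLE32(dst, ZSTD_MAGIC_SKIPPABLE_START);     /* magic, + n for n in 0..15 */
    MEM_writeLE32((BYTE*)dst + 4, (U32)payloadSize);    /* Frame_Size, little-endian */
    memcpy((BYTE*)dst + 8, payload, payloadSize);       /* user data, skipped by decoders */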
{ size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */
cSize += outBuff.pos;
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n",
+ (unsigned)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
/* context size functions */
DISPLAYLEVEL(3, "test%3i : estimate CStream size : ", testNb++);
size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); /* uses ZSTD_initCStream_usingDict() */
if (ZSTD_isError(cstreamSize)) goto _output_error;
if (ZSTD_isError(cdictSize)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)(cstreamSize + cdictSize));
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)(cstreamSize + cdictSize));
}
DISPLAYLEVEL(3, "test%3i : check actual CStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_CStream(zc);
if (ZSTD_isError(s)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
}
/* Attempt bad compression parameters */
outBuff.size = CNBufferSize;
outBuff.pos = 0;
{ size_t const r = ZSTD_decompressStream(zd, &outBuff, &inBuff);
- DISPLAYLEVEL(5, " ( ZSTD_decompressStream => %u ) ", (U32)r);
+ DISPLAYLEVEL(5, " ( ZSTD_decompressStream => %u ) ", (unsigned)r);
if (r != 0) goto _output_error;
}
if (outBuff.pos != 0) goto _output_error; /* skippable frame output len is 0 */
const void* cStart = (char*)compressedBuffer + (skippableFrameSize + 8);
size_t const gfhError = ZSTD_getFrameHeader(&fhi, cStart, cSize);
if (gfhError!=0) goto _output_error;
- DISPLAYLEVEL(5, " (windowSize : %u) ", (U32)fhi.windowSize);
+ DISPLAYLEVEL(5, " (windowSize : %u) ", (unsigned)fhi.windowSize);
{ size_t const s = ZSTD_estimateDStreamSize(fhi.windowSize)
/* uses ZSTD_initDStream_usingDict() */
+ ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
if (ZSTD_isError(s)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
} }
DISPLAYLEVEL(3, "test%3i : check actual DStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_DStream(zd);
if (ZSTD_isError(s)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
}
/* Decompression by small increment */
if (r != 0) goto _output_error; } /* error, or some data not flushed */
{ unsigned long long origSize = ZSTD_findDecompressedSize(outBuff.dst, outBuff.pos);
if ((size_t)origSize != CNBufferSize) goto _output_error; } /* exact original size must be present */
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
/* wrong _srcSize compression test */
DISPLAYLEVEL(3, "test%3i : too large srcSize : %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH-1);
DISPLAYLEVEL(3, "test%3i : digested dictionary : ", testNb++);
{ ZSTD_CDict* const cdict = ZSTD_createCDict(dictionary.start, dictionary.filled, 1 /*byRef*/ );
size_t const initError = ZSTD_initCStream_usingCDict(zc, cdict);
- DISPLAYLEVEL(5, "ZSTD_initCStream_usingCDict result : %u ", (U32)initError);
+ DISPLAYLEVEL(5, "ZSTD_initCStream_usingCDict result : %u ", (unsigned)initError);
if (ZSTD_isError(initError)) goto _output_error;
outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize;
CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff);
- DISPLAYLEVEL(5, "- ZSTD_endStream result : %u ", (U32)r);
+ DISPLAYLEVEL(5, "- ZSTD_endStream result : %u ", (unsigned)r);
if (r != 0) goto _output_error; /* error, or some data not flushed */
}
cSize = outBuff.pos;
ZSTD_freeCDict(cdict);
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
}
DISPLAYLEVEL(3, "test%3i : check CStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_CStream(zc);
if (ZSTD_isError(s)) goto _output_error;
- DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
+ DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
}
DISPLAYLEVEL(4, "test%3i : check Dictionary ID : ", testNb++);
}
/* DDict scenario */
- DISPLAYLEVEL(3, "test%3i : decompress %u bytes with digested dictionary : ", testNb++, (U32)CNBufferSize);
+ DISPLAYLEVEL(3, "test%3i : decompress %u bytes with digested dictionary : ", testNb++, (unsigned)CNBufferSize);
{ ZSTD_DDict* const ddict = ZSTD_createDDict(dictionary.start, dictionary.filled);
size_t const initError = ZSTD_initDStream_usingDDict(zd, ddict);
if (ZSTD_isError(initError)) goto _output_error;
if (r != 0) goto _output_error; } /* error, or some data not flushed */
cSize = outBuff.pos;
ZSTD_freeCDict(cdict);
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
}
DISPLAYLEVEL(3, "test%3i : try retrieving dictID from frame : ", testNb++);
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos;
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
DISPLAYLEVEL(3, "test%3i : decompress with ZSTD_DCtx_refPrefix : ", testNb++);
CHECK_Z( ZSTD_DCtx_refPrefix(zd, dictionary.start, dictionary.filled) );
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos;
- DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
+ DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
DISPLAYLEVEL(3, "test%3i : decompress without dictionary (should work): ", testNb++);
CHECK_Z( ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize) );
size_t const start = jobSize + (offset-1);
const BYTE* const srcToCopy = (const BYTE*)CNBuffer + start;
BYTE* const dst = (BYTE*)CNBuffer + start - offset;
- DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads + dictionary : ", testNb++, (U32)srcSize);
+ DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads + dictionary : ", testNb++, (unsigned)srcSize);
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_compressionLevel, 3) );
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_nbWorkers, nbWorkers) );
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_jobSize, jobSize) );
{ ZSTD_DStream* const dstream = ZSTD_createDCtx();
ZSTD_frameHeader zfh;
ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize);
- DISPLAYLEVEL(5, "frame windowsize = %u : ", (U32)zfh.windowSize);
+ DISPLAYLEVEL(5, "frame windowsize = %u : ", (unsigned)zfh.windowSize);
outBuff.dst = decodedBuffer;
outBuff.size = CNBufferSize;
outBuff.pos = 0;
if (b1[u] != b2[u]) break;
}
if (u==max) {
- DISPLAY("=> No difference detected within %u bytes \n", (U32)max);
+ DISPLAY("=> No difference detected within %u bytes \n", (unsigned)max);
return u;
}
- DISPLAY("Error at position %u / %u \n", (U32)u, (U32)max);
+ DISPLAY("Error at position %u / %u \n", (unsigned)u, (unsigned)max);
if (u>=3)
DISPLAY(" %02X %02X %02X ",
b1[u-3], b1[u-2], b1[u-1]);
return (U32)((FUZ_rand(seed) % mod) + minVal);
}
-static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests)
+static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressibility, int bigTests)
{
U32 const maxSrcLog = bigTests ? 24 : 22;
static const U32 maxSampleLog = 19;
size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0;
- U32 testNb = 0;
+ unsigned testNb = 0;
U32 coreSeed = seed;
ZSTD_CStream* zc = ZSTD_createCStream(); /* will be re-created sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be re-created sometimes */
}
CHECK (decompressionResult != 0, "frame not fully decoded");
CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)",
- (U32)outBuff.pos, (U32)totalTestSize);
+ (unsigned)outBuff.pos, (unsigned)totalTestSize);
CHECK (inBuff.pos != cSize, "compressed data should be fully read")
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
/* fuzzing ZSTDMT_* interface */
-static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
+static int fuzzerTests_MT(U32 seed, int nbTests, int startTest,
double compressibility, int bigTests)
{
const U32 maxSrcLog = bigTests ? 24 : 22;
size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0;
- U32 testNb = 0;
+ int testNb = 0;
U32 coreSeed = seed;
- U32 nbThreads = 2;
+ int nbThreads = 2;
ZSTDMT_CCtx* zc = ZSTDMT_createCCtx(nbThreads); /* will be reset sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
ZSTD_DStream* const zd_noise = ZSTD_createDStream();
RDG_genBuffer(cNoiseBuffer[4], srcBufferSize, 1.00, 0., coreSeed); /* sparse content */
memset(copyBuffer, 0x65, copyBufferSize); /* make copyBuffer considered initialized */
ZSTD_initDStream_usingDict(zd, NULL, 0); /* ensure at least one init */
- DISPLAYLEVEL(6, "Creating initial context with %u threads \n", nbThreads);
+ DISPLAYLEVEL(6, "Creating initial context with %i threads \n", nbThreads);
/* catch up testNb */
for (testNb=1; testNb < startTest; testNb++)
{ U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? ZSTD_CONTENTSIZE_UNKNOWN : maxTestSize;
ZSTD_parameters params = ZSTD_getParams(cLevel, pledgedSrcSize, dictSize);
DISPLAYLEVEL(5, "Init with windowLog = %u, pledgedSrcSize = %u, dictSize = %u \n",
- params.cParams.windowLog, (U32)pledgedSrcSize, (U32)dictSize);
+ params.cParams.windowLog, (unsigned)pledgedSrcSize, (unsigned)dictSize);
params.fParams.checksumFlag = FUZ_rand(&lseed) & 1;
params.fParams.noDictIDFlag = FUZ_rand(&lseed) & 1;
params.fParams.contentSizeFlag = FUZ_rand(&lseed) & 1;
ZSTD_inBuffer inBuff = { srcBuffer+srcStart, srcSize, 0 };
outBuff.size = outBuff.pos + dstBuffSize;
- DISPLAYLEVEL(6, "Sending %u bytes to compress \n", (U32)srcSize);
+ DISPLAYLEVEL(6, "Sending %u bytes to compress \n", (unsigned)srcSize);
CHECK_Z( ZSTDMT_compressStream(zc, &outBuff, &inBuff) );
- DISPLAYLEVEL(6, "%u bytes read by ZSTDMT_compressStream \n", (U32)inBuff.pos);
+ DISPLAYLEVEL(6, "%u bytes read by ZSTDMT_compressStream \n", (unsigned)inBuff.pos);
XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos);
memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos);
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
size_t const previousPos = outBuff.pos;
outBuff.size = outBuff.pos + adjustedDstSize;
- DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (U32)adjustedDstSize);
+ DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (unsigned)adjustedDstSize);
CHECK_Z( ZSTDMT_flushStream(zc, &outBuff) );
assert(outBuff.pos >= previousPos);
- DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_flushStream \n", (U32)(outBuff.pos-previousPos));
+ DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_flushStream \n", (unsigned)(outBuff.pos-previousPos));
} }
/* final frame epilogue */
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
size_t const previousPos = outBuff.pos;
outBuff.size = outBuff.pos + adjustedDstSize;
- DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (U32)adjustedDstSize);
+ DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (unsigned)adjustedDstSize);
remainingToFlush = ZSTDMT_endStream(zc, &outBuff);
CHECK (ZSTD_isError(remainingToFlush), "ZSTDMT_endStream error : %s", ZSTD_getErrorName(remainingToFlush));
assert(outBuff.pos >= previousPos);
- DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_endStream \n", (U32)(outBuff.pos-previousPos));
- DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (U32)remainingToFlush);
+ DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_endStream \n", (unsigned)(outBuff.pos-previousPos));
+ DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (unsigned)remainingToFlush);
} }
crcOrig = XXH64_digest(&xxhState);
cSize = outBuff.pos;
DISPLAYLEVEL(5, "Frame completed : %u bytes compressed into %u bytes \n",
- (U32)totalTestSize, (U32)cSize);
+ (unsigned)totalTestSize, (unsigned)cSize);
}
/* multi - fragments decompression test */
inBuff.size = inBuff.pos + readCSrcSize;
outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(6, "ZSTD_decompressStream input %u bytes into outBuff %u bytes \n",
- (U32)readCSrcSize, (U32)dstBuffSize);
+ (unsigned)readCSrcSize, (unsigned)dstBuffSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
if (ZSTD_isError(decompressionResult)) {
DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult));
}
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
DISPLAYLEVEL(6, "total ingested (inBuff.pos) = %u and produced (outBuff.pos) = %u \n",
- (U32)inBuff.pos, (U32)outBuff.pos);
+ (unsigned)inBuff.pos, (unsigned)outBuff.pos);
}
- CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize);
- CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize);
+ CHECK (outBuff.pos != totalTestSize,
+ "decompressed data : wrong size (%u != %u)",
+ (unsigned)outBuff.pos, (unsigned)totalTestSize );
+ CHECK (inBuff.pos != cSize,
+ "compressed data should be fully read (%u != %u)",
+ (unsigned)inBuff.pos, (unsigned)cSize );
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
CHECK (crcDest!=crcOrig, "decompressed data corrupted");
}
/* Tests for ZSTD_compress_generic() API */
-static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
+static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest,
double compressibility, int bigTests)
{
U32 const maxSrcLog = bigTests ? 24 : 22;
size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0;
- U32 testNb = 0;
+ int testNb = 0;
U32 coreSeed = seed;
ZSTD_CCtx* zc = ZSTD_createCCtx(); /* will be reset sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
(ZSTD_maxCLevel() -
(MAX(testLog, dictLog) / 2))) +
1;
- U32 const cLevel = MIN(cLevelCandidate, cLevelMax);
- DISPLAYLEVEL(5, "t%u: base cLevel : %u \n", testNb, cLevel);
+ int const cLevel = MIN(cLevelCandidate, cLevelMax);
+ DISPLAYLEVEL(5, "t%i: base cLevel : %u \n", testNb, cLevel);
maxTestSize = FUZ_rLogLength(&lseed, testLog);
- DISPLAYLEVEL(5, "t%u: maxTestSize : %u \n", testNb, (U32)maxTestSize);
+ DISPLAYLEVEL(5, "t%i: maxTestSize : %u \n", testNb, (unsigned)maxTestSize);
oldTestLog = testLog;
/* random dictionary selection */
dictSize = ((FUZ_rand(&lseed)&63)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0;
/* mess with frame parameters */
if (FUZ_rand(&lseed) & 1) {
- U32 const checksumFlag = FUZ_rand(&lseed) & 1;
+ int const checksumFlag = FUZ_rand(&lseed) & 1;
DISPLAYLEVEL(5, "t%u: frame checksum : %u \n", testNb, checksumFlag);
CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_checksumFlag, checksumFlag, opaqueAPI) );
}
if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_dictIDFlag, FUZ_rand(&lseed) & 1, opaqueAPI) );
if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_contentSizeFlag, FUZ_rand(&lseed) & 1, opaqueAPI) );
if (FUZ_rand(&lseed) & 1) {
- DISPLAYLEVEL(5, "t%u: pledgedSrcSize : %u \n", testNb, (U32)pledgedSrcSize);
+ DISPLAYLEVEL(5, "t%u: pledgedSrcSize : %u \n", testNb, (unsigned)pledgedSrcSize);
CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zc, pledgedSrcSize) );
}
if (bigTests || (FUZ_rand(&lseed) & 0xF) == 0xF) {
U32 const nbThreadsCandidate = (FUZ_rand(&lseed) & 4) + 1;
U32 const nbThreadsAdjusted = (windowLogMalus < nbThreadsCandidate) ? nbThreadsCandidate - windowLogMalus : 1;
- U32 const nbThreads = MIN(nbThreadsAdjusted, nbThreadsMax);
- DISPLAYLEVEL(5, "t%u: nbThreads : %u \n", testNb, nbThreads);
+ int const nbThreads = MIN(nbThreadsAdjusted, nbThreadsMax);
+ DISPLAYLEVEL(5, "t%i: nbThreads : %u \n", testNb, nbThreads);
CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_nbWorkers, nbThreads, opaqueAPI) );
if (nbThreads > 1) {
U32 const jobLog = FUZ_rand(&lseed) % (testLog+1);
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, flush) );
DISPLAYLEVEL(6, "t%u: compress consumed %u bytes (total : %u) ; flush: %u (total : %u) \n",
- testNb, (U32)inBuff.pos, (U32)(totalTestSize + inBuff.pos), (U32)flush, (U32)outBuff.pos);
+ testNb, (unsigned)inBuff.pos, (unsigned)(totalTestSize + inBuff.pos), (unsigned)flush, (unsigned)outBuff.pos);
XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos);
memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos);
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1);
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
outBuff.size = outBuff.pos + adjustedDstSize;
- DISPLAYLEVEL(6, "t%u: End-flush into dst buffer of size %u \n", testNb, (U32)adjustedDstSize);
+ DISPLAYLEVEL(6, "t%u: End-flush into dst buffer of size %u \n", testNb, (unsigned)adjustedDstSize);
remainingToFlush = ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end);
- DISPLAYLEVEL(6, "t%u: Total flushed so far : %u bytes \n", testNb, (U32)outBuff.pos);
+            DISPLAYLEVEL(6, "t%i: Total flushed so far : %u bytes \n", testNb, (unsigned)outBuff.pos);
CHECK( ZSTD_isError(remainingToFlush),
"ZSTD_compressStream2 w/ ZSTD_e_end error : %s",
ZSTD_getErrorName(remainingToFlush) );
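An aside for readers less familiar with the streaming API exercised here : ZSTD_compressStream2() with ZSTD_e_end returns the number of bytes still waiting to be flushed (or an error code), so callers normally loop until it returns 0. A minimal sketch under that contract; flush_end, dst and dstCapacity are hypothetical names, not part of this patch :

    #include <zstd.h>

    /* Hypothetical helper : finish the frame, looping as the API requires. */
    static size_t flush_end(ZSTD_CCtx* cctx, ZSTD_inBuffer* in,
                            void* dst, size_t dstCapacity)
    {
        size_t remaining;
        do {
            ZSTD_outBuffer out = { dst, dstCapacity, 0 };
            remaining = ZSTD_compressStream2(cctx, &out, in, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;  /* pass error code up */
            /* ... write out.pos bytes from dst to their destination ... */
        } while (remaining != 0);
        return 0;
    }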
/* multi - fragments decompression test */
if (!dictSize /* don't reset if dictionary : could be different */ && (FUZ_rand(&lseed) & 1)) {
- DISPLAYLEVEL(5, "resetting DCtx (dict:%08X) \n", (U32)(size_t)dict);
+            DISPLAYLEVEL(5, "resetting DCtx (dict:%p) \n", (const void*)dict);
CHECK_Z( ZSTD_resetDStream(zd) );
} else {
if (dictSize)
inBuff.size = inBuff.pos + readCSrcSize;
outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(6, "decompression presented %u new bytes (pos:%u/%u)\n",
- (U32)readCSrcSize, (U32)inBuff.pos, (U32)cSize);
+ (unsigned)readCSrcSize, (unsigned)inBuff.pos, (unsigned)cSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
DISPLAYLEVEL(6, "so far: consumed = %u, produced = %u \n",
- (U32)inBuff.pos, (U32)outBuff.pos);
+ (unsigned)inBuff.pos, (unsigned)outBuff.pos);
if (ZSTD_isError(decompressionResult)) {
DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult));
findDiff(copyBuffer, dstBuffer, totalTestSize);
}
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
- CHECK (inBuff.pos > cSize, "ZSTD_decompressStream consumes too much input : %u > %u ", (U32)inBuff.pos, (U32)cSize);
+ CHECK (inBuff.pos > cSize, "ZSTD_decompressStream consumes too much input : %u > %u ", (unsigned)inBuff.pos, (unsigned)cSize);
}
- CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize);
- CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize);
+ CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (unsigned)inBuff.pos, (unsigned)cSize);
+ CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (unsigned)outBuff.pos, (unsigned)totalTestSize);
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
CHECK (crcDest!=crcOrig, "decompressed data corrupted");
seed = h % 10000;
}
- DISPLAY("Seed = %u\n", seed);
+ DISPLAY("Seed = %u\n", (unsigned)seed);
if (proba!=FUZ_COMPRESSIBILITY_DEFAULT) DISPLAY("Compressibility : %i%%\n", proba);
if (nbTests<=0) nbTests=1;
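An aside on why the (unsigned) casts above are not just cosmetic : %u instructs the printf machinery to read exactly one unsigned int from the variadic argument list, and passing a U32 is only well-defined when the two types are identical. Where uint32_t is unsigned long, the call may still work by accident because the sizes match, but it remains formally undefined and it trips -Wformat. The dict:%p change follows the same logic : %p consumes a void*, which is simpler and safer than squeezing a pointer through (U32)(size_t), a cast that truncates on 64-bit targets anyway. A minimal standalone illustration, not part of the patch :

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t U32;   /* may be unsigned long on some 32-bit ABIs */

    int main(void)
    {
        U32 const pos = 42;
        /* printf("pos = %u \n", pos);     <-- undefined whenever U32 != unsigned */
        printf("pos = %u \n", (unsigned)pos);  /* well-defined on every platform */
        return 0;
    }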
/* *************************************
* Benchmark Parameters
***************************************/
-static U32 g_nbIterations = NBLOOPS;
+static unsigned g_nbIterations = NBLOOPS;
static size_t g_blockSize = 0;
int g_additionalParam = 0;
void BMK_SetBlockSize(size_t blockSize)
{
g_blockSize = blockSize;
- DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10));
+ DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10));
}
}
/* Compression */
- DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize);
+ DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
ratio = (double)srcSize / (double)cSize;
markNb = (markNb+1) % NB_MARKS;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r",
- marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio,
+ marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
(double)srcSize / fastestC );
(void)fastestD; (void)crcOrig; /* unused when decompression disabled */
ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
do {
- U32 blockNb;
+ unsigned blockNb;
for (blockNb=0; blockNb<nbBlocks; blockNb++) {
size_t const regenSize = ZSTD_decompress_usingDDict(dctx,
blockTable[blockNb].resPtr, blockTable[blockNb].srcSize,
markNb = (markNb+1) % NB_MARKS;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r",
- marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio,
+ marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
(double)srcSize / fastestC,
(double)srcSize / fastestD );
DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) {
if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) {
- U32 segNb, bNb, pos;
+ unsigned segNb, bNb, pos;
size_t bacc = 0;
- DISPLAY("Decoding error at pos %u ", (U32)u);
+ DISPLAY("Decoding error at pos %u ", (unsigned)u);
for (segNb = 0; segNb < nbBlocks; segNb++) {
if (bacc + blockTable[segNb].srcSize > u) break;
bacc += blockTable[segNb].srcSize;
SET_REALTIME_PRIORITY;
if (g_displayLevel == 1 && !g_additionalParam)
- DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (U32)benchedSize, g_nbIterations, (U32)(g_blockSize>>10));
+ DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
+ ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
+ (unsigned)benchedSize, g_nbIterations, (unsigned)(g_blockSize>>10));
if (cLevelLast < cLevel) cLevelLast = cLevel;
dictBufferSize = (size_t)dictFileSize;
dictBuffer = malloc(dictBufferSize);
if (dictBuffer==NULL)
- EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (U32)dictBufferSize);
+ EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize);
BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1);
}
benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
if (benchedSize < totalSizeToLoad)
- DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20));
+ DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
srcBuffer = malloc(benchedSize + !benchedSize);   /* +!benchedSize : avoid malloc(0) */
if (!srcBuffer) EXM_THROW(12, "not enough memory");
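Finally, on the detection question : once U32 genuinely differs from unsigned, every leftover mismatch of this kind is reported at compile time by -Wformat (part of -Wall), so a single dedicated build configuration could be enough to guard against regressions. A hypothetical test file, not taken from the patch, showing the class of warning involved :

    /* u32format.c : compile with `gcc -Wall -c u32format.c` */
    #include <stdio.h>

    typedef unsigned long U32;   /* simulate the ABI where U32 != unsigned */

    void report(U32 size)
    {
        printf("size = %u \n", size);            /* warning: %u expects unsigned int */
        printf("size = %u \n", (unsigned)size);  /* compiles silently */
    }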