static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31));   /* 2 GB - 64 MB on 32-bit targets, 8 GB on 64-bit */
-//TODO: remove this gv as well
-//Only used in Synthetic test. Separate?
+/* remove this in the future? */
static U32 g_compressibilityDefault = 50;
/* *************************************
BMK_advancedParams_t BMK_defaultAdvancedParams(void) {
BMK_advancedParams_t res = {
- 0, /* mode */
- 0, /* nbCycles */
+ BMK_both, /* mode */
+ BMK_timeMode, /* loopMode */
BMK_TIMETEST_DEFAULT_S, /* nbSeconds */
0, /* blockSize */
0, /* nbWorkers */
}
-//ignore above for error stuff, return type still undecided
-
/* BMK_timeMode : iter = nb of seconds ; BMK_iterMode : iter = nb of cycles (nbLoops) */
/* initFn will be measured once, bench fn will be measured x times */
/* benchFn should return an error code or the output size */
-//problem : how to get cSize this way for ratio?
-//also possible fastest rounds down to 0 if 0 < loopDuration < nbLoops (that would mean <1ns / op though)
/* takes # of blocks and, for each block, its src/dst pointers and sizes */
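+/* on success : .error == 0 and .result.time reports the fastest time per pass, in nanoseconds */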
BMK_customReturn_t BMK_benchCustom(
const char* functionName, size_t blockCount,
- const void* const * const srcBuffers, size_t* srcSizes,
- void* const * const dstBuffers, size_t* dstSizes,
+ const void* const * const srcBuffers, const size_t* srcSizes,
+ void* const * const dstBuffers, const size_t* dstSizes,
size_t (*initFn)(void*), size_t (*benchFn)(const void*, size_t, void*, size_t, void*),
void* initPayload, void* benchPayload,
unsigned mode, unsigned iter,
    /* display last 17 characters of functionName */
if (strlen(functionName)>17) functionName += strlen(functionName)-17;
if(!iter) {
- if(mode) {
- EXM_THROW(1, BMK_customReturn_t, "nbSeconds must be nonzero \n");
- } else {
+ if(mode == BMK_iterMode) {
EXM_THROW(1, BMK_customReturn_t, "nbLoops must be nonzero \n");
}
srcSize += srcSizes[ind];
}
- //change to switch if more modes?
- if(!mode) {
- int completed = 0;
- U64 const maxTime = (iter * TIMELOOP_NANOSEC) + 1;
- unsigned nbLoops = 1;
- UTIL_time_t coolTime = UTIL_getTime();
- while(!completed) {
- unsigned i, j;
- /* Overheat protection */
- if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
- DISPLAYLEVEL(2, "\rcooling down ... \r");
- UTIL_sleep(COOLPERIOD_SEC);
- coolTime = UTIL_getTime();
- }
-
- for(i = 0; i < blockCount; i++) {
- memset(dstBuffers[i], 0xD6, dstSizes[i]); /* warm up and erase result buffer */
- }
+ switch(mode) {
+ case BMK_timeMode:
+ {
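+            /* time-based mode : run the full block set nbLoops times per measurement,
+             * growing nbLoops so each measurement lasts about TIMELOOP_NANOSEC,
+             * keep the fastest time per pass, and stop once maxTime has elapsed */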
+ int completed = 0;
+ U64 const maxTime = (iter * TIMELOOP_NANOSEC) + 1;
+ unsigned nbLoops = 1;
+ UTIL_time_t coolTime = UTIL_getTime();
+ while(!completed) {
+ unsigned i, j;
+ /* Overheat protection */
+ if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
+ DISPLAYLEVEL(2, "\rcooling down ... \r");
+ UTIL_sleep(COOLPERIOD_SEC);
+ coolTime = UTIL_getTime();
+ }
+
+ for(i = 0; i < blockCount; i++) {
+ memset(dstBuffers[i], 0xD6, dstSizes[i]); /* warm up and erase result buffer */
+ }
+ clockStart = UTIL_getTime();
+ (*initFn)(initPayload);
+
+ for(i = 0; i < nbLoops; i++) {
+ for(j = 0; j < blockCount; j++) {
+ size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
+ if(ZSTD_isError(res)) {
+ EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
+ functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
+ } else if (toAdd) {
+ dstSize += res;
+ }
+ }
+ toAdd = 0;
+ }
+ { U64 const loopDuration = UTIL_clockSpanNano(clockStart);
+ if (loopDuration > 0) {
+ fastest = MIN(fastest, loopDuration / nbLoops);
+ nbLoops = (U32)(TIMELOOP_NANOSEC / fastest) + 1;
+ } else {
+ assert(nbLoops < 40000000); /* avoid overflow */
+ nbLoops *= 100;
+ }
+ totalTime += loopDuration;
+ completed = (totalTime >= maxTime);
+ }
+ }
+ break;
+ }
+ case BMK_iterMode:
+ {
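+            /* iteration-based mode : run the full block set exactly `iter` times
+             * and report the average time per pass */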
+ unsigned i, j;
clockStart = UTIL_getTime();
- (*initFn)(initPayload);
-
- for(i = 0; i < nbLoops; i++) {
+ for(i = 0; i < iter; i++) {
for(j = 0; j < blockCount; j++) {
size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
if(ZSTD_isError(res)) {
EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
- } else if (toAdd) {
+ } else if(toAdd) {
dstSize += res;
}
}
toAdd = 0;
}
- { U64 const loopDuration = UTIL_clockSpanNano(clockStart);
- if (loopDuration > 0) {
- fastest = MIN(fastest, loopDuration / nbLoops);
- nbLoops = (U32)(TIMELOOP_NANOSEC / fastest) + 1;
- } else {
- assert(nbLoops < 40000000); /* avoid overflow */
- nbLoops *= 100;
- }
- totalTime += loopDuration;
- completed = (totalTime >= maxTime);
- }
- }
- } else {
- unsigned i, j;
- clockStart = UTIL_getTime();
- for(i = 0; i < iter; i++) {
- for(j = 0; j < blockCount; j++) {
- size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
- if(ZSTD_isError(res)) {
- EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
- functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
- } else if(toAdd) {
- dstSize += res;
- }
+ totalTime = UTIL_clockSpanNano(clockStart);
+ if(!totalTime) {
+ EXM_THROW(3, BMK_customReturn_t, "Cycle count (%u) too short to measure \n", iter);
+ } else {
+ fastest = totalTime / iter;
}
- toAdd = 0;
- }
- totalTime = UTIL_clockSpanNano(clockStart);
- if(!totalTime) {
- EXM_THROW(3, BMK_customReturn_t, "Cycle count (%u) too short to measure \n", iter);
- } else {
- fastest = totalTime / iter;
+ break;
}
+ default:
+ EXM_THROW(4, BMK_customReturn_t, "Unknown Mode \n");
}
retval.error = 0;
retval.result.time = fastest;
int displayLevel, const char* displayName, const BMK_advancedParams_t* adv)
{
- size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_DECODE_ONLY)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
+ size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
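+    /* upper bound : ceil(srcSize/blockSize) plus up to one extra partial block per file */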
/* these are the blockTable parameters, just split up */
- const void ** const srcPtrs = malloc(maxNbBlocks * sizeof(void*));
- size_t* const srcSizes = malloc(maxNbBlocks * sizeof(size_t));
+    const void ** const srcPtrs = (const void**)malloc(maxNbBlocks * sizeof(void*));
+    size_t* const srcSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
- void ** const cPtrs = malloc(maxNbBlocks * sizeof(void*));
- size_t* const cSizes = malloc(maxNbBlocks * sizeof(size_t));
+    void ** const cPtrs = (void**)malloc(maxNbBlocks * sizeof(void*));
+    size_t* const cSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
- void ** const resPtrs = malloc(maxNbBlocks * sizeof(void*));
- size_t* const resSizes = malloc(maxNbBlocks * sizeof(size_t));
+    void ** const resPtrs = (void**)malloc(maxNbBlocks * sizeof(void*));
+    size_t* const resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
const size_t maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); /* add some room for safety */
void* compressedBuffer = malloc(maxCompressedSize);
/* init */
if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* display last 17 characters */
- if (adv->mode == BMK_DECODE_ONLY) { /* benchmark only decompression : source must be already compressed */
+ if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */
const char* srcPtr = (const char*)srcBuffer;
U64 totalDSize64 = 0;
U32 fileNb;
U32 fileNb;
for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
size_t remaining = fileSizes[fileNb];
- U32 const nbBlocksforThisFile = (adv->mode == BMK_DECODE_ONLY) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize);
+ U32 const nbBlocksforThisFile = (adv->mode == BMK_decodeOnly) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize);
U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
for ( ; nbBlocks<blockEnd; nbBlocks++) {
size_t const thisBlockSize = MIN(remaining, blockSize);
srcPtrs[nbBlocks] = (const void*)srcPtr;
srcSizes[nbBlocks] = thisBlockSize;
cPtrs[nbBlocks] = (void*)cPtr;
- cSizes[nbBlocks] = (adv->mode == BMK_DECODE_ONLY) ? thisBlockSize : ZSTD_compressBound(thisBlockSize);
- //blockTable[nbBlocks].cSize = blockTable[nbBlocks].cRoom;
+ cSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? thisBlockSize : ZSTD_compressBound(thisBlockSize);
resPtrs[nbBlocks] = (void*)resPtr;
- resSizes[nbBlocks] = (adv->mode == BMK_DECODE_ONLY) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize;
+ resSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize;
srcPtr += thisBlockSize;
- cPtr += cSizes[nbBlocks]; //blockTable[nbBlocks].cRoom;
+ cPtr += cSizes[nbBlocks];
resPtr += thisBlockSize;
remaining -= thisBlockSize;
}
}
    /* warming up memory */
- if (adv->mode == BMK_DECODE_ONLY) {
+ if (adv->mode == BMK_decodeOnly) {
memcpy(compressedBuffer, srcBuffer, loadedCompressedSize);
} else {
RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);
}
/* Bench */
-
- //TODO: Make sure w/o new loop decode_only code isn't run
- //TODO: Support nbLoops and nbSeconds
{
- U64 const crcOrig = (adv->mode == BMK_DECODE_ONLY) ? 0 : XXH64(srcBuffer, srcSize, 0);
+ U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0);
# define NB_MARKS 4
const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
U32 markNb = 0;
DISPLAYLEVEL(2, "\r%79s\r", "");
- if (adv->mode != BMK_DECODE_ONLY) {
- BMK_initCCtxArgs cctxprep = { ctx, dictBuffer, dictBufferSize, cLevel, comprParams, adv };
+ if (adv->mode != BMK_decodeOnly) {
+ BMK_initCCtxArgs cctxprep;
BMK_customReturn_t compressionResults;
+ cctxprep.ctx = ctx;
+ cctxprep.dictBuffer = dictBuffer;
+ cctxprep.dictBufferSize = dictBufferSize;
+ cctxprep.cLevel = cLevel;
+ cctxprep.comprParams = comprParams;
+ cctxprep.adv = adv;
/* Compression */
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize);
compressionResults = BMK_benchCustom("ZSTD_compress_generic", nbBlocks,
ratioAccuracy, ratio,
cSpeedAccuracy, compressionSpeed);
}
- } /* if (adv->mode != BMK_DECODE_ONLY) */
- {
- BMK_initDCtxArgs dctxprep = { dctx, dictBuffer, dictBufferSize };
+ } /* if (adv->mode != BMK_decodeOnly) */
+
+ if(adv->mode != BMK_compressOnly) {
+ BMK_initDCtxArgs dctxprep;
BMK_customReturn_t decompressionResults;
+ dctxprep.dctx = dctx;
+ dctxprep.dictBuffer = dictBuffer;
+ dctxprep.dictBufferSize = dictBufferSize;
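+        /* Decompression */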
decompressionResults = BMK_benchCustom("ZSTD_decompress_generic", nbBlocks,
(const void * const *)cPtrs, cSizes, resPtrs, resSizes,
&local_initDCtx, &local_defaultDecompress,
/* CRC Checking */
{ U64 const crcCheck = XXH64(resultBuffer, srcSize, 0);
- if ((adv->mode != BMK_DECODE_ONLY) && (crcOrig!=crcCheck)) {
+            /* the CRC check is only meaningful when data was both compressed and decompressed (BMK_both) */
+ if ((adv->mode == BMK_both) && (crcOrig!=crcCheck)) {
size_t u;
DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) {
DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName);
}
DISPLAYLEVEL(2, "%2i#\n", cLevel);
-} /* Bench */
+ } /* Bench */
/* clean up */
free(compressedBuffer);
free(resultBuffer);
- free(srcPtrs);
+ free((void*)srcPtrs);
free(srcSizes);
free(cPtrs);
free(cSizes);
{
int l;
BMK_result_t* res = (BMK_result_t*)malloc(sizeof(BMK_result_t) * (cLevelLast - cLevel + 1));
- BMK_returnPtr_t ret = { 0, res };
+ BMK_returnPtr_t ret;
const char* pch = strrchr(displayName, '\\'); /* Windows */
+
+ ret.error = 0;
+ ret.result = res;
+
if (!pch) pch = strrchr(displayName, '/'); /* Linux */
if (pch) displayName = pch+1;
size_t benchedSize;
void* dictBuffer = NULL;
size_t dictBufferSize = 0;
- size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
+ size_t* const fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t));
BMK_returnSet_t res;
U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
res.result.cLevel = cLevel;
res.result.cLevelLast = cLevelLast;
+ res.result.results = NULL;
if (!fileSizes) EXM_THROW(12, BMK_returnSet_t, "not enough memory for fileSizes");
/* Load dictionary */
res.result.nbFiles = 1;
snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
{
+ BMK_returnPtr_t errorOrPtr;
const char* const displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
res.result.results = (BMK_result_t**)malloc(sizeof(BMK_result_t*));
- BMK_returnPtr_t errorOrPtr = BMK_benchCLevel(srcBuffer, benchedSize,
+ errorOrPtr = BMK_benchCLevel(srcBuffer, benchedSize,
fileSizes, nbFiles,
cLevel, cLevelLast, compressionParams,
dictBuffer, dictBufferSize,
size_t benchedSize = 10000000;
void* const srcBuffer = malloc(benchedSize);
BMK_returnSet_t res;
- res.result.results = malloc(sizeof(BMK_result_t*));
+ BMK_returnPtr_t errPtr;
+ res.result.results = (BMK_result_t**)calloc(1,sizeof(BMK_result_t*));
res.result.nbFiles = 1;
res.result.cLevel = cLevel;
res.result.cLevelLast = cLevelLast;
/* Bench */
snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
- BMK_returnPtr_t errPtr = BMK_benchCLevel(srcBuffer, benchedSize,
+ errPtr = BMK_benchCLevel(srcBuffer, benchedSize,
&benchedSize, 1,
cLevel, cLevelLast, compressionParams,
NULL, 0,
res.result.results[0] = errPtr.result;
/* clean up */
- free(srcBuffer);
+ free((void*)srcBuffer);
res.error = 0;
return res;
}
void BMK_freeResultSet(BMK_resultSet_t src) {
unsigned i;
- for(i = 0; i <= src.nbFiles; i++) {
+ if(src.results == NULL) { return; }
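+    /* free each per-file result array, then the table of pointers */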
+ for(i = 0; i < src.nbFiles; i++) {
free(src.results[i]);
}
free(src.results);
#include "zstd.h" /* ZSTD_versionString */
#include "util.h" /* time functions */
#include "datagen.h"
+#include "bench.h" /* BMK_benchCustom */
/*_************************************
/*_*******************************************************
* Benchmark wrappers
*********************************************************/
-size_t local_ZSTD_compress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
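+/* no-op init function : the wrappers below need no per-measurement setup */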
+size_t local_nothing(void* x) {
+ (void)x;
+ return 0;
+}
+
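+/* wrappers now follow the benchFn calling convention : (src, srcSize, dst, dstCapacity, payload) */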
+size_t local_ZSTD_compress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)buff2;
return ZSTD_compress(dst, dstSize, src, srcSize, 1);
}
static size_t g_cSize = 0;
-size_t local_ZSTD_decompress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_decompress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)src; (void)srcSize;
return ZSTD_decompress(dst, dstSize, buff2, g_cSize);
#ifndef ZSTD_DLL_IMPORT
extern size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
-size_t local_ZSTD_decodeLiteralsBlock(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_decodeLiteralsBlock(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)src; (void)srcSize; (void)dst; (void)dstSize;
return ZSTD_decodeLiteralsBlock((ZSTD_DCtx*)g_zdc, buff2, g_cSize);
}
extern size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeq, const void* src, size_t srcSize);
-size_t local_ZSTD_decodeSeqHeaders(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
int nbSeq;
(void)src; (void)srcSize; (void)dst; (void)dstSize;
#endif
static ZSTD_CStream* g_cstream= NULL;
-size_t local_ZSTD_compressStream(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_compressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
return buffOut.pos;
}
-static size_t local_ZSTD_compress_generic_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+static size_t local_ZSTD_compress_generic_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
return buffOut.pos;
}
-static size_t local_ZSTD_compress_generic_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+static size_t local_ZSTD_compress_generic_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
return buffOut.pos;
}
-static size_t local_ZSTD_compress_generic_T2_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+static size_t local_ZSTD_compress_generic_T2_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
return buffOut.pos;
}
-static size_t local_ZSTD_compress_generic_T2_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+static size_t local_ZSTD_compress_generic_T2_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
}
static ZSTD_DStream* g_dstream= NULL;
-static size_t local_ZSTD_decompressStream(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+static size_t local_ZSTD_decompressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
static ZSTD_CCtx* g_zcc = NULL;
#ifndef ZSTD_DLL_IMPORT
-size_t local_ZSTD_compressContinue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_compressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
(void)buff2;
ZSTD_compressBegin(g_zcc, 1 /* compressionLevel */);
}
#define FIRST_BLOCK_SIZE 8
-size_t local_ZSTD_compressContinue_extDict(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
BYTE firstBlockBuf[FIRST_BLOCK_SIZE];
return ZSTD_compressEnd(g_zcc, dst, dstCapacity, (const BYTE*)src + FIRST_BLOCK_SIZE, srcSize - FIRST_BLOCK_SIZE);
}
-size_t local_ZSTD_decompressContinue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
+size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
size_t regeneratedSize = 0;
const BYTE* ip = (const BYTE*)buff2;
size_t const dstBuffSize = ZSTD_compressBound(srcSize);
void* buff2;
const char* benchName;
- size_t (*benchFunction)(void* dst, size_t dstSize, void* verifBuff, const void* src, size_t srcSize);
- double bestTime = 100000000.;
+ size_t (*benchFunction)(const void* src, size_t srcSize, void* dst, size_t dstSize, void* verifBuff);
/* Selection */
switch(benchNb)
default : ;
}
+
/* warming up memory */
{ size_t i; for (i=0; i<dstBuffSize; i++) dstBuff[i]=(BYTE)i; }
+
/* benchmark loop */
- { U32 loopNb;
- U32 nbRounds = (U32)((50 MB) / (srcSize+1)) + 1; /* initial conservative speed estimate */
-# define TIME_SEC_MICROSEC (1*1000000ULL) /* 1 second */
-# define TIME_SEC_NANOSEC (1*1000000000ULL) /* 1 second */
- DISPLAY("%2i- %-30.30s : \r", benchNb, benchName);
- for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
- UTIL_time_t clockStart;
- size_t benchResult=0;
- U32 roundNb;
-
- UTIL_sleepMilli(5); /* give processor time to other processes */
- UTIL_waitForNextTick();
- clockStart = UTIL_getTime();
- for (roundNb=0; roundNb < nbRounds; roundNb++) {
- benchResult = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize);
- if (ZSTD_isError(benchResult)) {
- DISPLAY("ERROR ! %s() => %s !! \n", benchName, ZSTD_getErrorName(benchResult));
- exit(1);
- } }
- { U64 const clockSpanNano = UTIL_clockSpanNano(clockStart);
- double const averageTime = (double)clockSpanNano / TIME_SEC_NANOSEC / nbRounds;
- if (clockSpanNano > 0) {
- if (averageTime < bestTime) bestTime = averageTime;
- assert(bestTime > (1./2000000000));
- nbRounds = (U32)(1. / bestTime); /* aim for 1 sec */
- DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r",
- loopNb, benchName,
- (double)srcSize / (1 MB) / bestTime,
- (U32)benchResult);
- } else {
- assert(nbRounds < 40000000); /* avoid overflow */
- nbRounds *= 100;
- }
- } } }
- DISPLAY("%2u\n", benchNb);
+    {
+        BMK_customReturn_t r = BMK_benchCustom(benchName, 1,
+                                    &src, &srcSize,
+                                    (void* const*)&dstBuff, &dstBuffSize,
+                                    &local_nothing, benchFunction,
+                                    NULL, buff2,
+                                    BMK_timeMode, 1 /* second */, 2);
+        if(r.error) {
+            DISPLAY("ERROR %d !! \n", r.error);
+            exit(1);
+        }
+        /* r.result.time is in ns per pass : bytes/ns * 1000 == MB/s */
+        DISPLAY("%2u#Speed: %f MB/s - Size: %f MB\n", benchNb,
+                (double)srcSize / r.result.time * 1000, (double)r.result.size / 1000000);
+    }
+
_cleanOut:
free(dstBuff);
free(buff2);