ZSTD_CCtx* cctx;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
if (cctx == NULL) return NULL;
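/* Illustrative usage sketch (not part of the diff): carving a CCtx out of a
 * caller-owned buffer, matching the checks above. The 1 MiB size is an
 * assumption for the example; real code should size the buffer with
 * ZSTD_estimateCCtxSize(). A size_t array guarantees the 8-byte alignment
 * the second check requires. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t g_cctxSpace[(1 << 20) / sizeof(size_t)];  /* 1 MiB, assumed sufficient */

ZSTD_CCtx* makeStaticCCtx(void)
{
    /* NULL if the buffer were too small or misaligned, per the checks above */
    return ZSTD_initStaticCCtx(g_cctxSpace, sizeof(g_cctxSpace));
}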
bounds.lowerBound = (int)ZSTD_bm_buffered;
bounds.upperBound = (int)ZSTD_bm_stable;
return bounds;
-
+
case ZSTD_c_blockDelimiters:
bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
return bounds;
-
+
case ZSTD_c_validateSequences:
bounds.lowerBound = 0;
bounds.upperBound = 1;
BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
return CCtxParams->outBufferMode;
-
+
case ZSTD_c_blockDelimiters:
BOUNDCHECK(ZSTD_c_blockDelimiters, value);
CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
return CCtxParams->blockDelimiters;
-
+
case ZSTD_c_validateSequences:
BOUNDCHECK(ZSTD_c_validateSequences, value);
CCtxParams->validateSequences = value;
        return CCtxParams->validateSequences;
}
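/* Illustrative sketch (not part of the diff): exercising the bounds and the
 * setters above through the public API. Both parameters are experimental, so
 * ZSTD_STATIC_LINKING_ONLY is required; the function name is ours. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <assert.h>

void configureSequenceParams(ZSTD_CCtx* zc)
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_blockDelimiters);
    assert(!ZSTD_isError(b.error));
    assert(b.lowerBound == (int)ZSTD_sf_noBlockDelimiters);
    ZSTD_CCtx_setParameter(zc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
    ZSTD_CCtx_setParameter(zc, ZSTD_c_validateSequences, 1);
}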
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, 0 /* not static */);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
assert(cdict != NULL);
{
ZSTD_cwksp ws;
- ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
if (cdict == NULL) return NULL;
ZSTD_cwksp_move(&cdict->workspace, &ws);
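/* Illustrative sketch (not part of the diff): the caller-side counterpart of
 * the static-CDict path above. The level-3 cParams are an assumption; the
 * returned CDict lives inside 'wksp', so 'wksp' must outlive it. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

const ZSTD_CDict* makeStaticCDict(void* wksp, size_t wkspSize,
                                  const void* dict, size_t dictSize)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, dictSize);
    size_t const needed = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
    if (wkspSize < needed) return NULL;  /* mirror the workspace-too-small failure mode */
    return ZSTD_initStaticCDict(wksp, wkspSize, dict, dictSize,
                                ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
}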
/* Returns the number of bytes to move the current read position back by. This is
 * only non-zero if we ended up splitting a sequence. It may also return a ZSTD
 * error if something went wrong.
- *
+ *
* This function will attempt to scan through blockSize bytes represented by the sequences
- * in inSeqs, storing any (partial) sequences.
- *
+ * in inSeqs, storing any (partial) sequences.
+ *
 * Occasionally, we may want to reduce the number of bytes consumed from inSeqs to
 * avoid splitting a match, or to avoid a split that would produce a match smaller
 * than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
 */
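/* Illustrative caller-side sketch (not part of the diff): how the contract
 * above is consumed inside the block loop. The variable name and the exact
 * call shape are assumptions that mirror the surrounding code. */
{   size_t const bytesAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
    if (ZSTD_isError(bytesAdjustment)) return bytesAdjustment;  /* forward the error */
    blockSize -= bytesAdjustment;  /* give back the bytes the copier declined to consume */
}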
U32 matchLength;
U32 rawOffset;
U32 offCode;
-
+
if (cctx->cdict) {
dictSize = cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
size_t compressedSeqsSize;
size_t remaining = srcSize;
ZSTD_sequencePosition seqPos = {0, 0, 0};
-
+
BYTE const* ip = (BYTE const*)src;
BYTE* op = (BYTE*)dst;
ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
cSize += cBlockSize;
DEBUGLOG(4, "cSize running total: %zu", cSize);
-
+
if (lastBlock) {
break;
} else {
cctx->isFirstBlock = 0;
}
}
-
+
return cSize;
}
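/* Illustrative end-to-end sketch (not part of the diff): the public
 * ZSTD_compressSequences() entry point that drives the loop above. The
 * wrapper assumes 'seqs' carries explicit block delimiters. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

size_t compressFromSequences(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                             const ZSTD_Sequence* seqs, size_t nbSeqs,
                             const void* src, size_t srcSize)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);  /* trade speed for safety */
    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
}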
void* tableValidEnd;
void* allocStart;
- int allocFailed;
+ BYTE allocFailed;
+ BYTE isStatic;
int workspaceOversizedDuration;
ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
 * either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (!ws->isStatic) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
#endif
return alloc;
ws->tableEnd = end;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- __asan_unpoison_memory_region(alloc, bytes);
+ if (!ws->isStatic) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
#endif
return alloc;
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
 * either side. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
- __asan_unpoison_memory_region(alloc, bytes);
+ if (!ws->isStatic) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
#endif
return alloc;
DEBUGLOG(4, "cwksp: clearing tables!");
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (!ws->isStatic) {
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
#endif
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- {
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (!ws->isStatic) {
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
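/* Illustrative sketch (not part of the diff) of the lifecycle problem the two
 * comments above describe: a poisoned region must be unpoisoned before the
 * memory is handed back to its owner. A static workspace never gives zstd
 * that chance, hence the !ws->isStatic guards. */
#include <sanitizer/asan_interface.h>

static char callerOwned[64];  /* stands in for a statically-allocated workspace */

void poisonLifecycleDemo(void)
{
    __asan_poison_memory_region(callerOwned, sizeof(callerOwned));
    /* Any access to callerOwned here would be reported by ASAN. */
    __asan_unpoison_memory_region(callerOwned, sizeof(callerOwned));
    callerOwned[0] = 1;  /* legal again only because we could run the unpoison */
}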
* Any existing values in the workspace are ignored (the previously managed
* buffer, if present, must be separately freed).
*/
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, int isStatic) {
DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
ws->workspace = start;
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = !!isStatic;
ZSTD_cwksp_clear(ws);
ws->workspaceOversizedDuration = 0;
ZSTD_cwksp_assert_internal_consistency(ws);
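/* Illustrative sketch (not part of the diff): the two init modes side by side.
 * A static workspace wraps caller-owned memory and skips ASAN poisoning; a
 * dynamic one wraps memory the library frees itself (see ZSTD_cwksp_create
 * below). The function name is ours. */
void initModesDemo(void* callerBuffer, size_t callerBufferSize)
{
    ZSTD_cwksp ws;
    ZSTD_cwksp_init(&ws, callerBuffer, callerBufferSize, 1 /* static */);
    /* objects carved from 'ws' must not outlive 'callerBuffer' */
}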
void* workspace = ZSTD_customMalloc(size, customMem);
DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
- ZSTD_cwksp_init(ws, workspace, size);
+ ZSTD_cwksp_init(ws, workspace, size, 0 /* not static */);
return 0;
}