From: W. Felix Handte
Date: Wed, 9 Dec 2020 19:56:12 +0000 (-0500)
Subject: Create Enum to Represent Static/Dynamic Allocation Distinction in cwksp
X-Git-Tag: v1.4.7~9^2
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F2419%2Fhead;p=thirdparty%2Fzstd.git

Create Enum to Represent Static/Dynamic Allocation Distinction in cwksp
---

diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index ee6a92d5c..eb7780cf1 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -109,7 +109,7 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
     ZSTD_CCtx* cctx;
     if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
     if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
-    ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
+    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
 
     cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
     if (cctx == NULL) return NULL;
@@ -3692,7 +3692,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
             return NULL;
         }
 
-        ZSTD_cwksp_init(&ws, workspace, workspaceSize, 0 /* not static */);
+        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
 
         cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
         assert(cdict != NULL);
@@ -3836,7 +3836,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
 
     {
         ZSTD_cwksp ws;
-        ZSTD_cwksp_init(&ws, workspace, workspaceSize, 1 /* static */);
+        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
         cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
         if (cdict == NULL) return NULL;
         ZSTD_cwksp_move(&cdict->workspace, &ws);
diff --git a/lib/compress/zstd_cwksp.h b/lib/compress/zstd_cwksp.h
index 3e9bb7ee6..d65170b39 100644
--- a/lib/compress/zstd_cwksp.h
+++ b/lib/compress/zstd_cwksp.h
@@ -44,6 +44,16 @@ typedef enum {
     ZSTD_cwksp_alloc_aligned
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -138,9 +148,9 @@ typedef struct {
     void* allocStart;
 
     BYTE allocFailed;
-    BYTE isStatic;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -257,7 +267,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    if (!ws->isStatic) {
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
 #endif
@@ -305,7 +315,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
 
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    if (!ws->isStatic) {
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
 #endif
@@ -346,7 +356,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    if (!ws->isStatic) {
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
 #endif
@@ -409,7 +419,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
      * when that is the case, we have no capability to hook into the end of the
      * workspace's lifecycle to unpoison the memory.
      */
-    if (!ws->isStatic) {
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -442,7 +452,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
      * when that is the case, we have no capability to hook into the end of the
      * workspace's lifecycle to unpoison the memory.
      */
-    if (!ws->isStatic) {
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -462,7 +472,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, int isStatic) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -470,7 +480,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, int is
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
-    ws->isStatic = !!isStatic;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
@@ -480,7 +490,7 @@ MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem
     void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size, 0 /* not static */);
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
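Usage note (not part of the patch): a minimal sketch of how a call site is expected to choose between the two enum values after this change. It assumes compilation inside the zstd source tree, with lib/common and lib/compress on the include path; the helper name is hypothetical.

/* Hypothetical helper, for illustration only -- not part of the zstd API.
 * Assumes the internal header lib/compress/zstd_cwksp.h is reachable. */
#include <stddef.h>
#include "zstd_cwksp.h"

/* Carve an object out of a caller-provided buffer that zstd does not own.
 * Passing ZSTD_cwksp_static_alloc (rather than the old raw int flag) makes
 * the call site self-documenting and tells the workspace not to apply ASAN
 * poisoning to memory it will never free. */
static void* reserve_from_static_buffer(void* buffer, size_t bufferSize, size_t objectSize)
{
    ZSTD_cwksp ws;  /* buffer must be pointer-aligned, as asserted by ZSTD_cwksp_init() */
    ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
    /* Returns NULL if the buffer is too small for the requested object. */
    return ZSTD_cwksp_reserve_object(&ws, objectSize);
    /* A dynamically allocated workspace would instead pass
     * ZSTD_cwksp_dynamic_alloc, as ZSTD_cwksp_create() does in the patch. */
}

The real call sites (e.g. ZSTD_initStaticCDict above) then ZSTD_cwksp_move() the workspace into the reserved object so its bookkeeping outlives the local scope.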