git.ipfire.org Git - thirdparty/zstd.git/commitdiff
changed macro name to ZSTD_ALIGNOF 2896/head
author: Yann Collet <cyan@fb.com>
Thu, 2 Dec 2021 20:57:42 +0000 (12:57 -0800)
committer: Yann Collet <cyan@fb.com>
Thu, 2 Dec 2021 20:57:42 +0000 (12:57 -0800)
for better consistency

lib/common/compiler.h
lib/compress/huf_compress.c
lib/compress/zstd_cwksp.h

index 98590ce67631fe173d64ce497b428267cecf578c..17c049bfda8a5841a63b161ae540ad29f3d9a035 100644 (file)
  * which remains valid for both user & kernel spaces.
  */
 
-#ifndef MEM_ALIGN_COND
+#ifndef ZSTD_ALIGNOF
 # if defined(__GNUC__) || defined(_MSC_VER)
 /* covers gcc, clang & MSVC */
 /* note : this section must come first, before C11,
  * due to a limitation in the kernel source generator */
-#  define MEM_ALIGN_COND(T) __alignof(T)
+#  define ZSTD_ALIGNOF(T) __alignof(T)
 
 # elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
 /* C11 support */
 #  include <stdalign.h>
-#  define MEM_ALIGN_COND(T) alignof(T)
+#  define ZSTD_ALIGNOF(T) alignof(T)
 
 # else
 /* No known support for alignof() - imperfect backup */
-#  define MEM_ALIGN_COND(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
+#  define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
 
 # endif
-#endif /* MEM_ALIGN_COND */
+#endif /* ZSTD_ALIGNOF */
 
 /*-**************************************************************
 *  Sanitizer
index ceedce727fa3e3c36d526594bdd139d3aeba436e..2b3d6adc2a20cb1408123136aa3ff1f28a8db1af 100644 (file)
@@ -97,7 +97,7 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT
 
     unsigned maxSymbolValue = HUF_TABLELOG_MAX;
     U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, MEM_ALIGN_COND(U32));
+    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
 
     if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
 
@@ -176,7 +176,7 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
     HUF_CElt const* const ct = CTable + 1;
     BYTE* op = (BYTE*)dst;
     U32 n;
-    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, MEM_ALIGN_COND(U32));
+    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
 
     /* check conditions */
     if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
@@ -679,7 +679,7 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
 
 size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
 {
-    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, MEM_ALIGN_COND(U32));
+    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
     nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
     nodeElt* const huffNode = huffNode0+1;
     int nonNullRank;
@@ -1183,7 +1183,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
                        HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
                  const int bmi2, unsigned suspectUncompressible)
 {
-    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, MEM_ALIGN_COND(size_t));
+    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + dstSize;
     BYTE* op = ostart;
index 305667606d692c430a94086730c57d0d5a987826..29d027e9cdb0c44a3ff74c2c8cf43dcee7224bec 100644 (file)
@@ -422,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert((size_t)alloc % MEM_ALIGN_COND(void*) == 0);
-    assert(bytes % MEM_ALIGN_COND(void*) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {