From: Hans Kristian Rosbach
Date: Sun, 22 Dec 2024 12:25:27 +0000 (+0100)
Subject: Rename functions to get rid of old and now misleading "unaligned" naming
X-Git-Tag: 2.2.3~2
X-Git-Url: http://git.ipfire.org/gitweb/gitweb.cgi?a=commitdiff_plain;h=1aeb2915a0d793c549be1691c9c99977265b220f;p=thirdparty%2Fzlib-ng.git

Rename functions to get rid of old and now misleading "unaligned" naming
---

diff --git a/arch/generic/compare256_c.c b/arch/generic/compare256_c.c
index ae0e7179..bdcb8139 100644
--- a/arch/generic/compare256_c.c
+++ b/arch/generic/compare256_c.c
@@ -60,7 +60,7 @@ Z_INTERNAL uint32_t compare256_c(const uint8_t *src0, const uint8_t *src1) {
 
 #if OPTIMAL_CMP >= 32
 /* 16-bit unaligned integer comparison */
-static inline uint32_t compare256_unaligned_16_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_16_static(const uint8_t *src0, const uint8_t *src1) {
     uint32_t len = 0;
 
     do {
@@ -84,24 +84,24 @@ static inline uint32_t compare256_unaligned_16_static(const uint8_t *src0, const
     return 256;
 }
 
-Z_INTERNAL uint32_t compare256_unaligned_16(const uint8_t *src0, const uint8_t *src1) {
-    return compare256_unaligned_16_static(src0, src1);
+Z_INTERNAL uint32_t compare256_16(const uint8_t *src0, const uint8_t *src1) {
+    return compare256_16_static(src0, src1);
 }
 
-#define LONGEST_MATCH longest_match_unaligned_16
-#define COMPARE256 compare256_unaligned_16_static
+#define LONGEST_MATCH longest_match_16
+#define COMPARE256 compare256_16_static
 
 #include "match_tpl.h"
 
 #define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_16
-#define COMPARE256 compare256_unaligned_16_static
+#define LONGEST_MATCH longest_match_slow_16
+#define COMPARE256 compare256_16_static
 
 #include "match_tpl.h"
 
 #ifdef HAVE_BUILTIN_CTZ
 /* 32-bit unaligned integer comparison */
-static inline uint32_t compare256_unaligned_32_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_32_static(const uint8_t *src0, const uint8_t *src1) {
     uint32_t len = 0;
 
     do {
@@ -126,18 +126,18 @@ static inline uint32_t compare256_unaligned_32_static(const uint8_t *src0, const
     return 256;
 }
 
-Z_INTERNAL uint32_t compare256_unaligned_32(const uint8_t *src0, const uint8_t *src1) {
-    return compare256_unaligned_32_static(src0, src1);
+Z_INTERNAL uint32_t compare256_32(const uint8_t *src0, const uint8_t *src1) {
+    return compare256_32_static(src0, src1);
 }
 
-#define LONGEST_MATCH longest_match_unaligned_32
-#define COMPARE256 compare256_unaligned_32_static
+#define LONGEST_MATCH longest_match_32
+#define COMPARE256 compare256_32_static
 
 #include "match_tpl.h"
 
 #define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_32
-#define COMPARE256 compare256_unaligned_32_static
+#define LONGEST_MATCH longest_match_slow_32
+#define COMPARE256 compare256_32_static
 
 #include "match_tpl.h"
 
@@ -145,7 +145,7 @@ Z_INTERNAL uint32_t compare256_unaligned_32(const uint8_t *src0, const uint8_t *
 
 #if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
 /* 64-bit integer comparison */
-static inline uint32_t compare256_unaligned_64_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_64_static(const uint8_t *src0, const uint8_t *src1) {
     uint32_t len = 0;
 
     do {
@@ -170,18 +170,18 @@ static inline uint32_t compare256_unaligned_64_static(const uint8_t *src0, const
     return 256;
 }
 
-Z_INTERNAL uint32_t compare256_unaligned_64(const uint8_t *src0, const uint8_t *src1) {
-    return compare256_unaligned_64_static(src0, src1);
+Z_INTERNAL uint32_t compare256_64(const uint8_t *src0, const uint8_t *src1) {
+    return compare256_64_static(src0, src1);
 }
 
-#define LONGEST_MATCH longest_match_unaligned_64
-#define COMPARE256 compare256_unaligned_64_static
+#define LONGEST_MATCH longest_match_64
+#define COMPARE256 compare256_64_static
 
 #include "match_tpl.h"
 
 #define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_64
-#define COMPARE256 compare256_unaligned_64_static
+#define LONGEST_MATCH longest_match_slow_64
+#define COMPARE256 compare256_64_static
 
 #include "match_tpl.h"
 
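For context on the rename: these helpers compare two 256-byte windows a machine word at a time and return the number of matching bytes (256 on a full match). The loads are expressed portably (memcpy-style), so they are safe at any alignment, which is why the "unaligned" suffix no longer describes anything distinctive. Below is a minimal sketch of the 32-bit flavor, assuming little-endian byte order and the GCC/Clang __builtin_ctz builtin; the _sketch name and body are illustrative, not the exact zlib-ng code.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: compare two 256-byte windows 32 bits at a time.
 * memcpy() expresses a possibly-unaligned load portably; compilers
 * lower it to a plain load on targets that permit one. */
static inline uint32_t compare256_32_sketch(const uint8_t *src0, const uint8_t *src1) {
    uint32_t len = 0;

    do {
        uint32_t sv, mv, diff;

        memcpy(&sv, src0, sizeof(sv));
        memcpy(&mv, src1, sizeof(mv));

        diff = sv ^ mv;
        if (diff) {
            /* Little-endian: the lowest set bit of diff falls in the
             * first mismatching byte, so ctz/8 is its byte index. */
            return len + (uint32_t)__builtin_ctz(diff) / 8;
        }

        src0 += 4, src1 += 4, len += 4;
    } while (len < 256);

    return 256;
}
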
diff --git a/arch/generic/generic_functions.h b/arch/generic/generic_functions.h
index 3569f1f2..9fa31a88 100644
--- a/arch/generic/generic_functions.h
+++ b/arch/generic/generic_functions.h
@@ -29,12 +29,12 @@ uint32_t PREFIX(crc32_braid)(uint32_t crc, const uint8_t *buf, size_t len);
 
 uint32_t compare256_c(const uint8_t *src0, const uint8_t *src1);
 #if OPTIMAL_CMP >= 32
-    uint32_t compare256_unaligned_16(const uint8_t *src0, const uint8_t *src1);
+    uint32_t compare256_16(const uint8_t *src0, const uint8_t *src1);
 # ifdef HAVE_BUILTIN_CTZ
-    uint32_t compare256_unaligned_32(const uint8_t *src0, const uint8_t *src1);
+    uint32_t compare256_32(const uint8_t *src0, const uint8_t *src1);
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-    uint32_t compare256_unaligned_64(const uint8_t *src0, const uint8_t *src1);
+    uint32_t compare256_64(const uint8_t *src0, const uint8_t *src1);
 # endif
 #endif
 
@@ -45,15 +45,15 @@ void slide_hash_c(deflate_state *s);
 
 uint32_t longest_match_c(deflate_state *const s, Pos cur_match);
 uint32_t longest_match_slow_c(deflate_state *const s, Pos cur_match);
 #if OPTIMAL_CMP >= 32
-    uint32_t longest_match_unaligned_16(deflate_state *const s, Pos cur_match);
-    uint32_t longest_match_slow_unaligned_16(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_16(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_slow_16(deflate_state *const s, Pos cur_match);
 # ifdef HAVE_BUILTIN_CTZ
-    uint32_t longest_match_unaligned_32(deflate_state *const s, Pos cur_match);
-    uint32_t longest_match_slow_unaligned_32(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_32(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_slow_32(deflate_state *const s, Pos cur_match);
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-    uint32_t longest_match_unaligned_64(deflate_state *const s, Pos cur_match);
-    uint32_t longest_match_slow_unaligned_64(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_64(deflate_state *const s, Pos cur_match);
+    uint32_t longest_match_slow_64(deflate_state *const s, Pos cur_match);
 # endif
 #endif
 
@@ -61,17 +61,17 @@ uint32_t longest_match_slow_c(deflate_state *const s, Pos cur_match);
 
 // Select generic implementation for longest_match, longest_match_slow, longest_match_slow functions.
 #if OPTIMAL_CMP >= 32
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-#  define longest_match_generic longest_match_unaligned_64
-#  define longest_match_slow_generic longest_match_slow_unaligned_64
-#  define compare256_generic compare256_unaligned_64
+#  define longest_match_generic longest_match_64
+#  define longest_match_slow_generic longest_match_slow_64
+#  define compare256_generic compare256_64
 # elif defined(HAVE_BUILTIN_CTZ)
-#  define longest_match_generic longest_match_unaligned_32
-#  define longest_match_slow_generic longest_match_slow_unaligned_32
-#  define compare256_generic compare256_unaligned_32
+#  define longest_match_generic longest_match_32
+#  define longest_match_slow_generic longest_match_slow_32
+#  define compare256_generic compare256_32
 # else
-#  define longest_match_generic longest_match_unaligned_16
-#  define longest_match_slow_generic longest_match_slow_unaligned_16
-#  define compare256_generic compare256_unaligned_16
+#  define longest_match_generic longest_match_16
+#  define longest_match_slow_generic longest_match_slow_16
+#  define compare256_generic compare256_16
 # endif
 #else
 # define longest_match_generic longest_match_c
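The selection ladder above picks the widest variant the target supports; the 32- and 64-bit variants are gated on HAVE_BUILTIN_CTZ/HAVE_BUILTIN_CTZLL because they locate the first mismatching byte with a count-trailing-zeros builtin. The 16-bit variant needs no such builtin: once a two-byte chunk mismatches, a single extra byte comparison pinpoints which of the two bytes differed. A sketch of that fallback, again illustrative rather than the exact zlib-ng code:

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: 16-bit fallback, no ctz builtin required. */
static inline uint32_t compare256_16_sketch(const uint8_t *src0, const uint8_t *src1) {
    uint32_t len = 0;

    do {
        if (memcmp(src0, src1, 2) != 0)
            return len + (*src0 == *src1); /* +1 if only the second byte differs */
        src0 += 2, src1 += 2, len += 2;
    } while (len < 256);

    return 256;
}
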
diff --git a/compare256_rle.h b/compare256_rle.h
index 9940a284..0c80d962 100644
--- a/compare256_rle.h
+++ b/compare256_rle.h
@@ -46,7 +46,7 @@ static inline uint32_t compare256_rle_c(const uint8_t *src0, const uint8_t *src1
 
 #if OPTIMAL_CMP >= 32
 /* 16-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_16(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_16(const uint8_t *src0, const uint8_t *src1) {
     uint32_t len = 0;
     uint16_t src0_cmp;
 
@@ -72,7 +72,7 @@ static inline uint32_t compare256_rle_unaligned_16(const uint8_t *src0, const ui
 
 #ifdef HAVE_BUILTIN_CTZ
 /* 32-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_32(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_32(const uint8_t *src0, const uint8_t *src1) {
     uint32_t sv, len = 0;
     uint16_t src0_cmp;
 
@@ -104,7 +104,7 @@ static inline uint32_t compare256_rle_unaligned_32(const uint8_t *src0, const ui
 
 #if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
 /* 64-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_64(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_64(const uint8_t *src0, const uint8_t *src1) {
     uint32_t src0_cmp32, len = 0;
     uint16_t src0_cmp;
     uint64_t sv;
diff --git a/deflate_rle.c b/deflate_rle.c
index 551fe02a..8c554457 100644
--- a/deflate_rle.c
+++ b/deflate_rle.c
@@ -12,11 +12,11 @@
 
 #if OPTIMAL_CMP >= 32
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-#  define compare256_rle compare256_rle_unaligned_64
+#  define compare256_rle compare256_rle_64
 # elif defined(HAVE_BUILTIN_CTZ)
-#  define compare256_rle compare256_rle_unaligned_32
+#  define compare256_rle compare256_rle_32
 # else
-#  define compare256_rle compare256_rle_unaligned_16
+#  define compare256_rle compare256_rle_16
 # endif
 #else
 # define compare256_rle compare256_rle_c
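The _rle variants serve deflate_rle.c's run-length matcher: src0 points at a two-byte period that conceptually repeats, so the pattern word is built once up front and only src1 advances. A sketch of the 32-bit flavor under the same little-endian and __builtin_ctz assumptions; names are illustrative:

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: replicate the two-byte period across a 32-bit
 * word once, then compare src1 against that fixed word in 4-byte steps. */
static inline uint32_t compare256_rle_32_sketch(const uint8_t *src0, const uint8_t *src1) {
    uint32_t sv, len = 0;
    uint16_t src0_cmp;

    memcpy(&src0_cmp, src0, sizeof(src0_cmp));
    sv = ((uint32_t)src0_cmp << 16) | src0_cmp; /* pattern repeated twice */

    do {
        uint32_t mv, diff;

        memcpy(&mv, src1, sizeof(mv));
        diff = sv ^ mv;
        if (diff)
            return len + (uint32_t)__builtin_ctz(diff) / 8; /* little-endian */

        src1 += 4, len += 4;
    } while (len < 256);

    return 256;
}
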
diff --git a/test/benchmarks/benchmark_compare256.cc b/test/benchmarks/benchmark_compare256.cc
index a9aa0fca..22c9b4f1 100644
--- a/test/benchmarks/benchmark_compare256.cc
+++ b/test/benchmarks/benchmark_compare256.cc
@@ -67,12 +67,12 @@ BENCHMARK_COMPARE256(native, native_compare256, 1);
 #else
 
 #if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-BENCHMARK_COMPARE256(unaligned_16, compare256_unaligned_16, 1);
+BENCHMARK_COMPARE256(16, compare256_16, 1);
 # if defined(HAVE_BUILTIN_CTZ)
-BENCHMARK_COMPARE256(unaligned_32, compare256_unaligned_32, 1);
+BENCHMARK_COMPARE256(32, compare256_32, 1);
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-BENCHMARK_COMPARE256(unaligned_64, compare256_unaligned_64, 1);
+BENCHMARK_COMPARE256(64, compare256_64, 1);
 # endif
 #endif
 #if defined(X86_SSE2) && defined(HAVE_BUILTIN_CTZ)
diff --git a/test/benchmarks/benchmark_compare256_rle.cc b/test/benchmarks/benchmark_compare256_rle.cc
index 9eb299f3..82441629 100644
--- a/test/benchmarks/benchmark_compare256_rle.cc
+++ b/test/benchmarks/benchmark_compare256_rle.cc
@@ -62,11 +62,11 @@ public:
 BENCHMARK_COMPARE256_RLE(c, compare256_rle_c, 1);
 
 #if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-BENCHMARK_COMPARE256_RLE(unaligned_16, compare256_rle_unaligned_16, 1);
+BENCHMARK_COMPARE256_RLE(16, compare256_rle_16, 1);
 # if defined(HAVE_BUILTIN_CTZ)
-BENCHMARK_COMPARE256_RLE(unaligned_32, compare256_rle_unaligned_32, 1);
+BENCHMARK_COMPARE256_RLE(32, compare256_rle_32, 1);
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-BENCHMARK_COMPARE256_RLE(unaligned_64, compare256_rle_unaligned_64, 1);
+BENCHMARK_COMPARE256_RLE(64, compare256_rle_64, 1);
 # endif
 #endif
diff --git a/test/test_compare256.cc b/test/test_compare256.cc
index 97e28470..da25a75c 100644
--- a/test/test_compare256.cc
+++ b/test/test_compare256.cc
@@ -66,12 +66,12 @@ TEST_COMPARE256(native, native_compare256, 1)
 #else
 
 #if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-TEST_COMPARE256(unaligned_16, compare256_unaligned_16, 1)
+TEST_COMPARE256(16, compare256_16, 1)
 # if defined(HAVE_BUILTIN_CTZ)
-TEST_COMPARE256(unaligned_32, compare256_unaligned_32, 1)
+TEST_COMPARE256(32, compare256_32, 1)
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-TEST_COMPARE256(unaligned_64, compare256_unaligned_64, 1)
+TEST_COMPARE256(64, compare256_64, 1)
 # endif
 #endif
 
diff --git a/test/test_compare256_rle.cc b/test/test_compare256_rle.cc
index 6c5d9d4f..65e80a56 100644
--- a/test/test_compare256_rle.cc
+++ b/test/test_compare256_rle.cc
@@ -53,11 +53,11 @@ static inline void compare256_rle_match_check(compare256_rle_func compare256_rle
 TEST_COMPARE256_RLE(c, compare256_rle_c, 1)
 
 #if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-TEST_COMPARE256_RLE(unaligned_16, compare256_rle_unaligned_16, 1)
+TEST_COMPARE256_RLE(16, compare256_rle_16, 1)
 # if defined(HAVE_BUILTIN_CTZ)
-TEST_COMPARE256_RLE(unaligned_32, compare256_rle_unaligned_32, 1)
+TEST_COMPARE256_RLE(32, compare256_rle_32, 1)
 # endif
 # if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-TEST_COMPARE256_RLE(unaligned_64, compare256_rle_unaligned_64, 1)
+TEST_COMPARE256_RLE(64, compare256_rle_64, 1)
 # endif
 #endif