#if OPTIMAL_CMP >= 32
/* 16-bit unaligned integer comparison */
-static inline uint32_t compare256_unaligned_16_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_16_static(const uint8_t *src0, const uint8_t *src1) {
uint32_t len = 0;
do {
return 256;
}
-Z_INTERNAL uint32_t compare256_unaligned_16(const uint8_t *src0, const uint8_t *src1) {
- return compare256_unaligned_16_static(src0, src1);
+Z_INTERNAL uint32_t compare256_16(const uint8_t *src0, const uint8_t *src1) {
+ return compare256_16_static(src0, src1);
}
-#define LONGEST_MATCH longest_match_unaligned_16
-#define COMPARE256 compare256_unaligned_16_static
+#define LONGEST_MATCH longest_match_16
+#define COMPARE256 compare256_16_static
#include "match_tpl.h"
#define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_16
-#define COMPARE256 compare256_unaligned_16_static
+#define LONGEST_MATCH longest_match_slow_16
+#define COMPARE256 compare256_16_static
#include "match_tpl.h"
#ifdef HAVE_BUILTIN_CTZ
/* 32-bit unaligned integer comparison */
-static inline uint32_t compare256_unaligned_32_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_32_static(const uint8_t *src0, const uint8_t *src1) {
uint32_t len = 0;
do {
return 256;
}
-Z_INTERNAL uint32_t compare256_unaligned_32(const uint8_t *src0, const uint8_t *src1) {
- return compare256_unaligned_32_static(src0, src1);
+Z_INTERNAL uint32_t compare256_32(const uint8_t *src0, const uint8_t *src1) {
+ return compare256_32_static(src0, src1);
}
-#define LONGEST_MATCH longest_match_unaligned_32
-#define COMPARE256 compare256_unaligned_32_static
+#define LONGEST_MATCH longest_match_32
+#define COMPARE256 compare256_32_static
#include "match_tpl.h"
#define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_32
-#define COMPARE256 compare256_unaligned_32_static
+#define LONGEST_MATCH longest_match_slow_32
+#define COMPARE256 compare256_32_static
#include "match_tpl.h"
#if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
/* 64-bit unaligned integer comparison */
-static inline uint32_t compare256_unaligned_64_static(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_64_static(const uint8_t *src0, const uint8_t *src1) {
uint32_t len = 0;
do {
return 256;
}
-Z_INTERNAL uint32_t compare256_unaligned_64(const uint8_t *src0, const uint8_t *src1) {
- return compare256_unaligned_64_static(src0, src1);
+Z_INTERNAL uint32_t compare256_64(const uint8_t *src0, const uint8_t *src1) {
+ return compare256_64_static(src0, src1);
}
-#define LONGEST_MATCH longest_match_unaligned_64
-#define COMPARE256 compare256_unaligned_64_static
+#define LONGEST_MATCH longest_match_64
+#define COMPARE256 compare256_64_static
#include "match_tpl.h"
#define LONGEST_MATCH_SLOW
-#define LONGEST_MATCH longest_match_slow_unaligned_64
-#define COMPARE256 compare256_unaligned_64_static
+#define LONGEST_MATCH longest_match_slow_64
+#define COMPARE256 compare256_64_static
#include "match_tpl.h"
uint32_t compare256_c(const uint8_t *src0, const uint8_t *src1);
#if OPTIMAL_CMP >= 32
- uint32_t compare256_unaligned_16(const uint8_t *src0, const uint8_t *src1);
+ uint32_t compare256_16(const uint8_t *src0, const uint8_t *src1);
# ifdef HAVE_BUILTIN_CTZ
- uint32_t compare256_unaligned_32(const uint8_t *src0, const uint8_t *src1);
+ uint32_t compare256_32(const uint8_t *src0, const uint8_t *src1);
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
- uint32_t compare256_unaligned_64(const uint8_t *src0, const uint8_t *src1);
+ uint32_t compare256_64(const uint8_t *src0, const uint8_t *src1);
# endif
#endif
uint32_t longest_match_c(deflate_state *const s, Pos cur_match);
uint32_t longest_match_slow_c(deflate_state *const s, Pos cur_match);
#if OPTIMAL_CMP >= 32
- uint32_t longest_match_unaligned_16(deflate_state *const s, Pos cur_match);
- uint32_t longest_match_slow_unaligned_16(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_16(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_slow_16(deflate_state *const s, Pos cur_match);
# ifdef HAVE_BUILTIN_CTZ
- uint32_t longest_match_unaligned_32(deflate_state *const s, Pos cur_match);
- uint32_t longest_match_slow_unaligned_32(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_32(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_slow_32(deflate_state *const s, Pos cur_match);
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
- uint32_t longest_match_unaligned_64(deflate_state *const s, Pos cur_match);
- uint32_t longest_match_slow_unaligned_64(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_64(deflate_state *const s, Pos cur_match);
+ uint32_t longest_match_slow_64(deflate_state *const s, Pos cur_match);
# endif
#endif
// Select generic implementation for the longest_match, longest_match_slow and compare256 functions.
#if OPTIMAL_CMP >= 32
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-# define longest_match_generic longest_match_unaligned_64
-# define longest_match_slow_generic longest_match_slow_unaligned_64
-# define compare256_generic compare256_unaligned_64
+# define longest_match_generic longest_match_64
+# define longest_match_slow_generic longest_match_slow_64
+# define compare256_generic compare256_64
# elif defined(HAVE_BUILTIN_CTZ)
-# define longest_match_generic longest_match_unaligned_32
-# define longest_match_slow_generic longest_match_slow_unaligned_32
-# define compare256_generic compare256_unaligned_32
+# define longest_match_generic longest_match_32
+# define longest_match_slow_generic longest_match_slow_32
+# define compare256_generic compare256_32
# else
-# define longest_match_generic longest_match_unaligned_16
-# define longest_match_slow_generic longest_match_slow_unaligned_16
-# define compare256_generic compare256_unaligned_16
+# define longest_match_generic longest_match_16
+# define longest_match_slow_generic longest_match_slow_16
+# define compare256_generic compare256_16
# endif
#else
# define longest_match_generic longest_match_c
#if OPTIMAL_CMP >= 32
/* 16-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_16(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_16(const uint8_t *src0, const uint8_t *src1) {
uint32_t len = 0;
uint16_t src0_cmp;
#ifdef HAVE_BUILTIN_CTZ
/* 32-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_32(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_32(const uint8_t *src0, const uint8_t *src1) {
uint32_t sv, len = 0;
uint16_t src0_cmp;
#if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
/* 64-bit unaligned integer comparison */
-static inline uint32_t compare256_rle_unaligned_64(const uint8_t *src0, const uint8_t *src1) {
+static inline uint32_t compare256_rle_64(const uint8_t *src0, const uint8_t *src1) {
uint32_t src0_cmp32, len = 0;
uint16_t src0_cmp;
uint64_t sv;
#if OPTIMAL_CMP >= 32
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-# define compare256_rle compare256_rle_unaligned_64
+# define compare256_rle compare256_rle_64
# elif defined(HAVE_BUILTIN_CTZ)
-# define compare256_rle compare256_rle_unaligned_32
+# define compare256_rle compare256_rle_32
# else
-# define compare256_rle compare256_rle_unaligned_16
+# define compare256_rle compare256_rle_16
# endif
#else
# define compare256_rle compare256_rle_c
#else
#if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-BENCHMARK_COMPARE256(unaligned_16, compare256_unaligned_16, 1);
+BENCHMARK_COMPARE256(16, compare256_16, 1);
# if defined(HAVE_BUILTIN_CTZ)
-BENCHMARK_COMPARE256(unaligned_32, compare256_unaligned_32, 1);
+BENCHMARK_COMPARE256(32, compare256_32, 1);
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-BENCHMARK_COMPARE256(unaligned_64, compare256_unaligned_64, 1);
+BENCHMARK_COMPARE256(64, compare256_64, 1);
# endif
#endif
#if defined(X86_SSE2) && defined(HAVE_BUILTIN_CTZ)
BENCHMARK_COMPARE256_RLE(c, compare256_rle_c, 1);
#if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-BENCHMARK_COMPARE256_RLE(unaligned_16, compare256_rle_unaligned_16, 1);
+BENCHMARK_COMPARE256_RLE(16, compare256_rle_16, 1);
# if defined(HAVE_BUILTIN_CTZ)
-BENCHMARK_COMPARE256_RLE(unaligned_32, compare256_rle_unaligned_32, 1);
+BENCHMARK_COMPARE256_RLE(32, compare256_rle_32, 1);
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-BENCHMARK_COMPARE256_RLE(unaligned_64, compare256_rle_unaligned_64, 1);
+BENCHMARK_COMPARE256_RLE(64, compare256_rle_64, 1);
# endif
#endif
#else
#if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-TEST_COMPARE256(unaligned_16, compare256_unaligned_16, 1)
+TEST_COMPARE256(16, compare256_16, 1)
# if defined(HAVE_BUILTIN_CTZ)
-TEST_COMPARE256(unaligned_32, compare256_unaligned_32, 1)
+TEST_COMPARE256(32, compare256_32, 1)
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-TEST_COMPARE256(unaligned_64, compare256_unaligned_64, 1)
+TEST_COMPARE256(64, compare256_64, 1)
# endif
#endif
TEST_COMPARE256_RLE(c, compare256_rle_c, 1)
#if BYTE_ORDER == LITTLE_ENDIAN && OPTIMAL_CMP >= 32
-TEST_COMPARE256_RLE(unaligned_16, compare256_rle_unaligned_16, 1)
+TEST_COMPARE256_RLE(16, compare256_rle_16, 1)
# if defined(HAVE_BUILTIN_CTZ)
-TEST_COMPARE256_RLE(unaligned_32, compare256_rle_unaligned_32, 1)
+TEST_COMPARE256_RLE(32, compare256_rle_32, 1)
# endif
# if defined(HAVE_BUILTIN_CTZLL) && OPTIMAL_CMP >= 64
-TEST_COMPARE256_RLE(unaligned_64, compare256_rle_unaligned_64, 1)
+TEST_COMPARE256_RLE(64, compare256_rle_64, 1)
# endif
#endif