endif()
#
-# Macro to add either the given intrinsics option to the global compiler options,
+# Macro to add either the given intrinsics option to the specified files,
# or ${NATIVEFLAG} (-march=native) when WITH_NATIVE_INSTRUCTIONS is enabled
# and the compiler supports it.
-# An alternative version of this macro would take a file argument, and set ${flag}
-# only for that file as opposed to ${NATIVEFLAG} globally, to limit side-effect of
-# using ${flag} globally.
#
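+#
+# Example (illustrative): build just the NEON adler32 source with ${NEONFLAG}:
+#   add_intrinsics_option("${NEONFLAG}" ${ARCHDIR}/adler32_neon.c)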
macro(add_intrinsics_option flag)
if(WITH_NATIVE_INSTRUCTIONS AND NATIVEFLAG)
- if(NOT "${CMAKE_C_FLAGS} " MATCHES ".*${NATIVEFLAG} .*")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${NATIVEFLAG}")
- endif()
+ set_property(SOURCE ${ARGN} PROPERTY COMPILE_FLAGS "${NATIVEFLAG}")
else()
- if(NOT "${CMAKE_C_FLAGS} " MATCHES ".*${flag} .*")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
- endif()
+ set_property(SOURCE ${ARGN} PROPERTY COMPILE_FLAGS "${flag}")
endif()
endmacro()
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/armfeature.c)
if(WITH_NEON)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/slide_neon.c)
add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_SLIDEHASH)
- add_intrinsics_option("${NEONFLAG}")
+ set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/slide_neon.c)
+ list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS})
+ add_intrinsics_option("${NEONFLAG}" ${NEON_SRCS})
if(MSVC)
add_definitions(-D__ARM_NEON__)
endif()
add_feature_info(NEON_SLIDEHASH 1 "Support NEON instructions in slide_hash, using \"${NEONFLAG}\"")
endif()
if(WITH_ACLE)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
add_definitions(-DARM_ACLE_CRC_HASH)
+ set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
# On 32-bit ARM always apply ACLEFLAG; on aarch64 only when NEON is disabled
if("${ARCH}" MATCHES "arm" OR NOT WITH_NEON)
- add_intrinsics_option("${ACLEFLAG}")
+ add_intrinsics_option("${ACLEFLAG}" ${ACLE_SRCS})
endif()
+ list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS})
add_feature_info(ACLE_CRC 1 "Support ACLE optimized CRC hash generation, using \"${ACLEFLAG}\"")
endif()
elseif(BASEARCH_PPC_FOUND)
add_definitions(-DPOWER_FEATURES)
add_definitions(-DPOWER8_VSX_ADLER32)
add_definitions(-DPOWER8_VSX_SLIDEHASH)
- set(ZLIB_POWER8_SRCS
- ${ARCHDIR}/adler32_power8.c
- ${ARCHDIR}/slide_hash_power8.c)
- set_source_files_properties(
- ${ZLIB_POWER8_SRCS}
- PROPERTIES COMPILE_FLAGS ${POWER8FLAG})
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power.h)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c ${ZLIB_POWER8_SRCS})
+ list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c)
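+ # Route the POWER8 sources through the shared macro (instead of
+ # set_source_files_properties) so they also honor WITH_NATIVE_INSTRUCTIONS.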
+ set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c)
+ list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS})
+ add_intrinsics_option("${POWER8FLAG}" ${POWER8_SRCS})
endif()
elseif(BASEARCH_S360_FOUND AND "${ARCH}" MATCHES "s390x")
if(WITH_DFLTCC_DEFLATE OR WITH_DFLTCC_INFLATE)
endif()
if(WITH_AVX2 AND HAVE_AVX2_INTRIN)
add_definitions(-DX86_AVX2 -DX86_AVX2_ADLER32)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/slide_avx.c ${ARCHDIR}/adler32_avx.c)
+ set(AVX2_SRCS ${ARCHDIR}/slide_avx.c)
add_feature_info(AVX2_SLIDEHASH 1 "Support AVX2 optimized slide_hash, using \"${AVX2FLAG}\"")
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/compare258_avx.c)
+ list(APPEND AVX2_SRCS ${ARCHDIR}/compare258_avx.c)
add_feature_info(AVX2_COMPARE258 1 "Support AVX2 optimized compare258, using \"${AVX2FLAG}\"")
+ list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c)
add_feature_info(AVX2_ADLER32 1 "Support AVX2-accelerated adler32, using \"${AVX2FLAG}\"")
- add_intrinsics_option("${AVX2FLAG}")
+ list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS})
+ add_intrinsics_option("${AVX2FLAG}" ${AVX2_SRCS})
endif()
if(WITH_SSE4 AND (HAVE_SSE42CRC_INLINE_ASM OR HAVE_SSE42CRC_INTRIN))
add_definitions(-DX86_SSE42_CRC_HASH)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/insert_string_sse.c)
+ set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
add_feature_info(SSE42_CRC 1 "Support SSE4.2 optimized CRC hash generation, using \"${SSE4FLAG}\"")
- add_intrinsics_option("${SSE4FLAG}")
+ list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
+ add_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
if(HAVE_SSE42CRC_INTRIN)
add_definitions(-DX86_SSE42_CRC_INTRIN)
endif()
endif()
if(HAVE_SSE42CMPSTR_INTRIN)
add_definitions(-DX86_SSE42_CMP_STR)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/compare258_sse.c)
+ set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
add_feature_info(SSE42_COMPARE258 1 "Support SSE4.2 optimized compare258, using \"${SSE4FLAG}\"")
+ list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
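+ # compare258_sse.c no longer inherits a globally added SSE4 flag, so it
+ # needs its own per-file add_intrinsics_option() call.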
+ add_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
endif()
if(WITH_SSE2 AND HAVE_SSE2_INTRIN)
add_definitions(-DX86_SSE2)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/slide_sse.c)
+ set(SSE2_SRCS ${ARCHDIR}/slide_sse.c)
+ list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS})
if(NOT ${ARCH} MATCHES "x86_64")
- add_intrinsics_option("${SSE2FLAG}")
+ add_intrinsics_option("${SSE2FLAG}" ${SSE2_SRCS})
add_feature_info(FORCE_SSE2 FORCE_SSE2 "Assume CPU is SSE2 capable")
if(FORCE_SSE2)
add_definitions(-DX86_NOCHECK_SSE2)
endif()
if(WITH_SSSE3 AND HAVE_SSSE3_INTRIN)
add_definitions(-DX86_SSSE3 -DX86_SSSE3_ADLER32)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/adler32_ssse3.c)
+ set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c)
add_feature_info(SSSE3_ADLER32 1 "Support SSSE3-accelerated adler32, using \"${SSSE3FLAG}\"")
- add_intrinsics_option("${SSSE3FLAG}")
+ list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS})
+ add_intrinsics_option("${SSSE3FLAG}" ${SSSE3_SRCS})
endif()
if(WITH_PCLMULQDQ AND HAVE_PCLMULQDQ_INTRIN)
add_definitions(-DX86_PCLMULQDQ_CRC)
- list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/crc_folding.c)
- add_intrinsics_option("${PCLMULFLAG}")
+ set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
+ list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
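+ # set_property() overwrites COMPILE_FLAGS rather than appending, so pass
+ # both flags in one call: crc_folding.c needs SSE4.2 and PCLMULQDQ together.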
+ add_intrinsics_option("${SSE4FLAG} ${PCLMULFLAG}" ${PCLMULQDQ_SRCS})
if(HAVE_SSE42CRC_INLINE_ASM)
add_feature_info(PCLMUL_CRC 1 "Support CRC hash generation using PCLMULQDQ, using \"${PCLMULFLAG}\"")
else()
/* adler32 */
extern uint32_t adler32_c(uint32_t adler, const unsigned char *buf, size_t len);
-#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(ARM_NEON_ADLER32)
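+/* With per-file intrinsics flags this unit is no longer compiled with the
+   NEON flags, so __ARM_NEON__/__ARM_NEON are not defined here; the
+   build-system define ARM_NEON_ADLER32 alone signals NEON availability. */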
+#ifdef ARM_NEON_ADLER32
extern uint32_t adler32_neon(uint32_t adler, const unsigned char *buf, size_t len);
#endif
#ifdef X86_SSSE3_ADLER32
/* CRC32 */
ZLIB_INTERNAL uint32_t crc32_generic(uint32_t, const unsigned char *, uint64_t);
-#ifdef __ARM_FEATURE_CRC32
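+/* Likewise, __ARM_FEATURE_CRC32 is only defined in units built with the ACLE
+   flags; the build-system define ARM_ACLE_CRC_HASH is authoritative here. */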
+#ifdef ARM_ACLE_CRC_HASH
extern uint32_t crc32_acle(uint32_t, const unsigned char *, uint64_t);
#endif
#ifdef X86_SSE42_CRC_HASH
if (x86_cpu_has_sse42)
functable.insert_string = &insert_string_sse4;
-#elif defined(__ARM_FEATURE_CRC32) && defined(ARM_ACLE_CRC_HASH)
+#elif defined(ARM_ACLE_CRC_HASH)
if (arm_cpu_has_crc32)
functable.insert_string = &insert_string_acle;
#endif
#ifdef X86_SSE42_CRC_HASH
if (x86_cpu_has_sse42)
functable.quick_insert_string = &quick_insert_string_sse4;
-#elif defined(__ARM_FEATURE_CRC32) && defined(ARM_ACLE_CRC_HASH)
+#elif defined(ARM_ACLE_CRC_HASH)
if (arm_cpu_has_crc32)
functable.quick_insert_string = &quick_insert_string_acle;
#endif
functable.adler32 = &adler32_c;
cpu_check_features();
-#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(ARM_NEON_ADLER32)
+#ifdef ARM_NEON_ADLER32
# ifndef ARM_NOCHECK_NEON
if (arm_cpu_has_neon)
# endif
if (sizeof(void *) == sizeof(ptrdiff_t)) {
#if BYTE_ORDER == LITTLE_ENDIAN
functable.crc32 = crc32_little;
-# if defined(__ARM_FEATURE_CRC32) && defined(ARM_ACLE_CRC_HASH)
+# if defined(ARM_ACLE_CRC_HASH)
if (arm_cpu_has_crc32)
functable.crc32 = crc32_acle;
# endif