endif()
#
-# Macro to add the given intrinsics option to the specified files,
+# Macro to set the given intrinsics option on the specified files,
# or ${NATIVEFLAG} (-march=native) if that is appropriate and possible.
#
-macro(add_intrinsics_option flag)
+macro(set_intrinsics_option flag)
if(WITH_NATIVE_INSTRUCTIONS AND NATIVEFLAG)
set_property(SOURCE ${ARGN} PROPERTY COMPILE_FLAGS ${NATIVEFLAG})
else()
add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_MEMCHUNK -DARM_NEON_SLIDEHASH)
set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/memchunk_neon.c ${ARCHDIR}/slide_neon.c)
list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS})
- add_intrinsics_option("${NEONFLAG}" ${NEON_SRCS})
+ set_intrinsics_option("${NEONFLAG}" ${NEON_SRCS})
if(MSVC)
add_definitions(-D__ARM_NEON__)
endif()
if(WITH_ACLE AND NOT MSVC)
add_definitions(-DARM_ACLE_CRC_HASH)
set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
- add_intrinsics_option("${ACLEFLAG}" ${ACLE_SRCS})
+ set_intrinsics_option("${ACLEFLAG}" ${ACLE_SRCS})
list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS})
add_feature_info(ACLE_CRC 1 "Support ACLE optimized CRC hash generation, using \"${ACLEFLAG}\"")
endif()
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c)
set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c)
list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS})
- add_intrinsics_option("${POWER8FLAG}" ${POWER8_SRCS})
+ set_intrinsics_option("${POWER8FLAG}" ${POWER8_SRCS})
endif()
elseif(BASEARCH_S360_FOUND AND "${ARCH}" MATCHES "s390x")
if(WITH_DFLTCC_DEFLATE OR WITH_DFLTCC_INFLATE)
list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c)
add_feature_info(AVX2_ADLER32 1 "Support AVX2-accelerated adler32, using \"${AVX2FLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS})
- add_intrinsics_option("${AVX2FLAG}" ${AVX2_SRCS})
+ set_intrinsics_option("${AVX2FLAG}" ${AVX2_SRCS})
endif()
if(WITH_SSE4 AND (HAVE_SSE42CRC_INLINE_ASM OR HAVE_SSE42CRC_INTRIN))
add_definitions(-DX86_SSE42_CRC_HASH)
set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
add_feature_info(SSE42_CRC 1 "Support SSE4.2 optimized CRC hash generation, using \"${SSE4FLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
- add_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
+ set_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
if(HAVE_SSE42CRC_INTRIN)
add_definitions(-DX86_SSE42_CRC_INTRIN)
endif()
set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
add_feature_info(SSE42_COMPARE258 1 "Support SSE4.2 optimized compare258, using \"${SSE4FLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
- add_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
+ set_intrinsics_option("${SSE4FLAG}" ${SSE42_SRCS})
endif()
if(WITH_SSE2 AND HAVE_SSE2_INTRIN)
add_definitions(-DX86_SSE2 -DX86_SSE2_MEMCHUNK)
set(SSE2_SRCS ${ARCHDIR}/memchunk_sse.c ${ARCHDIR}/slide_sse.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS})
if(NOT ${ARCH} MATCHES "x86_64")
- add_intrinsics_option("${SSE2FLAG}" ${SSE2_SRCS})
+ set_intrinsics_option("${SSE2FLAG}" ${SSE2_SRCS})
add_feature_info(FORCE_SSE2 FORCE_SSE2 "Assume CPU is SSE2 capable")
if(FORCE_SSE2)
add_definitions(-DX86_NOCHECK_SSE2)
set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c)
add_feature_info(SSSE3_ADLER32 1 "Support SSSE3-accelerated adler32, using \"${SSSE3FLAG}\"")
list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS})
- add_intrinsics_option("${SSSE3FLAG}" ${SSSE3_SRCS})
+ set_intrinsics_option("${SSSE3FLAG}" ${SSSE3_SRCS})
endif()
if(WITH_PCLMULQDQ AND HAVE_PCLMULQDQ_INTRIN)
add_definitions(-DX86_PCLMULQDQ_CRC)
set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
- add_intrinsics_option("${SSE4FLAG} ${PCLMULFLAG}" ${PCLMULQDQ_SRCS})
+ set_intrinsics_option("${SSE4FLAG} ${PCLMULFLAG}" ${PCLMULQDQ_SRCS})
if(HAVE_SSE42CRC_INLINE_ASM)
add_feature_info(PCLMUL_CRC 1 "Support CRC hash generation using PCLMULQDQ, using \"${PCLMULFLAG}\"")
else()