git.ipfire.org Git - thirdparty/vectorscan.git/commitdiff
use __builtin_constant_p() instead for arm as well
author    Konstantinos Margaritis <konstantinos@vectorcamp.gr>  Thu, 25 Nov 2021 06:20:53 +0000 (06:20 +0000)
committer Konstantinos Margaritis <konstantinos@vectorcamp.gr>  Thu, 25 Nov 2021 06:20:53 +0000 (06:20 +0000)
src/util/arch/arm/simd_utils.h
src/util/supervector/arch/arm/impl.cpp
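
For context (not part of the commit): __builtin_constant_p(x) is a GCC/Clang builtin that evaluates to 1 only when the compiler can prove x is a compile-time constant, so the branches added below are taken exactly when the immediate-only NEON intrinsics (vextq_s8/vextq_u8) are usable after inlining and constant propagation; otherwise the runtime fallbacks (palignr_imm, vshr_128, vshl_128, the switch) still apply, as does the old behaviour on compilers without the builtin (the HAVE__BUILTIN_CONSTANT_P guard). A minimal, self-contained sketch of the same pattern, using a plain shift and hypothetical helper names instead of the NEON intrinsics:

#include <stdio.h>

/* Hypothetical fallback, only for illustration; the real code falls back
 * to palignr_imm()/vshr_128()/vshl_128() or a switch over the offset. */
static inline unsigned shr_generic(unsigned v, unsigned n) {
    /* runtime path: valid for any n, constant or not */
    return (n >= 32) ? 0u : (v >> n);
}

static inline unsigned shr(unsigned v, unsigned n) {
#if defined(__GNUC__) || defined(__clang__)
    if (__builtin_constant_p(n) && n < 32) {
        /* taken only when the compiler can prove n is a constant,
         * e.g. after inlining a call such as shr(x, 4) */
        return v >> n;
    }
#endif
    return shr_generic(v, n);
}

int main(void) {
    unsigned x = 0xF0u;
    unsigned n = 4;
    printf("%u\n", shr(x, 4));  /* literal argument: constant-folded path */
    printf("%u\n", shr(x, n));  /* may take the generic runtime path */
    return 0;
}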

src/util/arch/arm/simd_utils.h
index 630cac932ada086d1bb6625a561f8f3d1278a22b..4c68b48526f9b14ea1d91b22d22259cda25eec9f 100644
@@ -328,11 +328,12 @@ m128 palignr_imm(m128 r, m128 l, int offset) {
 
 static really_really_inline
 m128 palignr(m128 r, m128 l, int offset) {
-#if defined(HS_OPTIMIZE)
-    return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset);
-#else
-    return palignr_imm(r, l, offset);
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+    if (__builtin_constant_p(offset)) {
+        return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset);
+    }
 #endif
+    return palignr_imm(r, l, offset);
 }
 #undef CASE_ALIGN_VECTORS
 
src/util/supervector/arch/arm/impl.cpp
index f804abeb6abba229e10b3b45134a421999abec81..980f0b3931d14af984f823f5988307a02a0b1d4a 100644
@@ -482,34 +482,27 @@ really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const
     return vshr_128(N);
 }
 
-#ifdef HS_OPTIMIZE
-template <>
-really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const
-{
-    return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)};
-}
-#else
 template <>
 really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const
 {
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+    if (__builtin_constant_p(N)) {
+         return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)};
+    }
+#endif
     return vshr_128(N);
 }
-#endif
 
-#ifdef HS_OPTIMIZE
-template <>
-really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const
-{
-    return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)};
-}
-#else
 template <>
 really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const
 {
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+    if (__builtin_constant_p(N)) {
+        return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)};
+    }
+#endif
     return vshl_128(N);
 }
-#endif
-
 
 template<>
 really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N)
@@ -547,20 +540,18 @@ really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint
     return mask & v;
 }
 
-#ifdef HS_OPTIMIZE
 template<>
 really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
 {
-    if (offset == 16) {
-        return *this;
-    } else {
-        return {vextq_u8(other.u.u8x16[0], u.u8x16[0], offset)};
+#if defined(HAVE__BUILTIN_CONSTANT_P)
+    if (__builtin_constant_p(offset)) {
+        if (offset == 16) {
+            return *this;
+        } else {
+            return {vextq_u8(other.u.u8x16[0], u.u8x16[0], offset)};
+        }
     }
-}
-#else
-template<>
-really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset)
-{
+#endif
     switch(offset) {
     case 0: return other; break;
     case 1: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 1)}; break;
@@ -583,7 +574,6 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in
     }
     return *this;
 }
-#endif
 
 template<>
 template<>