git.ipfire.org Git - thirdparty/gcc.git/commitdiff
libstdc++: Replace use of incorrect non-temporal store
author: Matthias Kretz <m.kretz@gsi.de>
Fri, 2 Jun 2023 11:44:22 +0000 (13:44 +0200)
committer: Matthias Kretz <m.kretz@gsi.de>
Tue, 6 Jun 2023 13:45:14 +0000 (15:45 +0200)
The call to the base implementation sometimes didn't find a matching
signature because the _Abi parameter of _SimdImpl* was "wrong" after
conversion. It has to call into <new ABI tag>::_SimdImpl instead of the
current ABI tag's _SimdImpl. This also reduces the number of possible
template instantiations.

Signed-off-by: Matthias Kretz <m.kretz@gsi.de>
libstdc++-v3/ChangeLog:

PR libstdc++/110054
* include/experimental/bits/simd_builtin.h (_S_masked_store):
Call into the deduced ABI's _SimdImpl after conversion.
* include/experimental/bits/simd_x86.h (_S_masked_store_nocvt):
Don't use _mm_maskmoveu_si128. Use the generic fall-back
implementation. Also fix masked stores without SSE2, which
were not doing anything before.

libstdc++-v3/include/experimental/bits/simd_builtin.h
libstdc++-v3/include/experimental/bits/simd_x86.h

index 8337fa2d9a6cf3c6583f99b90f47a366dc35ddf3..64ef6efaf8ca4cb3b9a091d17655d40a5b950a9f 100644 (file)
@@ -1628,7 +1628,7 @@ template <typename _Abi, typename>
            if constexpr (_UW_size == _TV_size) // one convert+store
              {
                const _UW __converted = __convert<_UW>(__v);
-               _SuperImpl::_S_masked_store_nocvt(
+               _UAbi::_SimdImpl::_S_masked_store_nocvt(
                  __converted, __mem,
                  _UAbi::_MaskImpl::template _S_convert<
                    __int_for_sizeof_t<_Up>>(__k));
@@ -1643,7 +1643,7 @@ template <typename _Abi, typename>
                const array<_UV, _NAllStores> __converted
                  = __convert_all<_UV, _NAllStores>(__v);
                __execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
-                 _SuperImpl::_S_masked_store_nocvt(
+                 _UAbi::_SimdImpl::_S_masked_store_nocvt(
                    _UW(__converted[__i]), __mem + __i * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
@@ -1651,7 +1651,7 @@ template <typename _Abi, typename>
                });
                if constexpr (_NAllStores
                              > _NFullStores) // one partial at the end
-                 _SuperImpl::_S_masked_store_nocvt(
+                 _UAbi::_SimdImpl::_S_masked_store_nocvt(
                    _UW(__converted[_NFullStores]),
                    __mem + _NFullStores * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
index 77d2f84ab713b5d1b32e898b89531571aeea4023..2e301e45677e55dd6ea53aa8ea7ab8de9886c45b 100644 (file)
@@ -1106,31 +1106,6 @@ template <typename _Abi, typename>
                else
                  _mm512_mask_storeu_pd(__mem, __k, __vi);
              }
-#if 0 // with KNL either sizeof(_Tp) >= 4 or sizeof(_vi) <= 32
-      // with Skylake-AVX512, __have_avx512bw is true
-         else if constexpr (__have_sse2)
-           {
-             using _M   = __vector_type_t<_Tp, _Np>;
-             using _MVT = _VectorTraits<_M>;
-             _mm_maskmoveu_si128(__auto_bitcast(__extract<0, 4>(__v._M_data)),
-                                 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(__k._M_data)),
-                                 reinterpret_cast<char*>(__mem));
-             _mm_maskmoveu_si128(__auto_bitcast(__extract<1, 4>(__v._M_data)),
-                                 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-                                   __k._M_data >> 1 * _MVT::_S_full_size)),
-                                 reinterpret_cast<char*>(__mem) + 1 * 16);
-             _mm_maskmoveu_si128(__auto_bitcast(__extract<2, 4>(__v._M_data)),
-                                 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-                                   __k._M_data >> 2 * _MVT::_S_full_size)),
-                                 reinterpret_cast<char*>(__mem) + 2 * 16);
-             if constexpr (_Np > 48 / sizeof(_Tp))
-               _mm_maskmoveu_si128(
-                 __auto_bitcast(__extract<3, 4>(__v._M_data)),
-                 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
-                   __k._M_data >> 3 * _MVT::_S_full_size)),
-                 reinterpret_cast<char*>(__mem) + 3 * 16);
-           }
-#endif
            else
              __assert_unreachable<_Tp>();
          }
@@ -1233,8 +1208,8 @@ template <typename _Abi, typename>
            else if constexpr (__have_avx && sizeof(_Tp) == 8)
              _mm_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
                               __vector_bitcast<double>(__vi));
-           else if constexpr (__have_sse2)
-             _mm_maskmoveu_si128(__vi, __ki, reinterpret_cast<char*>(__mem));
+           else
+             _Base::_S_masked_store_nocvt(__v, __mem, __k);
          }
        else if constexpr (sizeof(__v) == 32)
          {
@@ -1259,13 +1234,8 @@ template <typename _Abi, typename>
            else if constexpr (__have_avx && sizeof(_Tp) == 8)
              _mm256_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
                                  __vector_bitcast<double>(__v));
-           else if constexpr (__have_sse2)
-             {
-               _mm_maskmoveu_si128(__lo128(__vi), __lo128(__ki),
-                                   reinterpret_cast<char*>(__mem));
-               _mm_maskmoveu_si128(__hi128(__vi), __hi128(__ki),
-                                   reinterpret_cast<char*>(__mem) + 16);
-             }
+           else
+             _Base::_S_masked_store_nocvt(__v, __mem, __k);
          }
        else
          __assert_unreachable<_Tp>();