]> git.ipfire.org Git - thirdparty/gcc.git/blob - libstdc++-v3/include/experimental/bits/simd_converter.h
Match: Add overloaded types_match to avoid code dup [NFC]
[thirdparty/gcc.git] / libstdc++-v3 / include / experimental / bits / simd_converter.h
1 // Generic simd conversions -*- C++ -*-
2
3 // Copyright (C) 2020-2024 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 #ifndef _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_
26 #define _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_
27
28 #if __cplusplus >= 201703L
29
30 _GLIBCXX_SIMD_BEGIN_NAMESPACE
31
32 template <typename _Arg, typename _Ret, typename _To, size_t _Np>
33 _Ret __converter_fallback(_Arg __a)
34 {
35 _Ret __ret{};
36 __execute_n_times<_Np>(
37 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
38 __ret._M_set(__i, static_cast<_To>(__a[__i]));
39 });
40 return __ret;
41 }
42
43 // _SimdConverter scalar -> scalar {{{
44 template <typename _From, typename _To>
45 struct _SimdConverter<_From, simd_abi::scalar, _To, simd_abi::scalar,
46 enable_if_t<!is_same_v<_From, _To>>>
47 {
48 _GLIBCXX_SIMD_INTRINSIC constexpr _To operator()(_From __a) const noexcept
49 { return static_cast<_To>(__a); }
50 };
51
52 // }}}
53 // _SimdConverter scalar -> "native" {{{
54 template <typename _From, typename _To, typename _Abi>
55 struct _SimdConverter<_From, simd_abi::scalar, _To, _Abi,
56 enable_if_t<!is_same_v<_Abi, simd_abi::scalar>>>
57 {
58 using _Ret = typename _Abi::template __traits<_To>::_SimdMember;
59
60 template <typename... _More>
61 _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
62 operator()(_From __a, _More... __more) const noexcept
63 {
64 static_assert(sizeof...(_More) + 1 == _Abi::template _S_size<_To>);
65 static_assert(conjunction_v<is_same<_From, _More>...>);
66 return __make_vector<_To>(__a, __more...);
67 }
68 };
69
70 // }}}
71 // _SimdConverter "native non-sve 1" -> "native non-sve 2" {{{
72 template <typename _From, typename _To, typename _AFrom, typename _ATo>
73 struct _SimdConverter<
74 _From, _AFrom, _To, _ATo,
75 enable_if_t<!disjunction_v<
76 __is_fixed_size_abi<_AFrom>, __is_fixed_size_abi<_ATo>,
77 is_same<_AFrom, simd_abi::scalar>, is_same<_ATo, simd_abi::scalar>,
78 conjunction<is_same<_From, _To>, is_same<_AFrom, _ATo>>>
79 && !(__is_sve_abi<_AFrom>() || __is_sve_abi<_ATo>())>>
80 {
81 using _Arg = typename _AFrom::template __traits<_From>::_SimdMember;
82 using _Ret = typename _ATo::template __traits<_To>::_SimdMember;
83 using _V = __vector_type_t<_To, simd_size_v<_To, _ATo>>;
84
85 template <typename... _More>
86 _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
87 operator()(_Arg __a, _More... __more) const noexcept
88 { return __vector_convert<_V>(__a, __more...); }
89 };
90
91 // }}}
92 // _SimdConverter "native 1" -> "native 2" {{{
93 template <typename _From, typename _To, typename _AFrom, typename _ATo>
94 struct _SimdConverter<
95 _From, _AFrom, _To, _ATo,
96 enable_if_t<!disjunction_v<
97 __is_fixed_size_abi<_AFrom>, __is_fixed_size_abi<_ATo>,
98 is_same<_AFrom, simd_abi::scalar>, is_same<_ATo, simd_abi::scalar>,
99 conjunction<is_same<_From, _To>, is_same<_AFrom, _ATo>>>
100 && (__is_sve_abi<_AFrom>() || __is_sve_abi<_ATo>())
101 >>
102 {
103 using _Arg = typename _AFrom::template __traits<_From>::_SimdMember;
104 using _Ret = typename _ATo::template __traits<_To>::_SimdMember;
105
106 _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
107 operator()(_Arg __x) const noexcept
108 { return __converter_fallback<_Arg, _Ret, _To, simd_size_v<_To, _ATo>>(__x); }
109 };
110
111 // }}}
112 // _SimdConverter scalar -> fixed_size<1> {{{1
113 template <typename _From, typename _To>
114 struct _SimdConverter<_From, simd_abi::scalar, _To, simd_abi::fixed_size<1>,
115 void>
116 {
117 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_To, simd_abi::scalar>
118 operator()(_From __x) const noexcept
119 { return {static_cast<_To>(__x)}; }
120 };
121
122 // _SimdConverter fixed_size<1> -> scalar {{{1
123 template <typename _From, typename _To>
124 struct _SimdConverter<_From, simd_abi::fixed_size<1>, _To, simd_abi::scalar,
125 void>
126 {
127 _GLIBCXX_SIMD_INTRINSIC constexpr _To
128 operator()(_SimdTuple<_From, simd_abi::scalar> __x) const noexcept
129 { return {static_cast<_To>(__x.first)}; }
130 };
131
// _SimdConverter fixed_size<_Np> -> fixed_size<_Np> {{{1
// Converts the element type while keeping ABI (fixed_size) and element count
// (_Np). Both sides are _SimdTuples, but their chunking into native
// registers may differ, so the conversion must handle equal-, coarser- and
// finer-grained destination tuples.
template <typename _From, typename _To, int _Np>
  struct _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To,
			simd_abi::fixed_size<_Np>,
			enable_if_t<!is_same_v<_From, _To>>>
  {
    using _Ret = __fixed_size_storage_t<_To, _Np>;
    using _Arg = __fixed_size_storage_t<_From, _Np>;

    _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
    operator()(const _Arg& __x) const noexcept
    {
      // NOTE(review): this branch is dead code — the partial specialization
      // above is constrained with enable_if_t<!is_same_v<_From, _To>>.
      if constexpr (is_same_v<_From, _To>)
	return __x;

      // fallback to sequential when sve is available
      else if constexpr (__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);

      // special case (optimize) int signedness casts: same size and both
      // integral means the value representation can simply be reinterpreted
      else if constexpr (sizeof(_From) == sizeof(_To)
			   && is_integral_v<_From> && is_integral_v<_To>)
	return __bit_cast<_Ret>(__x);

      // special case if all ABI tags in _Ret are scalar: convert each
      // subscript individually and rebuild the tuple element by element
      else if constexpr (__is_scalar_abi<typename _Ret::_FirstAbi>())
	{
	  return __call_with_subscripts(
	    __x, make_index_sequence<_Np>(),
	    [](auto... __values) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Ret {
	      return __make_simd_tuple<_To, decltype((void) __values,
						     simd_abi::scalar())...>(
		       static_cast<_To>(__values)...);
	    });
	}

      // from one vector to one vector: first chunks have equal element
      // counts, so convert them pairwise and recurse on the tails
      else if constexpr (_Arg::_S_first_size == _Ret::_S_first_size)
	{
	  _SimdConverter<_From, typename _Arg::_FirstAbi, _To,
			 typename _Ret::_FirstAbi>
	    __native_cvt;
	  if constexpr (_Arg::_S_tuple_size == 1)
	    return {__native_cvt(__x.first)};
	  else
	    {
	      // Recurse: the remaining elements form a smaller fixed_size
	      // conversion problem of the same shape.
	      constexpr size_t _NRemain = _Np - _Arg::_S_first_size;
	      _SimdConverter<_From, simd_abi::fixed_size<_NRemain>, _To,
			     simd_abi::fixed_size<_NRemain>>
		__remainder_cvt;
	      return {__native_cvt(__x.first), __remainder_cvt(__x.second)};
	    }
	}

      // from one vector to multiple vectors: one source chunk fans out into
      // several destination chunks; up to four rounds of __convert_all are
      // attempted before giving up (unrolled because each round may use a
      // different destination chunk size)
      else if constexpr (_Arg::_S_first_size > _Ret::_S_first_size)
	{
	  const auto __multiple_return_chunks
	    = __convert_all<__vector_type_t<_To, _Ret::_S_first_size>>(
		__x.first);
	  constexpr auto __converted = __multiple_return_chunks.size()
				       * _Ret::_FirstAbi::template _S_size<_To>;
	  constexpr auto __remaining = _Np - __converted;
	  if constexpr (_Arg::_S_tuple_size == 1 && __remaining == 0)
	    return __to_simd_tuple<_To, _Np>(__multiple_return_chunks);
	  else if constexpr (_Arg::_S_tuple_size == 1)
	    { // e.g. <int, 3> -> <double, 2, 1> or <short, 7> -> <double, 4, 2,
	      // 1>
	      using _RetRem
		= __remove_cvref_t<decltype(__simd_tuple_pop_front<__converted>(
		    _Ret()))>;
	      const auto __return_chunks2
		= __convert_all<__vector_type_t<_To, _RetRem::_S_first_size>, 0,
				__converted>(__x.first);
	      constexpr auto __converted2
		= __converted
		  + __return_chunks2.size() * _RetRem::_S_first_size;
	      if constexpr (__converted2 == _Np)
		return __to_simd_tuple<_To, _Np>(__multiple_return_chunks,
						 __return_chunks2);
	      else
		{
		  // Second round did not finish either; pop the converted
		  // prefix off the destination tuple type and try again.
		  using _RetRem2 = __remove_cvref_t<
		    decltype(__simd_tuple_pop_front<__return_chunks2.size()
						      * _RetRem::_S_first_size>(
		      _RetRem()))>;
		  const auto __return_chunks3 = __convert_all<
		    __vector_type_t<_To, _RetRem2::_S_first_size>, 0,
		    __converted2>(__x.first);
		  constexpr auto __converted3
		    = __converted2
		      + __return_chunks3.size() * _RetRem2::_S_first_size;
		  if constexpr (__converted3 == _Np)
		    return __to_simd_tuple<_To, _Np>(__multiple_return_chunks,
						     __return_chunks2,
						     __return_chunks3);
		  else
		    {
		      // Fourth and final round; anything still unconverted
		      // after this is considered unreachable.
		      using _RetRem3
			= __remove_cvref_t<decltype(__simd_tuple_pop_front<
						      __return_chunks3.size()
						      * _RetRem2::_S_first_size>(
			    _RetRem2()))>;
		      const auto __return_chunks4 = __convert_all<
			__vector_type_t<_To, _RetRem3::_S_first_size>, 0,
			__converted3>(__x.first);
		      constexpr auto __converted4
			= __converted3
			  + __return_chunks4.size() * _RetRem3::_S_first_size;
		      if constexpr (__converted4 == _Np)
			return __to_simd_tuple<_To, _Np>(
				 __multiple_return_chunks, __return_chunks2,
				 __return_chunks3, __return_chunks4);
		      else
			__assert_unreachable<_To>();
		    }
		}
	    }
	  else
	    {
	      // Source tuple has further chunks: convert the first chunk's
	      // fan-out, then recurse on the tail and concatenate.
	      constexpr size_t _NRemain = _Np - _Arg::_S_first_size;
	      _SimdConverter<_From, simd_abi::fixed_size<_NRemain>, _To,
			     simd_abi::fixed_size<_NRemain>>
		__remainder_cvt;
	      return __simd_tuple_concat(
		       __to_simd_tuple<_To, _Arg::_S_first_size>(
			 __multiple_return_chunks),
		       __remainder_cvt(__x.second));
	    }
	}

      // from multiple vectors to one vector
      // _Arg::_S_first_size < _Ret::_S_first_size
      // a) heterogeneous input at the end of the tuple (possible with partial
      // native registers in _Ret): convert per element via subscripts
      else if constexpr (_Ret::_S_tuple_size == 1
			   && _Np % _Arg::_S_first_size != 0)
	{
	  static_assert(_Ret::_FirstAbi::template _S_is_partial<_To>);
	  return _Ret{__generate_from_n_evaluations<
	    _Np, typename _VectorTraits<typename _Ret::_FirstType>::type>(
	    [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      return static_cast<_To>(__x[__i]);
	    })};
	}
      else
	{
	  // b) homogeneous source chunks: gather __n of them, convert them
	  // into one destination chunk, and recurse for the rest.
	  static_assert(_Arg::_S_tuple_size > 1);
	  constexpr auto __n
	    = __div_roundup(_Ret::_S_first_size, _Arg::_S_first_size);
	  return __call_with_n_evaluations<__n>(
	    [&__x](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      // assuming _Arg Abi tags for all __i are _Arg::_FirstAbi
	      _SimdConverter<_From, typename _Arg::_FirstAbi, _To,
			     typename _Ret::_FirstAbi>
		__native_cvt;
	      if constexpr (_Ret::_S_tuple_size == 1)
		return _Ret{__native_cvt(__uncvted...)};
	      else
		return _Ret{
		  __native_cvt(__uncvted...),
		  _SimdConverter<
		    _From, simd_abi::fixed_size<_Np - _Ret::_S_first_size>, _To,
		    simd_abi::fixed_size<_Np - _Ret::_S_first_size>>()(
		    __simd_tuple_pop_front<_Ret::_S_first_size>(__x))};
	    }, [&__x](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	      return __get_tuple_at<__i>(__x);
	    });
	}
    }
  };
303
// _SimdConverter "native" -> fixed_size<_Np> {{{1
// i.e. 1 register to ? registers
// Converts one native register into fixed_size storage with the same number
// of elements (enforced by the static_assert below).
template <typename _From, typename _Ap, typename _To, int _Np>
  struct _SimdConverter<_From, _Ap, _To, simd_abi::fixed_size<_Np>,
			enable_if_t<!__is_fixed_size_abi_v<_Ap>>>
  {
    static_assert(
      _Np == simd_size_v<_From, _Ap>,
      "_SimdConverter to fixed_size only works for equal element counts");

    using _Ret = __fixed_size_storage_t<_To, _Np>;
    using _Arg = typename _SimdTraits<_From, _Ap>::_SimdMember;

    _GLIBCXX_SIMD_INTRINSIC constexpr _Ret
    operator()(_Arg __x) const noexcept
    {
      // Element-wise fallback when SVE is in use.
      if constexpr (__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);
      // Destination is a single chunk: one whole-register conversion.
      else if constexpr (_Ret::_S_tuple_size == 1)
	return {__vector_convert<typename _Ret::_FirstType::_BuiltinType>(__x)};
      else
	{
	  // Destination needs multiple chunks: re-chunk the source register
	  // into fixed_size<_Np> storage of _From, then reuse the
	  // fixed_size -> fixed_size converter.
	  using _FixedNp = simd_abi::fixed_size<_Np>;
	  _SimdConverter<_From, _FixedNp, _To, _FixedNp> __fixed_cvt;
	  using _FromFixedStorage = __fixed_size_storage_t<_From, _Np>;
	  if constexpr (_FromFixedStorage::_S_tuple_size == 1)
	    return __fixed_cvt(_FromFixedStorage{__x});
	  else if constexpr (_FromFixedStorage::_S_tuple_size == 2)
	    {
	      // Split the register's bytes into the two tuple members;
	      // memcpy is used because the halves may have different types.
	      _FromFixedStorage __tmp;
	      static_assert(sizeof(__tmp) <= sizeof(__x));
	      __builtin_memcpy(&__tmp.first, &__x, sizeof(__tmp.first));
	      __builtin_memcpy(&__tmp.second.first,
			       reinterpret_cast<const char*>(&__x)
				 + sizeof(__tmp.first),
			       sizeof(__tmp.second.first));
	      return __fixed_cvt(__tmp);
	    }
	  else
	    // More than two chunks from a single register is not expected.
	    __assert_unreachable<_From>();
	}
    }
  };
347
// _SimdConverter fixed_size<_Np> -> "native" {{{1
// i.e. ? registers to 1 register
// Converts fixed_size storage into one native register with the same number
// of elements (enforced by the static_assert below).
template <typename _From, int _Np, typename _To, typename _Ap>
  struct _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To, _Ap,
			enable_if_t<!__is_fixed_size_abi_v<_Ap>>>
  {
    static_assert(
      _Np == simd_size_v<_To, _Ap>,
      "_SimdConverter to fixed_size only works for equal element counts");

    using _Arg = __fixed_size_storage_t<_From, _Np>;
    using _Ret = typename _SimdTraits<_To, _Ap>::_SimdMember;

    _GLIBCXX_SIMD_INTRINSIC constexpr
    _Ret
    operator()(const _Arg& __x) const noexcept
    {
      // Element-wise fallback when SVE is in use.
      if constexpr(__have_sve)
	return __converter_fallback<_Arg, _Ret, _To, _Np>(__x);
      // Single source chunk: one whole-register conversion.
      else if constexpr (_Arg::_S_tuple_size == 1)
	return __vector_convert<__vector_type_t<_To, _Np>>(__x.first);
      // All source chunks have the same type: feed them to one
      // __vector_convert call (or direct braced init when each member is
      // itself convertible to _To, i.e. the scalar-chunk case).
      else if constexpr (_Arg::_S_is_homogeneous)
	return __call_with_n_evaluations<_Arg::_S_tuple_size>(
	  [](auto... __members) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	    if constexpr ((is_convertible_v<decltype(__members), _To> && ...))
	      return __vector_type_t<_To, _Np>{static_cast<_To>(__members)...};
	    else
	      return __vector_convert<__vector_type_t<_To, _Np>>(__members...);
	  }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
	    return __get_tuple_at<__i>(__x);
	  });
      // The destination, stored as fixed_size of _To, would be a single
      // chunk: convert via fixed_size -> fixed_size and take that chunk.
      else if constexpr (__fixed_size_storage_t<_To, _Np>::_S_tuple_size == 1)
	{
	  _SimdConverter<_From, simd_abi::fixed_size<_Np>, _To,
			 simd_abi::fixed_size<_Np>>
	    __fixed_cvt;
	  return __fixed_cvt(__x).first;
	}
      else
	{
	  // Heterogeneous source: gather the elements one by one into a
	  // single register of _From, then convert that register.
	  const _SimdWrapper<_From, _Np> __xv
	    = __generate_from_n_evaluations<_Np, __vector_type_t<_From, _Np>>(
		[&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
	  return __vector_convert<__vector_type_t<_To, _Np>>(__xv);
	}
    }
  };
395
396 // }}}1
397 _GLIBCXX_SIMD_END_NAMESPACE
398 #endif // __cplusplus >= 201703L
399 #endif // _GLIBCXX_EXPERIMENTAL_SIMD_CONVERTER_H_
400
401 // vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80