1 // Simd fixed_size ABI specific implementations -*- C++ -*-
2
3 // Copyright (C) 2020-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /*
26 * The fixed_size ABI gives the following guarantees:
27 * - simd objects are passed via the stack
28 * - memory layout of `simd<_Tp, _Np>` is equivalent to `array<_Tp, _Np>`
29  * - alignment of `simd<_Tp, _Np>` is `_Np * sizeof(_Tp)` if _Np is a
30 * power-of-2 value, otherwise `std::__bit_ceil(_Np * sizeof(_Tp))` (Note:
31 * if the alignment were to exceed the system/compiler maximum, it is bounded
32 * to that maximum)
33 * - simd_mask objects are passed like bitset<_Np>
34 * - memory layout of `simd_mask<_Tp, _Np>` is equivalent to `bitset<_Np>`
35 * - alignment of `simd_mask<_Tp, _Np>` is equal to the alignment of
36 * `bitset<_Np>`
37 */
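// Illustration only (not part of the header; assumes a target that supports
// 32-byte alignment):
//   using V = std::experimental::fixed_size_simd<float, 8>;
//   static_assert(sizeof(V) == sizeof(std::array<float, 8>)); // same layout
//   static_assert(alignof(V) == 32); // _Np * sizeof(_Tp), a power of 2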
38
39 #ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
40 #define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
41
42 #if __cplusplus >= 201703L
43
44 #include <array>
45
46 _GLIBCXX_SIMD_BEGIN_NAMESPACE
47
48 // __simd_tuple_element {{{
49 template <size_t _I, typename _Tp>
50 struct __simd_tuple_element;
51
52 template <typename _Tp, typename _A0, typename... _As>
53 struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
54 { using type = simd<_Tp, _A0>; };
55
56 template <size_t _I, typename _Tp, typename _A0, typename... _As>
57 struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
58 {
59 using type =
60 typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type;
61 };
62
63 template <size_t _I, typename _Tp>
64 using __simd_tuple_element_t = typename __simd_tuple_element<_I, _Tp>::type;
65
66 // }}}
67 // __simd_tuple_concat {{{
68
69 template <typename _Tp, typename... _A0s, typename... _A1s>
70 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
71 __simd_tuple_concat(const _SimdTuple<_Tp, _A0s...>& __left,
72 const _SimdTuple<_Tp, _A1s...>& __right)
73 {
74 if constexpr (sizeof...(_A0s) == 0)
75 return __right;
76 else if constexpr (sizeof...(_A1s) == 0)
77 return __left;
78 else
79 return {__left.first, __simd_tuple_concat(__left.second, __right)};
80 }
81
82 template <typename _Tp, typename _A10, typename... _A1s>
83 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10,
84 _A1s...>
85 __simd_tuple_concat(const _Tp& __left,
86 const _SimdTuple<_Tp, _A10, _A1s...>& __right)
87 { return {__left, __right}; }
88
89 // }}}
90 // __simd_tuple_pop_front {{{
91 // Returns the _SimdTuple nested inside __x after dropping its first _Np elements.
92 // Precondition: _Np must match the combined size of the dropped leading members.
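// Example (illustrative; assume _A0 covers 4 elements and _A1 covers 2):
//   _SimdTuple<float, _A0, _A1> __x;
//   __simd_tuple_pop_front<4>(__x); // yields a reference to __x.second,
//                                   // i.e. a _SimdTuple<float, _A1>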
93 template <size_t _Np, typename _Tp>
94 _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
95 __simd_tuple_pop_front(_Tp&& __x)
96 {
97 if constexpr (_Np == 0)
98 return static_cast<_Tp&&>(__x);
99 else
100 {
101 using _Up = __remove_cvref_t<_Tp>;
102 static_assert(_Np >= _Up::_S_first_size);
103 return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
104 }
105 }
106
107 // }}}
108 // __get_simd_at<_Np> {{{1
109 struct __as_simd {};
110
111 struct __as_simd_tuple {};
112
113 template <typename _Tp, typename _A0, typename... _Abis>
114 _GLIBCXX_SIMD_INTRINSIC constexpr simd<_Tp, _A0>
115 __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
116 _SizeConstant<0>)
117 { return {__private_init, __t.first}; }
118
119 template <typename _Tp, typename _A0, typename... _Abis>
120 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
121 __simd_tuple_get_impl(__as_simd_tuple,
122 const _SimdTuple<_Tp, _A0, _Abis...>& __t,
123 _SizeConstant<0>)
124 { return __t.first; }
125
126 template <typename _Tp, typename _A0, typename... _Abis>
127 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
128 __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t,
129 _SizeConstant<0>)
130 { return __t.first; }
131
132 template <typename _R, size_t _Np, typename _Tp, typename... _Abis>
133 _GLIBCXX_SIMD_INTRINSIC constexpr auto
134 __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t,
135 _SizeConstant<_Np>)
136 { return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
137
138 template <size_t _Np, typename _Tp, typename... _Abis>
139 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
140 __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t,
141 _SizeConstant<_Np>)
142 {
143 return __simd_tuple_get_impl(__as_simd_tuple(), __t.second,
144 _SizeConstant<_Np - 1>());
145 }
146
147 template <size_t _Np, typename _Tp, typename... _Abis>
148 _GLIBCXX_SIMD_INTRINSIC constexpr auto
149 __get_simd_at(const _SimdTuple<_Tp, _Abis...>& __t)
150 { return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }
151
152 // }}}
153 // __get_tuple_at<_Np> {{{
154 template <size_t _Np, typename _Tp, typename... _Abis>
155 _GLIBCXX_SIMD_INTRINSIC constexpr auto
156 __get_tuple_at(const _SimdTuple<_Tp, _Abis...>& __t)
157 {
158 return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
159 }
160
161 template <size_t _Np, typename _Tp, typename... _Abis>
162 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
163 __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
164 {
165 return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
166 }
167
168 // __tuple_element_meta {{{1
169 template <typename _Tp, typename _Abi, size_t _Offset>
170 struct __tuple_element_meta : public _Abi::_SimdImpl
171 {
172 static_assert(is_same_v<typename _Abi::_SimdImpl::abi_type,
173 _Abi>); // this fails e.g. when _SimdImpl is an
174 // alias for _SimdImplBuiltin<_DifferentAbi>
175 using value_type = _Tp;
176 using abi_type = _Abi;
177 using _Traits = _SimdTraits<_Tp, _Abi>;
178 using _MaskImpl = typename _Abi::_MaskImpl;
179 using _MaskMember = typename _Traits::_MaskMember;
180 using simd_type = simd<_Tp, _Abi>;
181 static constexpr size_t _S_offset = _Offset;
182 static constexpr size_t _S_size() { return simd_size<_Tp, _Abi>::value; }
183 static constexpr _MaskImpl _S_mask_impl = {};
184
185 template <size_t _Np, bool _Sanitized>
186 _GLIBCXX_SIMD_INTRINSIC static auto
187 _S_submask(_BitMask<_Np, _Sanitized> __bits)
188 { return __bits.template _M_extract<_Offset, _S_size()>(); }
189
190 template <size_t _Np, bool _Sanitized>
191 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
192 _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
193 {
194 return _MaskImpl::template _S_convert<_Tp>(
195 __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
196 }
197
198 _GLIBCXX_SIMD_INTRINSIC static _ULLong
199 _S_mask_to_shifted_ullong(_MaskMember __k)
200 { return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
201 };
202
203 template <size_t _Offset, typename _Tp, typename _Abi, typename... _As>
204 __tuple_element_meta<_Tp, _Abi, _Offset>
205 __make_meta(const _SimdTuple<_Tp, _Abi, _As...>&)
206 { return {}; }
207
208 // }}}1
209 // _WithOffset wrapper class {{{
210 template <size_t _Offset, typename _Base>
211 struct _WithOffset : public _Base
212 {
213 static inline constexpr size_t _S_offset = _Offset;
214
215 _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
216 {
217 return reinterpret_cast<char*>(this)
218 + _S_offset * sizeof(typename _Base::value_type);
219 }
220
221 _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
222 {
223 return reinterpret_cast<const char*>(this)
224 + _S_offset * sizeof(typename _Base::value_type);
225 }
226 };
227
228 // make _WithOffset<_WithOffset> ill-formed to use:
229 template <size_t _O0, size_t _O1, typename _Base>
230 struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};
231
232 template <size_t _Offset, typename _Tp>
233 decltype(auto)
234 __add_offset(_Tp& __base)
235 { return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(__base); }
236
237 template <size_t _Offset, typename _Tp>
238 decltype(auto)
239 __add_offset(const _Tp& __base)
240 {
241 return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(
242 __base);
243 }
244
245 template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
246 decltype(auto)
247 __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
248 {
249 return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(
250 static_cast<_Tp&>(__base));
251 }
252
253 template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
254 decltype(auto)
255 __add_offset(const _WithOffset<_ExistingOffset, _Tp>& __base)
256 {
257 return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&>(
258 static_cast<const _Tp&>(__base));
259 }
260
261 template <typename _Tp>
262 constexpr inline size_t __offset = 0;
263
264 template <size_t _Offset, typename _Tp>
265 constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
266 = _WithOffset<_Offset, _Tp>::_S_offset;
267
268 template <typename _Tp>
269 constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;
270
271 template <typename _Tp>
272 constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;
273
274 template <typename _Tp>
275 constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
276
277 // }}}
278 // _SimdTuple specializations {{{1
279 // empty {{{2
280 template <typename _Tp>
281 struct _SimdTuple<_Tp>
282 {
283 using value_type = _Tp;
284 static constexpr size_t _S_tuple_size = 0;
285 static constexpr size_t _S_size() { return 0; }
286 };
287
288 // _SimdTupleData {{{2
289 template <typename _FirstType, typename _SecondType>
290 struct _SimdTupleData
291 {
292 _FirstType first;
293 _SecondType second;
294
295 _GLIBCXX_SIMD_INTRINSIC
296 constexpr bool _M_is_constprop() const
297 {
298 if constexpr (is_class_v<_FirstType>)
299 return first._M_is_constprop() && second._M_is_constprop();
300 else
301 return __builtin_constant_p(first) && second._M_is_constprop();
302 }
303 };
304
305 template <typename _FirstType, typename _Tp>
306 struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
307 {
308 _FirstType first;
309 static constexpr _SimdTuple<_Tp> second = {};
310
311 _GLIBCXX_SIMD_INTRINSIC
312 constexpr bool _M_is_constprop() const
313 {
314 if constexpr (is_class_v<_FirstType>)
315 return first._M_is_constprop();
316 else
317 return __builtin_constant_p(first);
318 }
319 };
320
321 // 1 or more {{{2
322 template <typename _Tp, typename _Abi0, typename... _Abis>
323 struct _SimdTuple<_Tp, _Abi0, _Abis...>
324 : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
325 _SimdTuple<_Tp, _Abis...>>
326 {
327 static_assert(!__is_fixed_size_abi_v<_Abi0>);
328 using value_type = _Tp;
329 using _FirstType = typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
330 using _FirstAbi = _Abi0;
331 using _SecondType = _SimdTuple<_Tp, _Abis...>;
332 static constexpr size_t _S_tuple_size = sizeof...(_Abis) + 1;
333
334 static constexpr size_t _S_size()
335 { return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }
336
337 static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
338 static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);
339
340 using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
341 _SimdTuple<_Tp, _Abis...>>;
342 using _Base::first;
343 using _Base::second;
344
345 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple() = default;
346 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(const _SimdTuple&) = default;
347 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple& operator=(const _SimdTuple&)
348 = default;
349
350 template <typename _Up>
351 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x)
352 : _Base{static_cast<_Up&&>(__x)} {}
353
354 template <typename _Up, typename _Up2>
355 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _Up2&& __y)
356 : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
357
358 template <typename _Up>
359 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
360 : _Base{static_cast<_Up&&>(__x)} {}
361
362 _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
363 { return reinterpret_cast<char*>(this); }
364
365 _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
366 { return reinterpret_cast<const char*>(this); }
367
368 template <size_t _Np>
369 _GLIBCXX_SIMD_INTRINSIC constexpr auto& _M_at()
370 {
371 if constexpr (_Np == 0)
372 return first;
373 else
374 return second.template _M_at<_Np - 1>();
375 }
376
377 template <size_t _Np>
378 _GLIBCXX_SIMD_INTRINSIC constexpr const auto& _M_at() const
379 {
380 if constexpr (_Np == 0)
381 return first;
382 else
383 return second.template _M_at<_Np - 1>();
384 }
385
386 template <size_t _Np>
387 _GLIBCXX_SIMD_INTRINSIC constexpr auto _M_simd_at() const
388 {
389 if constexpr (_Np == 0)
390 return simd<_Tp, _Abi0>(__private_init, first);
391 else
392 return second.template _M_simd_at<_Np - 1>();
393 }
394
395 template <size_t _Offset = 0, typename _Fp>
396 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple
397 _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
398 {
399 auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
400 if constexpr (_S_tuple_size == 1)
401 return {__first};
402 else
403 return {__first,
404 _SecondType::_S_generate(
405 static_cast<_Fp&&>(__gen),
406 _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
407 }
408
409 template <size_t _Offset = 0, typename _Fp, typename... _More>
410 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
411 _M_apply_wrapped(_Fp&& __fun, const _More&... __more) const
412 {
413 auto&& __first
414 = __fun(__make_meta<_Offset>(*this), first, __more.first...);
415 if constexpr (_S_tuple_size == 1)
416 return {__first};
417 else
418 return {
419 __first,
420 second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
421 static_cast<_Fp&&>(__fun), __more.second...)};
422 }
423
424 template <typename _Tup>
425 _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
426 _M_extract_argument(_Tup&& __tup) const
427 {
428 using _TupT = typename __remove_cvref_t<_Tup>::value_type;
429 if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
430 return __tup.first;
431 else if (__builtin_is_constant_evaluated())
432 return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate([&](
433 auto __meta) constexpr {
434 return __meta._S_generator(
435 [&](auto __i) constexpr { return __tup[__i]; },
436 static_cast<_TupT*>(nullptr));
437 });
438 else
439 return [&]() {
440 __fixed_size_storage_t<_TupT, _S_first_size> __r;
441 __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
442 sizeof(__r));
443 return __r;
444 }();
445 }
446
447 template <typename _Tup>
448 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
449 _M_skip_argument(_Tup&& __tup) const
450 {
451 static_assert(_S_tuple_size > 1);
452 using _Up = __remove_cvref_t<_Tup>;
453 constexpr size_t __off = __offset<_Up>;
454 if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
455 return __tup.second;
456 else if constexpr (_S_first_size > _Up::_S_first_size
457 && _S_first_size % _Up::_S_first_size == 0
458 && __off == 0)
459 return __simd_tuple_pop_front<_S_first_size>(__tup);
460 else if constexpr (_S_first_size + __off < _Up::_S_first_size)
461 return __add_offset<_S_first_size>(__tup);
462 else if constexpr (_S_first_size + __off == _Up::_S_first_size)
463 return __tup.second;
464 else
465 __assert_unreachable<_Tup>();
466 }
467
468 template <size_t _Offset, typename... _More>
469 _GLIBCXX_SIMD_INTRINSIC constexpr void
470 _M_assign_front(const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
471 {
472 static_assert(_Offset == 0);
473 first = __x.first;
474 if constexpr (sizeof...(_More) > 0)
475 {
476 static_assert(sizeof...(_Abis) >= sizeof...(_More));
477 second.template _M_assign_front<0>(__x.second);
478 }
479 }
480
481 template <size_t _Offset>
482 _GLIBCXX_SIMD_INTRINSIC constexpr void
483 _M_assign_front(const _FirstType& __x) &
484 {
485 static_assert(_Offset == 0);
486 first = __x;
487 }
488
489 template <size_t _Offset, typename... _As>
490 _GLIBCXX_SIMD_INTRINSIC constexpr void
491 _M_assign_front(const _SimdTuple<_Tp, _As...>& __x) &
492 {
493 __builtin_memcpy(_M_as_charptr() + _Offset * sizeof(value_type),
494 __x._M_as_charptr(),
495 sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
496 }
497
498 /*
499 * Iterate over the `first` members of this _SimdTuple (recursively) and call
500 * __fun for each of them. If additional arguments are passed via __more, chunk
501 * them into _SimdTuple or __vector_type_t objects with the same number of values.
502 */
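// Illustrative expansion (assuming a two-member tuple and one additional
// argument __m with the same member layout as *this):
//   _M_apply_per_chunk(__fun, __m)
//     == { __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first, __m.first),
//          second._M_apply_per_chunk(__fun, __m.second) }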
503 template <typename _Fp, typename... _More>
504 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple
505 _M_apply_per_chunk(_Fp&& __fun, _More&&... __more) const
506 {
507 if constexpr ((...
508 || conjunction_v<
509 is_lvalue_reference<_More>,
510 negation<is_const<remove_reference_t<_More>>>>) )
511 {
512 // need to write back at least one of __more after calling __fun
513 auto&& __first = [&](auto... __args) constexpr
514 {
515 auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
516 __args...);
517 [[maybe_unused]] auto&& __ignore_me = {(
518 [](auto&& __dst, const auto& __src) {
519 if constexpr (is_assignable_v<decltype(__dst),
520 decltype(__dst)>)
521 {
522 __dst.template _M_assign_front<__offset<decltype(__dst)>>(
523 __src);
524 }
525 }(static_cast<_More&&>(__more), __args),
526 0)...};
527 return __r;
528 }
529 (_M_extract_argument(__more)...);
530 if constexpr (_S_tuple_size == 1)
531 return {__first};
532 else
533 return {__first,
534 second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
535 _M_skip_argument(__more)...)};
536 }
537 else
538 {
539 auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
540 _M_extract_argument(__more)...);
541 if constexpr (_S_tuple_size == 1)
542 return {__first};
543 else
544 return {__first,
545 second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
546 _M_skip_argument(__more)...)};
547 }
548 }
549
550 template <typename _R = _Tp, typename _Fp, typename... _More>
551 _GLIBCXX_SIMD_INTRINSIC auto _M_apply_r(_Fp&& __fun,
552 const _More&... __more) const
553 {
554 auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
555 __more.first...);
556 if constexpr (_S_tuple_size == 1)
557 return __first;
558 else
559 return __simd_tuple_concat<_R>(
560 __first, second.template _M_apply_r<_R>(static_cast<_Fp&&>(__fun),
561 __more.second...));
562 }
563
564 template <typename _Fp, typename... _More>
565 _GLIBCXX_SIMD_INTRINSIC constexpr friend _SanitizedBitMask<_S_size()>
566 _M_test(const _Fp& __fun, const _SimdTuple& __x, const _More&... __more)
567 {
568 const _SanitizedBitMask<_S_first_size> __first
569 = _Abi0::_MaskImpl::_S_to_bits(
570 __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
571 __more.first...));
572 if constexpr (_S_tuple_size == 1)
573 return __first;
574 else
575 return _M_test(__fun, __x.second, __more.second...)
576 ._M_prepend(__first);
577 }
578
579 template <typename _Up, _Up _I>
580 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
581 operator[](integral_constant<_Up, _I>) const noexcept
582 {
583 if constexpr (_I < simd_size_v<_Tp, _Abi0>)
584 return _M_subscript_read(_I);
585 else
586 return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
587 }
588
589 _Tp operator[](size_t __i) const noexcept
590 {
591 if constexpr (_S_tuple_size == 1)
592 return _M_subscript_read(__i);
593 else
594 {
595 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
596 return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
597 #else
598 if constexpr (__is_scalar_abi<_Abi0>())
599 {
600 const _Tp* ptr = &first;
601 return ptr[__i];
602 }
603 else
604 return __i < simd_size_v<_Tp, _Abi0>
605 ? _M_subscript_read(__i)
606 : second[__i - simd_size_v<_Tp, _Abi0>];
607 #endif
608 }
609 }
610
611 void _M_set(size_t __i, _Tp __val) noexcept
612 {
613 if constexpr (_S_tuple_size == 1)
614 return _M_subscript_write(__i, __val);
615 else
616 {
617 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
618 reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
619 #else
620 if (__i < simd_size_v<_Tp, _Abi0>)
621 _M_subscript_write(__i, __val);
622 else
623 second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
624 #endif
625 }
626 }
627
628 private:
629 // _M_subscript_read/_write {{{
630 _Tp _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
631 {
632 if constexpr (__is_vectorizable_v<_FirstType>)
633 return first;
634 else
635 return first[__i];
636 }
637
638 void _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
639 {
640 if constexpr (__is_vectorizable_v<_FirstType>)
641 first = __y;
642 else
643 first._M_set(__i, __y);
644 }
645
646 // }}}
647 };
648
649 // __make_simd_tuple {{{1
650 template <typename _Tp, typename _A0>
651 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
652 __make_simd_tuple(simd<_Tp, _A0> __x0)
653 { return {__data(__x0)}; }
654
655 template <typename _Tp, typename _A0, typename... _As>
656 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
657 __make_simd_tuple(const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
658 { return {__data(__x0), __make_simd_tuple(__xs...)}; }
659
660 template <typename _Tp, typename _A0>
661 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
662 __make_simd_tuple(const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
663 { return {__arg0}; }
664
665 template <typename _Tp, typename _A0, typename _A1, typename... _Abis>
666 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
667 __make_simd_tuple(
668 const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
669 const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
670 const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
671 { return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }
672
673 // __to_simd_tuple {{{1
674 template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
675 _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
676 __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX);
677
678 template <typename _Tp, size_t _Np,
679 size_t _Offset = 0, // skip this many elements in __from0
680 typename _R = __fixed_size_storage_t<_Tp, _Np>, typename _V0,
681 typename _V0VT = _VectorTraits<_V0>, typename... _VX>
682 _GLIBCXX_SIMD_INTRINSIC _R constexpr __to_simd_tuple(const _V0 __from0,
683 const _VX... __fromX)
684 {
685 static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
686 static_assert(_Offset < _V0VT::_S_full_size);
687 using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
688 if constexpr (_R::_S_tuple_size == 1)
689 {
690 if constexpr (_Np == 1)
691 return _R{__from0[_Offset]};
692 else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
693 return _R{__intrin_bitcast<_R0>(__from0)};
694 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
695 && _V0VT::_S_full_size / 2 >= _Np)
696 return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
697 else if constexpr (_Offset * 4 == _V0VT::_S_full_size
698 && _V0VT::_S_full_size / 4 >= _Np)
699 return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
700 else
701 __assert_unreachable<_Tp>();
702 }
703 else
704 {
705 if constexpr (1 == _R::_S_first_size)
706 { // extract one scalar and recurse
707 if constexpr (_Offset + 1 < _V0VT::_S_full_size)
708 return _R{__from0[_Offset],
709 __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
710 __fromX...)};
711 else
712 return _R{__from0[_Offset],
713 __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
714 }
715
716 // place __from0 into _R::first and recurse for __fromX -> _R::second
717 else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
718 && _Offset == 0)
719 return _R{__from0,
720 __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};
721
722 // place lower part of __from0 into _R::first and recurse with _Offset
723 else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
724 && _Offset == 0)
725 return _R{__intrin_bitcast<_R0>(__from0),
726 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
727 _R::_S_first_size>(__from0, __fromX...)};
728
729 // place lower part of second quarter of __from0 into _R::first and
730 // recurse with _Offset
731 else if constexpr (_Offset * 4 == _V0VT::_S_full_size
732 && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
733 return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
734 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
735 _Offset + _R::_S_first_size>(__from0,
736 __fromX...)};
737
738 // place lower half of high half of __from0 into _R::first and recurse
739 // with _Offset
740 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
741 && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
742 return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
743 __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
744 _Offset + _R::_S_first_size>(__from0,
745 __fromX...)};
746
747 // place high half of __from0 into _R::first and recurse with __fromX
748 else if constexpr (_Offset * 2 == _V0VT::_S_full_size
749 && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
750 return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
751 __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
752 __fromX...)};
753
754       // ill-formed if some unforeseen pattern is needed
755 else
756 __assert_unreachable<_Tp>();
757 }
758 }
759
760 template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
761 _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
762 __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX)
763 {
764 if constexpr (is_same_v<_Tp, _V>)
765 {
766 static_assert(
767 sizeof...(_VX) == 0,
768 "An array of scalars must be the last argument to __to_simd_tuple");
769 return __call_with_subscripts(
770 __from,
771 make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
772 return __simd_tuple_concat(
773 _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
774 });
775 }
776 else
777 return __call_with_subscripts(
778 __from,
779 make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
780 return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
781 });
782 }
783
784 template <size_t, typename _Tp>
785 using __to_tuple_helper = _Tp;
786
787 template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
788 size_t... _Indexes>
789 _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
790 __to_simd_tuple_impl(index_sequence<_Indexes...>,
791 const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
792 {
793 return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
794 __args[_Indexes]...);
795 }
796
797 template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
798 typename _R = __fixed_size_storage_t<_Tp, _NOut>>
799 _GLIBCXX_SIMD_INTRINSIC _R
800 __to_simd_tuple_sized(
801 const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
802 {
803 static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
804 return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
805 make_index_sequence<_R::_S_tuple_size>(), __args);
806 }
807
808 // __optimize_simd_tuple {{{1
809 template <typename _Tp>
810 _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
811 __optimize_simd_tuple(const _SimdTuple<_Tp>)
812 { return {}; }
813
814 template <typename _Tp, typename _Ap>
815 _GLIBCXX_SIMD_INTRINSIC const _SimdTuple<_Tp, _Ap>&
816 __optimize_simd_tuple(const _SimdTuple<_Tp, _Ap>& __x)
817 { return __x; }
818
819 template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
820 typename _R = __fixed_size_storage_t<
821 _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
822 _GLIBCXX_SIMD_INTRINSIC _R
823 __optimize_simd_tuple(const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
824 {
825 using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
826 if constexpr (is_same_v<_R, _Tup>)
827 return __x;
828 else if constexpr (is_same_v<typename _R::_FirstType,
829 typename _Tup::_FirstType>)
830 return {__x.first, __optimize_simd_tuple(__x.second)};
831 else if constexpr (__is_scalar_abi<_A0>()
832 || _A0::template _S_is_partial<_Tp>)
833 return {__generate_from_n_evaluations<_R::_S_first_size,
834 typename _R::_FirstType>(
835 [&](auto __i) { return __x[__i]; }),
836 __optimize_simd_tuple(
837 __simd_tuple_pop_front<_R::_S_first_size>(__x))};
838 else if constexpr (is_same_v<_A0, _A1>
839 && _R::_S_first_size == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
840 return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
841 __optimize_simd_tuple(__x.second.second)};
842 else if constexpr (sizeof...(_Abis) >= 2
843 && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
844 && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
845 (sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::size())
846 return {
847 __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
848 __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
849 __optimize_simd_tuple(__x.second.second.second.second)};
850 else
851 {
852 static_assert(sizeof(_R) == sizeof(__x));
853 _R __r;
854 __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
855 sizeof(_Tp) * _R::_S_size());
856 return __r;
857 }
858 }
859
860 // __for_each(const _SimdTuple &, Fun) {{{1
861 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
862 _GLIBCXX_SIMD_INTRINSIC constexpr void
863 __for_each(const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
864 { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
865
866 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
867 typename... _As, typename _Fp>
868 _GLIBCXX_SIMD_INTRINSIC constexpr void
869 __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
870 {
871 __fun(__make_meta<_Offset>(__t), __t.first);
872 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
873 static_cast<_Fp&&>(__fun));
874 }
875
876 // __for_each(_SimdTuple &, Fun) {{{1
877 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
878 _GLIBCXX_SIMD_INTRINSIC constexpr void
879 __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
880 { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
881
882 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
883 typename... _As, typename _Fp>
884 _GLIBCXX_SIMD_INTRINSIC constexpr void
885 __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
886 {
887 __fun(__make_meta<_Offset>(__t), __t.first);
888 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
889 static_cast<_Fp&&>(__fun));
890 }
891
892 // __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
893 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
894 _GLIBCXX_SIMD_INTRINSIC constexpr void
895 __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
896 _Fp&& __fun)
897 {
898 static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
899 }
900
901 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
902 typename... _As, typename _Fp>
903 _GLIBCXX_SIMD_INTRINSIC constexpr void
904 __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
905 const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
906 {
907 __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
908 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
909 static_cast<_Fp&&>(__fun));
910 }
911
912 // __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
913 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
914 _GLIBCXX_SIMD_INTRINSIC constexpr void
915 __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
916 _Fp&& __fun)
917 {
918 static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
919 }
920
921 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
922 typename... _As, typename _Fp>
923 _GLIBCXX_SIMD_INTRINSIC constexpr void
924 __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
925 const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
926 {
927 __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
928 __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
929 static_cast<_Fp&&>(__fun));
930 }
931
932 // }}}1
933 // __extract_part(_SimdTuple) {{{
934 template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
935 typename... _As>
936 _GLIBCXX_SIMD_INTRINSIC auto // __vector_type_t or _SimdTuple
937 __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x)
938 {
939 // worst cases:
940 // (a) 4, 4, 4 => 3, 3, 3, 3 (_Total = 4)
941 // (b) 2, 2, 2 => 3, 3 (_Total = 2)
942 // (c) 4, 2 => 2, 2, 2 (_Total = 3)
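// Illustration (hypothetical ABI tags _A4/_A2 with 4 resp. 2 elements):
//   __extract_part<1, 3, 1>(_SimdTuple<float, _A4, _A2>{...}) returns the
//   3rd and 4th of the 6 values, a 2-element chunk taken entirely from
//   __x.first (the "all of the return values are in __x.first" case below).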
943 using _Tuple = _SimdTuple<_Tp, _A0, _As...>;
944 static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);
945 constexpr size_t _Np = _Tuple::_S_size();
946 static_assert(_Np >= _Total && _Np % _Total == 0);
947 constexpr size_t __values_per_part = _Np / _Total;
948 [[maybe_unused]] constexpr size_t __values_to_skip
949 = _Index * __values_per_part;
950 constexpr size_t __return_size = __values_per_part * _Combine;
951 using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;
952
953 // handle (optimize) the simple cases
954 if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)
955 return __x.first._M_data;
956 else if constexpr (_Index == 0 && _Total == _Combine)
957 return __x;
958 else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)
959 return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(
960 __as_vector(__x.first));
961
962 // recurse to skip unused data members at the beginning of _SimdTuple
963 else if constexpr (__values_to_skip >= _Tuple::_S_first_size)
964 { // recurse
965 if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
966 {
967 constexpr int __parts_in_first
968 = _Tuple::_S_first_size / __values_per_part;
969 return __extract_part<_Index - __parts_in_first,
970 _Total - __parts_in_first, _Combine>(
971 __x.second);
972 }
973 else
974 return __extract_part<__values_to_skip - _Tuple::_S_first_size,
975 _Np - _Tuple::_S_first_size, __return_size>(
976 __x.second);
977 }
978
979 // extract from multiple _SimdTuple data members
980 else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)
981 {
982 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
983 const __may_alias<_Tp>* const element_ptr
984 = reinterpret_cast<const __may_alias<_Tp>*>(&__x) + __values_to_skip;
985 return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
986 #else
987 [[maybe_unused]] constexpr size_t __offset = __values_to_skip;
988 return __as_vector(simd<_Tp, _RetAbi>([&](auto __i) constexpr {
989 constexpr _SizeConstant<__i + __offset> __k;
990 return __x[__k];
991 }));
992 #endif
993 }
994
995 // all of the return values are in __x.first
996 else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
997 return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,
998 _Combine>(__x.first);
999 else
1000 return __extract_part<__values_to_skip, _Tuple::_S_first_size,
1001 _Combine * __values_per_part>(__x.first);
1002 }
1003
1004 // }}}
1005 // __fixed_size_storage_t<_Tp, _Np>{{{
1006 template <typename _Tp, int _Np, typename _Tuple,
1007 typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,
1008 int _Remain = _Np - int(_Next::size())>
1009 struct __fixed_size_storage_builder;
1010
1011 template <typename _Tp, int _Np>
1012 struct __fixed_size_storage
1013 : public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};
1014
1015 template <typename _Tp, int _Np, typename... _As, typename _Next>
1016 struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1017 0>
1018 { using type = _SimdTuple<_Tp, _As..., typename _Next::abi_type>; };
1019
1020 template <typename _Tp, int _Np, typename... _As, typename _Next, int _Remain>
1021 struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1022 _Remain>
1023 {
1024 using type = typename __fixed_size_storage_builder<
1025 _Tp, _Remain, _SimdTuple<_Tp, _As..., typename _Next::abi_type>>::type;
1026 };
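// Example (illustrative; the chosen ABIs depend on the enabled ISA): with AVX
// available, __fixed_size_storage_t<float, 12> is expected to be
//   _SimdTuple<float, simd_abi::_VecBuiltin<32>, simd_abi::_VecBuiltin<16>>
// i.e. one 8-element and one 4-element chunk selected greedily via _BestAbi.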
1027
1028 // }}}
1029 // __autocvt_to_simd {{{
1030 template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
1031 struct __autocvt_to_simd
1032 {
1033 _Tp _M_data;
1034 using _TT = __remove_cvref_t<_Tp>;
1035
1036 operator _TT()
1037 { return _M_data; }
1038
1039 operator _TT&()
1040 {
1041 static_assert(is_lvalue_reference<_Tp>::value, "");
1042 static_assert(!is_const<_Tp>::value, "");
1043 return _M_data;
1044 }
1045
1046 operator _TT*()
1047 {
1048 static_assert(is_lvalue_reference<_Tp>::value, "");
1049 static_assert(!is_const<_Tp>::value, "");
1050 return &_M_data;
1051 }
1052
1053 constexpr inline __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
1054
1055 template <typename _Abi>
1056 operator simd<typename _TT::value_type, _Abi>()
1057 { return {__private_init, _M_data}; }
1058
1059 template <typename _Abi>
1060 operator simd<typename _TT::value_type, _Abi>&()
1061 {
1062 return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1063 &_M_data);
1064 }
1065
1066 template <typename _Abi>
1067 operator simd<typename _TT::value_type, _Abi>*()
1068 {
1069 return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1070 &_M_data);
1071 }
1072 };
1073
1074 template <typename _Tp>
1075 __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;
1076
1077 template <typename _Tp>
1078 struct __autocvt_to_simd<_Tp, true>
1079 {
1080 using _TT = __remove_cvref_t<_Tp>;
1081 _Tp _M_data;
1082 fixed_size_simd<_TT, 1> _M_fd;
1083
1084 constexpr inline __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}
1085
1086 ~__autocvt_to_simd()
1087 { _M_data = __data(_M_fd).first; }
1088
1089 operator fixed_size_simd<_TT, 1>()
1090 { return _M_fd; }
1091
1092 operator fixed_size_simd<_TT, 1> &()
1093 {
1094 static_assert(is_lvalue_reference<_Tp>::value, "");
1095 static_assert(!is_const<_Tp>::value, "");
1096 return _M_fd;
1097 }
1098
1099 operator fixed_size_simd<_TT, 1> *()
1100 {
1101 static_assert(is_lvalue_reference<_Tp>::value, "");
1102 static_assert(!is_const<_Tp>::value, "");
1103 return &_M_fd;
1104 }
1105 };
1106
1107 // }}}
1108
1109 struct _CommonImplFixedSize;
1110 template <int _Np> struct _SimdImplFixedSize;
1111 template <int _Np> struct _MaskImplFixedSize;
1112 // simd_abi::_Fixed {{{
1113 template <int _Np>
1114 struct simd_abi::_Fixed
1115 {
1116 template <typename _Tp> static constexpr size_t _S_size = _Np;
1117 template <typename _Tp> static constexpr size_t _S_full_size = _Np;
1118 // validity traits {{{
1119 struct _IsValidAbiTag : public __bool_constant<(_Np > 0)> {};
1120
1121 template <typename _Tp>
1122 struct _IsValidSizeFor
1123 : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};
1124
1125 template <typename _Tp>
1126 struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
1127 _IsValidSizeFor<_Tp>> {};
1128
1129 template <typename _Tp>
1130 static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;
1131
1132 // }}}
1133 // _S_masked {{{
1134 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1135 _S_masked(_BitMask<_Np> __x)
1136 { return __x._M_sanitized(); }
1137
1138 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1139 _S_masked(_SanitizedBitMask<_Np> __x)
1140 { return __x; }
1141
1142 // }}}
1143 // _*Impl {{{
1144 using _CommonImpl = _CommonImplFixedSize;
1145 using _SimdImpl = _SimdImplFixedSize<_Np>;
1146 using _MaskImpl = _MaskImplFixedSize<_Np>;
1147
1148 // }}}
1149 // __traits {{{
1150 template <typename _Tp, bool = _S_is_valid_v<_Tp>>
1151 struct __traits : _InvalidTraits {};
1152
1153 template <typename _Tp>
1154 struct __traits<_Tp, true>
1155 {
1156 using _IsValid = true_type;
1157 using _SimdImpl = _SimdImplFixedSize<_Np>;
1158 using _MaskImpl = _MaskImplFixedSize<_Np>;
1159
1160 // simd and simd_mask member types {{{
1161 using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1162 using _MaskMember = _SanitizedBitMask<_Np>;
1163
1164 static constexpr size_t _S_simd_align
1165 = std::__bit_ceil(_Np * sizeof(_Tp));
1166
1167 static constexpr size_t _S_mask_align = alignof(_MaskMember);
1168
1169 // }}}
1170 // _SimdBase / base class for simd, providing extra conversions {{{
1171 struct _SimdBase
1172 {
1173       // The following ensures that function arguments are passed via the stack.
1174       // This is important for ABI compatibility across TU boundaries.
1175 _SimdBase(const _SimdBase&) {}
1176 _SimdBase() = default;
1177
1178 explicit operator const _SimdMember &() const
1179 { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }
1180
1181 explicit operator array<_Tp, _Np>() const
1182 {
1183 array<_Tp, _Np> __r;
1184 // _SimdMember can be larger because of higher alignment
1185 static_assert(sizeof(__r) <= sizeof(_SimdMember), "");
1186 __builtin_memcpy(__r.data(), &static_cast<const _SimdMember&>(*this),
1187 sizeof(__r));
1188 return __r;
1189 }
1190 };
1191
1192 // }}}
1193 // _MaskBase {{{
1194 // empty. The bitset interface suffices
1195 struct _MaskBase {};
1196
1197 // }}}
1198 // _SimdCastType {{{
1199 struct _SimdCastType
1200 {
1201 _SimdCastType(const array<_Tp, _Np>&);
1202 _SimdCastType(const _SimdMember& dd) : _M_data(dd) {}
1203 explicit operator const _SimdMember &() const { return _M_data; }
1204
1205 private:
1206 const _SimdMember& _M_data;
1207 };
1208
1209 // }}}
1210 // _MaskCastType {{{
1211 class _MaskCastType
1212 {
1213 _MaskCastType() = delete;
1214 };
1215 // }}}
1216 };
1217 // }}}
1218 };
1219
1220 // }}}
1221 // _CommonImplFixedSize {{{
1222 struct _CommonImplFixedSize
1223 {
1224 // _S_store {{{
1225 template <typename _Tp, typename... _As>
1226 _GLIBCXX_SIMD_INTRINSIC static void
1227 _S_store(const _SimdTuple<_Tp, _As...>& __x, void* __addr)
1228 {
1229 constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();
1230 __builtin_memcpy(__addr, &__x, _Np * sizeof(_Tp));
1231 }
1232
1233 // }}}
1234 };
1235
1236 // }}}
1237 // _SimdImplFixedSize {{{1
1238 // fixed_size must not inherit from _SimdMathFallback, so that the math
1239 // specializations of the ABIs used inside the _SimdTuple take precedence
1240 template <int _Np>
1241 struct _SimdImplFixedSize
1242 {
1243 // member types {{{2
1244 using _MaskMember = _SanitizedBitMask<_Np>;
1245
1246 template <typename _Tp>
1247 using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1248
1249 template <typename _Tp>
1250 static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;
1251
1252 template <typename _Tp>
1253 using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
1254
1255 template <typename _Tp>
1256 using _TypeTag = _Tp*;
1257
1258 // broadcast {{{2
1259 template <typename _Tp>
1260 static constexpr inline _SimdMember<_Tp> _S_broadcast(_Tp __x) noexcept
1261 {
1262 return _SimdMember<_Tp>::_S_generate([&](auto __meta) constexpr {
1263 return __meta._S_broadcast(__x);
1264 });
1265 }
1266
1267 // _S_generator {{{2
1268 template <typename _Fp, typename _Tp>
1269 static constexpr inline _SimdMember<_Tp> _S_generator(_Fp&& __gen,
1270 _TypeTag<_Tp>)
1271 {
1272 return _SimdMember<_Tp>::_S_generate([&__gen](auto __meta) constexpr {
1273 return __meta._S_generator(
1274 [&](auto __i) constexpr {
1275 return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
1276 : 0;
1277 },
1278 _TypeTag<_Tp>());
1279 });
1280 }
1281
1282 // _S_load {{{2
1283 template <typename _Tp, typename _Up>
1284 static inline _SimdMember<_Tp> _S_load(const _Up* __mem,
1285 _TypeTag<_Tp>) noexcept
1286 {
1287 return _SimdMember<_Tp>::_S_generate([&](auto __meta) {
1288 return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
1289 });
1290 }
1291
1292 // _S_masked_load {{{2
1293 template <typename _Tp, typename... _As, typename _Up>
1294 static inline _SimdTuple<_Tp, _As...>
1295 _S_masked_load(const _SimdTuple<_Tp, _As...>& __old,
1296 const _MaskMember __bits, const _Up* __mem) noexcept
1297 {
1298 auto __merge = __old;
1299 __for_each(__merge, [&](auto __meta, auto& __native) {
1300 if (__meta._S_submask(__bits).any())
1301 #pragma GCC diagnostic push
1302 // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the mask punts
1303 // the responsibility for avoiding UB to the caller of the masked load.
1304 // Consequently, the compiler may assume this branch is unreachable if the
1305 // pointer arithmetic is UB.
1306 #pragma GCC diagnostic ignored "-Warray-bounds"
1307 __native
1308 = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
1309 __mem + __meta._S_offset);
1310 #pragma GCC diagnostic pop
1311 });
1312 return __merge;
1313 }
1314
1315 // _S_store {{{2
1316 template <typename _Tp, typename _Up>
1317 static inline void _S_store(const _SimdMember<_Tp>& __v, _Up* __mem,
1318 _TypeTag<_Tp>) noexcept
1319 {
1320 __for_each(__v, [&](auto __meta, auto __native) {
1321 __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
1322 });
1323 }
1324
1325 // _S_masked_store {{{2
1326 template <typename _Tp, typename... _As, typename _Up>
1327 static inline void _S_masked_store(const _SimdTuple<_Tp, _As...>& __v,
1328 _Up* __mem,
1329 const _MaskMember __bits) noexcept
1330 {
1331 __for_each(__v, [&](auto __meta, auto __native) {
1332 if (__meta._S_submask(__bits).any())
1333 #pragma GCC diagnostic push
1334 // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the mask punts
1335 // the responsibility for avoiding UB to the caller of the masked store.
1336 // Consequently, the compiler may assume this branch is unreachable if the
1337 // pointer arithmetic is UB.
1338 #pragma GCC diagnostic ignored "-Warray-bounds"
1339 __meta._S_masked_store(__native, __mem + __meta._S_offset,
1340 __meta._S_make_mask(__bits));
1341 #pragma GCC diagnostic pop
1342 });
1343 }
1344
1345 // negation {{{2
1346 template <typename _Tp, typename... _As>
1347 static inline _MaskMember
1348 _S_negate(const _SimdTuple<_Tp, _As...>& __x) noexcept
1349 {
1350 _MaskMember __bits = 0;
1351 __for_each(
1352 __x, [&__bits](auto __meta, auto __native) constexpr {
1353 __bits
1354 |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
1355 });
1356 return __bits;
1357 }
1358
1359 // reductions {{{2
1360 template <typename _Tp, typename _BinaryOperation>
1361 static constexpr inline _Tp _S_reduce(const _Simd<_Tp>& __x,
1362 const _BinaryOperation& __binary_op)
1363 {
1364 using _Tup = _SimdMember<_Tp>;
1365 const _Tup& __tup = __data(__x);
1366 if constexpr (_Tup::_S_tuple_size == 1)
1367 return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
1368 __tup.template _M_simd_at<0>(), __binary_op);
1369 else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2
1370 && _Tup::_SecondType::_S_size() == 1)
1371 {
1372 return __binary_op(simd<_Tp, simd_abi::scalar>(
1373 reduce(__tup.template _M_simd_at<0>(),
1374 __binary_op)),
1375 __tup.template _M_simd_at<1>())[0];
1376 }
1377 else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4
1378 && _Tup::_SecondType::_S_size() == 2)
1379 {
1380 return __binary_op(
1381 simd<_Tp, simd_abi::scalar>(
1382 reduce(__tup.template _M_simd_at<0>(), __binary_op)),
1383 simd<_Tp, simd_abi::scalar>(
1384 reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];
1385 }
1386 else
1387 {
1388 const auto& __x2 = __call_with_n_evaluations<
1389 __div_roundup(_Tup::_S_tuple_size, 2)>(
1390 [](auto __first_simd, auto... __remaining) {
1391 if constexpr (sizeof...(__remaining) == 0)
1392 return __first_simd;
1393 else
1394 {
1395 using _Tup2
1396 = _SimdTuple<_Tp,
1397 typename decltype(__first_simd)::abi_type,
1398 typename decltype(__remaining)::abi_type...>;
1399 return fixed_size_simd<_Tp, _Tup2::_S_size()>(
1400 __private_init,
1401 __make_simd_tuple(__first_simd, __remaining...));
1402 }
1403 },
1404 [&](auto __i) {
1405 auto __left = __tup.template _M_simd_at<2 * __i>();
1406 if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
1407 return __left;
1408 else
1409 {
1410 auto __right = __tup.template _M_simd_at<2 * __i + 1>();
1411 using _LT = decltype(__left);
1412 using _RT = decltype(__right);
1413 if constexpr (_LT::size() == _RT::size())
1414 return __binary_op(__left, __right);
1415 else
1416 {
1417 _GLIBCXX_SIMD_USE_CONSTEXPR_API
1418 typename _LT::mask_type __k(
1419 __private_init,
1420 [](auto __j) constexpr { return __j < _RT::size(); });
1421 _LT __ext_right = __left;
1422 where(__k, __ext_right)
1423 = __proposed::resizing_simd_cast<_LT>(__right);
1424 where(__k, __left) = __binary_op(__left, __ext_right);
1425 return __left;
1426 }
1427 }
1428 });
1429 return reduce(__x2, __binary_op);
1430 }
1431 }
1432
1433 // _S_min, _S_max {{{2
1434 template <typename _Tp, typename... _As>
1435 static inline constexpr _SimdTuple<_Tp, _As...>
1436 _S_min(const _SimdTuple<_Tp, _As...>& __a,
1437 const _SimdTuple<_Tp, _As...>& __b)
1438 {
1439 return __a._M_apply_per_chunk(
1440 [](auto __impl, auto __aa, auto __bb) constexpr {
1441 return __impl._S_min(__aa, __bb);
1442 },
1443 __b);
1444 }
1445
1446 template <typename _Tp, typename... _As>
1447 static inline constexpr _SimdTuple<_Tp, _As...>
1448 _S_max(const _SimdTuple<_Tp, _As...>& __a,
1449 const _SimdTuple<_Tp, _As...>& __b)
1450 {
1451 return __a._M_apply_per_chunk(
1452 [](auto __impl, auto __aa, auto __bb) constexpr {
1453 return __impl._S_max(__aa, __bb);
1454 },
1455 __b);
1456 }
1457
1458 // _S_complement {{{2
1459 template <typename _Tp, typename... _As>
1460 static inline constexpr _SimdTuple<_Tp, _As...>
1461 _S_complement(const _SimdTuple<_Tp, _As...>& __x) noexcept
1462 {
1463 return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
1464 return __impl._S_complement(__xx);
1465 });
1466 }
1467
1468 // _S_unary_minus {{{2
1469 template <typename _Tp, typename... _As>
1470 static inline constexpr _SimdTuple<_Tp, _As...>
1471 _S_unary_minus(const _SimdTuple<_Tp, _As...>& __x) noexcept
1472 {
1473 return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
1474 return __impl._S_unary_minus(__xx);
1475 });
1476 }
1477
1478 // arithmetic operators {{{2
1479
1480 #define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
1481 template <typename _Tp, typename... _As> \
1482 static inline constexpr _SimdTuple<_Tp, _As...> name_( \
1483 const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)\
1484 { \
1485 return __x._M_apply_per_chunk( \
1486 [](auto __impl, auto __xx, auto __yy) constexpr { \
1487 return __impl.name_(__xx, __yy); \
1488 }, \
1489 __y); \
1490 }
1491
1492 _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
1493 _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)
1494 _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)
1495 _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)
1496 _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)
1497 _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)
1498 _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)
1499 _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)
1500 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)
1501 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)
1502 #undef _GLIBCXX_SIMD_FIXED_OP
1503
1504 template <typename _Tp, typename... _As>
1505 static inline constexpr _SimdTuple<_Tp, _As...>
1506 _S_bit_shift_left(const _SimdTuple<_Tp, _As...>& __x, int __y)
1507 {
1508 return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
1509 return __impl._S_bit_shift_left(__xx, __y);
1510 });
1511 }
1512
1513 template <typename _Tp, typename... _As>
1514 static inline constexpr _SimdTuple<_Tp, _As...>
1515 _S_bit_shift_right(const _SimdTuple<_Tp, _As...>& __x, int __y)
1516 {
1517 return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
1518 return __impl._S_bit_shift_right(__xx, __y);
1519 });
1520 }
1521
1522 // math {{{2
1523 #define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name) \
1524 template <typename _Tp, typename... _As, typename... _More> \
1525 static inline __fixed_size_storage_t<_RetTp, _Np> \
1526 _S_##__name(const _SimdTuple<_Tp, _As...>& __x, \
1527 const _More&... __more) \
1528 { \
1529 if constexpr (sizeof...(_More) == 0) \
1530 { \
1531 if constexpr (is_same_v<_Tp, _RetTp>) \
1532 return __x._M_apply_per_chunk( \
1533 [](auto __impl, auto __xx) constexpr { \
1534 using _V = typename decltype(__impl)::simd_type; \
1535 return __data(__name(_V(__private_init, __xx))); \
1536 }); \
1537 else \
1538 return __optimize_simd_tuple( \
1539 __x.template _M_apply_r<_RetTp>([](auto __impl, auto __xx) { \
1540 return __impl._S_##__name(__xx); \
1541 })); \
1542 } \
1543 else if constexpr ( \
1544 is_same_v< \
1545 _Tp, \
1546 _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
1547 return __x._M_apply_per_chunk( \
1548 [](auto __impl, auto __xx, auto... __pack) constexpr { \
1549 using _V = typename decltype(__impl)::simd_type; \
1550 return __data(__name(_V(__private_init, __xx), \
1551 _V(__private_init, __pack)...)); \
1552 }, \
1553 __more...); \
1554 else if constexpr (is_same_v<_Tp, _RetTp>) \
1555 return __x._M_apply_per_chunk( \
1556 [](auto __impl, auto __xx, auto... __pack) constexpr { \
1557 using _V = typename decltype(__impl)::simd_type; \
1558 return __data(__name(_V(__private_init, __xx), \
1559 __autocvt_to_simd(__pack)...)); \
1560 }, \
1561 __more...); \
1562 else \
1563 __assert_unreachable<_Tp>(); \
1564 }
1565
1566 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)
1567 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)
1568 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)
1569 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)
1570 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)
1571 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)
1572 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)
1573 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)
1574 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)
1575 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)
1576 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)
1577 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)
1578 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)
1579 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)
1580 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)
1581 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)
1582 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, ilogb)
1583 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)
1584 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)
1585 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)
1586 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)
1587 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)
1588 // modf implemented in simd_math.h
1589 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp,
1590 scalbn) // double scalbn(double x, int exp);
1591 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)
1592 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)
1593 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)
1594 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)
1595 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)
1596 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)
1597 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)
1598 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)
1599 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)
1600 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)
1601 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)
1602 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)
1603 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)
1604 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)
1605
1606 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)
1607 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lrint)
1608 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llrint)
1609
1610 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)
1611 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lround)
1612 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llround)
1613
1614 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)
1615 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)
1616 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)
1617 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, copysign)
1618 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)
1619 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)
1620 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)
1621 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)
1622 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)
1623 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, fpclassify)
1624 #undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
1625
1626 template <typename _Tp, typename... _Abis>
1627 static _SimdTuple<_Tp, _Abis...> _S_remquo(
1628 const _SimdTuple<_Tp, _Abis...>& __x,
1629 const _SimdTuple<_Tp, _Abis...>& __y,
1630 __fixed_size_storage_t<int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
1631 {
1632 return __x._M_apply_per_chunk(
1633 [](auto __impl, const auto __xx, const auto __yy, auto& __zz) {
1634 return __impl._S_remquo(__xx, __yy, &__zz);
1635 },
1636 __y, *__z);
1637 }
1638
1639 template <typename _Tp, typename... _As>
1640 static inline _SimdTuple<_Tp, _As...>
1641 _S_frexp(const _SimdTuple<_Tp, _As...>& __x,
1642 __fixed_size_storage_t<int, _Np>& __exp) noexcept
1643 {
1644 return __x._M_apply_per_chunk(
1645 [](auto __impl, const auto& __a, auto& __b) {
1646 return __data(
1647 frexp(typename decltype(__impl)::simd_type(__private_init, __a),
1648 __autocvt_to_simd(__b)));
1649 },
1650 __exp);
1651 }
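// Illustrative sketch, not part of this header: _S_frexp above implements the
// user-facing frexp overload whose exponent output is a fixed_size simd of
// int.  Names and values below are assumptions made only for this example.
#if 0 // example only
#include <experimental/simd>
namespace stdx = std::experimental;

void
example_frexp()
{
  stdx::fixed_size_simd<double, 5> x([](int i) { return 1.0 + i; });
  stdx::fixed_size_simd<int, 5> exp; // receives the exponents
  auto mant = frexp(x, &exp);        // x[i] == mant[i] * 2^exp[i], mant[i] in [0.5, 1)
}
#endif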
1652
1653 #define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
1654 template <typename _Tp, typename... _As> \
1655 static inline _MaskMember \
1656 _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
1657 { \
1658 return _M_test([](auto __impl, \
1659 auto __xx) { return __impl._S_##name_(__xx); }, \
1660 __x); \
1661 }
1662
1663 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
1664 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)
1665 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)
1666 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)
1667 _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)
1668 #undef _GLIBCXX_SIMD_TEST_ON_TUPLE_
1669
1670 // _S_increment & _S_decrement {{{2
1671 template <typename... _Ts>
1672 _GLIBCXX_SIMD_INTRINSIC static constexpr void
1673 _S_increment(_SimdTuple<_Ts...>& __x)
1674 {
1675 __for_each(
1676 __x, [](auto __meta, auto& __native) constexpr {
1677 __meta._S_increment(__native);
1678 });
1679 }
1680
1681 template <typename... _Ts>
1682 _GLIBCXX_SIMD_INTRINSIC static constexpr void
1683 _S_decrement(_SimdTuple<_Ts...>& __x)
1684 {
1685 __for_each(
1686 __x, [](auto __meta, auto& __native) constexpr {
1687 __meta._S_decrement(__native);
1688 });
1689 }
1690
1691 // compares {{{2
1692 #define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp) \
1693 template <typename _Tp, typename... _As> \
1694 _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember \
1695 __cmp(const _SimdTuple<_Tp, _As...>& __x, \
1696 const _SimdTuple<_Tp, _As...>& __y) \
1697 { \
1698 return _M_test( \
1699 [](auto __impl, auto __xx, auto __yy) constexpr { \
1700 return __impl.__cmp(__xx, __yy); \
1701 }, \
1702 __x, __y); \
1703 }
1704
1705 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
1706 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)
1707 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)
1708 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)
1709 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)
1710 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)
1711 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)
1712 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)
1713 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)
1714 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)
1715 #undef _GLIBCXX_SIMD_CMP_OPERATIONS
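// Illustrative sketch, not part of this header: the comparison hooks above
// back the simd comparison operators; for fixed_size the result is a
// simd_mask stored as a single bitmask.  Names below are assumptions made
// only for this example.
#if 0 // example only
#include <experimental/simd>
namespace stdx = std::experimental;

void
example_compare()
{
  stdx::fixed_size_simd<float, 7> a([](int i) { return float(i); });
  stdx::fixed_size_simd<float, 7> b(3.f);         // broadcast
  stdx::fixed_size_simd_mask<float, 7> m = a < b; // true for i = 0, 1, 2
  bool any = stdx::any_of(m);                     // true
}
#endif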
1716
1717 // smart_reference access {{{2
1718 template <typename _Tp, typename... _As, typename _Up>
1719 _GLIBCXX_SIMD_INTRINSIC static void _S_set(_SimdTuple<_Tp, _As...>& __v,
1720 int __i, _Up&& __x) noexcept
1721 { __v._M_set(__i, static_cast<_Up&&>(__x)); }
1722
1723 // _S_masked_assign {{{2
1724 template <typename _Tp, typename... _As>
1725 _GLIBCXX_SIMD_INTRINSIC static void
1726 _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1727 const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
1728 {
1729 __for_each(
1730 __lhs, __rhs,
1731 [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
1732 __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1733 __native_rhs);
1734 });
1735 }
1736
1737 // Optimization for the case where the RHS is a scalar. No need to broadcast
1738 // the scalar to a simd first.
1739 template <typename _Tp, typename... _As>
1740 _GLIBCXX_SIMD_INTRINSIC static void
1741 _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1742 const __type_identity_t<_Tp> __rhs)
1743 {
1744 __for_each(
1745 __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
1746 __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1747 __rhs);
1748 });
1749 }
1750
1751 // _S_masked_cassign {{{2
1752 template <typename _Op, typename _Tp, typename... _As>
1753 static inline void _S_masked_cassign(const _MaskMember __bits,
1754 _SimdTuple<_Tp, _As...>& __lhs,
1755 const _SimdTuple<_Tp, _As...>& __rhs,
1756 _Op __op)
1757 {
1758 __for_each(
1759 __lhs, __rhs,
1760 [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
1761 __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1762 __native_lhs, __native_rhs, __op);
1763 });
1764 }
1765
1766 // Optimization for the case where the RHS is a scalar. No need to broadcast
1767 // the scalar to a simd first.
1768 template <typename _Op, typename _Tp, typename... _As>
1769 static inline void _S_masked_cassign(const _MaskMember __bits,
1770 _SimdTuple<_Tp, _As...>& __lhs,
1771 const _Tp& __rhs, _Op __op)
1772 {
1773 __for_each(
1774 __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
1775 __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1776 __native_lhs, __rhs, __op);
1777 });
1778 }
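// Illustrative sketch, not part of this header: the scalar-RHS overloads above
// serve where-expressions whose right-hand side is a single value, so no
// broadcast to a simd is needed first.  Names below are assumptions made only
// for this example.
#if 0 // example only
#include <experimental/simd>
namespace stdx = std::experimental;

void
example_where()
{
  stdx::fixed_size_simd<float, 7> v([](int i) { return i - 3.f; });
  stdx::where(v < 0.f, v) = 0.f;  // masked assignment from a scalar
  stdx::where(v > 1.f, v) += 2.f; // masked compound assignment from a scalar
}
#endif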
1779
1780 // _S_masked_unary {{{2
1781 template <template <typename> class _Op, typename _Tp, typename... _As>
1782 static inline _SimdTuple<_Tp, _As...>
1783 _S_masked_unary(const _MaskMember __bits, const _SimdTuple<_Tp, _As...>& __v)
1784 {
1785 return __v._M_apply_wrapped([&__bits](auto __meta,
1786 auto __native) constexpr {
1787 return __meta.template _S_masked_unary<_Op>(__meta._S_make_mask(
1788 __bits),
1789 __native);
1790 });
1791 }
1792
1793 // }}}2
1794 };
1795
1796 // _MaskImplFixedSize {{{1
1797 template <int _Np>
1798 struct _MaskImplFixedSize
1799 {
1800 static_assert(
1801 sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
1802 "The fixed_size implementation relies on one _ULLong being able to store "
1803 "all boolean elements."); // required in load & store
1804
1805 // member types {{{
1806 using _Abi = simd_abi::fixed_size<_Np>;
1807
1808 using _MaskMember = _SanitizedBitMask<_Np>;
1809
1810 template <typename _Tp>
1811 using _FirstAbi = typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;
1812
1813 template <typename _Tp>
1814 using _TypeTag = _Tp*;
1815
1816 // }}}
1817 // _S_broadcast {{{
1818 template <typename>
1819 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1820 _S_broadcast(bool __x)
1821 { return __x ? ~_MaskMember() : _MaskMember(); }
1822
1823 // }}}
1824 // _S_load {{{
1825 template <typename>
1826 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1827 _S_load(const bool* __mem)
1828 {
1829 using _Ip = __int_for_sizeof_t<bool>;
1830 // The following load uses element_aligned and relies on __mem already
1831 // carrying alignment information from when this load function was
1832 // called.
1833 const simd<_Ip, _Abi> __bools(reinterpret_cast<const __may_alias<_Ip>*>(
1834 __mem),
1835 element_aligned);
1836 return __data(__bools != 0);
1837 }
1838
1839 // }}}
1840 // _S_to_bits {{{
1841 template <bool _Sanitized>
1842 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1843 _S_to_bits(_BitMask<_Np, _Sanitized> __x)
1844 {
1845 if constexpr (_Sanitized)
1846 return __x;
1847 else
1848 return __x._M_sanitized();
1849 }
1850
1851 // }}}
1852 // _S_convert {{{
1853 template <typename _Tp, typename _Up, typename _UAbi>
1854 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1855 _S_convert(simd_mask<_Up, _UAbi> __x)
1856 {
1857 return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
1858 .template _M_extract<0, _Np>();
1859 }
1860
1861 // }}}
1862 // _S_from_bitmask {{{2
1863 template <typename _Tp>
1864 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1865 _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) noexcept
1866 { return __bits; }
1867
1868 // _S_load {{{2
1869 static inline _MaskMember _S_load(const bool* __mem) noexcept
1870 {
1871 // TODO: _UChar is not necessarily the best type to use here. For smaller
1872 // _Np, _UShort, _UInt, _ULLong, float, and double can be more efficient.
1873 _ULLong __r = 0;
1874 using _Vs = __fixed_size_storage_t<_UChar, _Np>;
1875 __for_each(_Vs{}, [&](auto __meta, auto) {
1876 __r |= __meta._S_mask_to_shifted_ullong(
1877 __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
1878 _SizeConstant<__meta._S_size()>()));
1879 });
1880 return __r;
1881 }
1882
1883 // _S_masked_load {{{2
1884 static inline _MaskMember _S_masked_load(_MaskMember __merge,
1885 _MaskMember __mask,
1886 const bool* __mem) noexcept
1887 {
1888 _BitOps::_S_bit_iteration(__mask.to_ullong(), [&](auto __i) {
1889 __merge.set(__i, __mem[__i]);
1890 });
1891 return __merge;
1892 }
1893
1894 // _S_store {{{2
1895 static inline void _S_store(const _MaskMember __bitmask,
1896 bool* __mem) noexcept
1897 {
1898 if constexpr (_Np == 1)
1899 __mem[0] = __bitmask[0];
1900 else
1901 _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
1902 }
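// Illustrative sketch, not part of this header: _S_load and _S_store above
// implement the bool-array interface of fixed_size simd_mask.  The array
// contents and names below are assumptions made only for this example.
#if 0 // example only
#include <experimental/simd>
namespace stdx = std::experimental;

void
example_mask_io()
{
  const bool in[7] = {true, false, true, true, false, false, true};
  bool out[7] = {};
  stdx::fixed_size_simd_mask<float, 7> m(in, stdx::element_aligned); // load
  m.copy_to(out, stdx::element_aligned);                             // store
}
#endif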
1903
1904 // _S_masked_store {{{2
1905 static inline void _S_masked_store(const _MaskMember __v, bool* __mem,
1906 const _MaskMember __k) noexcept
1907 {
1908 _BitOps::_S_bit_iteration(__k, [&](auto __i) { __mem[__i] = __v[__i]; });
1909 }
1910
1911 // logical and bitwise operators {{{2
1912 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1913 _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1914 { return __x & __y; }
1915
1916 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1917 _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1918 { return __x | __y; }
1919
1920 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1921 _S_bit_not(const _MaskMember& __x) noexcept
1922 { return ~__x; }
1923
1924 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1925 _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1926 { return __x & __y; }
1927
1928 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1929 _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1930 { return __x | __y; }
1931
1932 _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1933 _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
1934 { return __x ^ __y; }
1935
1936 // smart_reference access {{{2
1937 _GLIBCXX_SIMD_INTRINSIC static void _S_set(_MaskMember& __k, int __i,
1938 bool __x) noexcept
1939 { __k.set(__i, __x); }
1940
1941 // _S_masked_assign {{{2
1942 _GLIBCXX_SIMD_INTRINSIC static void
1943 _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
1944 const _MaskMember __rhs)
1945 { __lhs = (__lhs & ~__k) | (__rhs & __k); }
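// The blend above keeps __lhs where __k is 0 and takes __rhs where __k is 1,
// e.g. (illustrative values) __lhs = 0b1010, __rhs = 0b0101, __k = 0b0011:
// (0b1010 & 0b1100) | (0b0101 & 0b0011) = 0b1000 | 0b0001 = 0b1001.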
1946
1947 // Optimization for the case where the RHS is a scalar.
1948 _GLIBCXX_SIMD_INTRINSIC static void _S_masked_assign(const _MaskMember __k,
1949 _MaskMember& __lhs,
1950 const bool __rhs)
1951 {
1952 if (__rhs)
1953 __lhs |= __k;
1954 else
1955 __lhs &= ~__k;
1956 }
1957
1958 // }}}2
1959 // _S_all_of {{{
1960 template <typename _Tp>
1961 _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
1962 { return __data(__k).all(); }
1963
1964 // }}}
1965 // _S_any_of {{{
1966 template <typename _Tp>
1967 _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
1968 { return __data(__k).any(); }
1969
1970 // }}}
1971 // _S_none_of {{{
1972 template <typename _Tp>
1973 _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
1974 { return __data(__k).none(); }
1975
1976 // }}}
1977 // _S_some_of {{{
1978 template <typename _Tp>
1979 _GLIBCXX_SIMD_INTRINSIC static bool
1980 _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
1981 {
1982 if constexpr (_Np == 1)
1983 return false;
1984 else
1985 return __data(__k).any() && !__data(__k).all();
1986 }
1987
1988 // }}}
1989 // _S_popcount {{{
1990 template <typename _Tp>
1991 _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
1992 { return __data(__k).count(); }
1993
1994 // }}}
1995 // _S_find_first_set {{{
1996 template <typename _Tp>
1997 _GLIBCXX_SIMD_INTRINSIC static int
1998 _S_find_first_set(simd_mask<_Tp, _Abi> __k)
1999 { return std::__countr_zero(__data(__k).to_ullong()); }
2000
2001 // }}}
2002 // _S_find_last_set {{{
2003 template <typename _Tp>
2004 _GLIBCXX_SIMD_INTRINSIC static int
2005 _S_find_last_set(simd_mask<_Tp, _Abi> __k)
2006 { return std::__bit_width(__data(__k).to_ullong()) - 1; }
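// Illustrative sketch, not part of this header: the reductions above back the
// free functions on fixed_size masks.  Names and values below are assumptions
// made only for this example.
#if 0 // example only
#include <experimental/simd>
namespace stdx = std::experimental;

void
example_reductions()
{
  stdx::fixed_size_simd<int, 7> v([](int i) { return i; });
  auto m = (v % 3) == 0;               // elements 0, 3, 6 are true
  int n = stdx::popcount(m);           // 3
  int first = stdx::find_first_set(m); // 0
  int last = stdx::find_last_set(m);   // 6
  bool some = stdx::some_of(m);        // true: some but not all are set
}
#endif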
2007
2008 // }}}
2009 };
2010 // }}}1
2011
2012 _GLIBCXX_SIMD_END_NAMESPACE
2013 #endif // __cplusplus >= 201703L
2014 #endif // _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
2015
2016 // vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80