1 // Simd fixed_size ABI specific implementations -*- C++ -*-
3 // Copyright (C) 2020-2022 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
/* The fixed_size ABI gives the following guarantees:
 *  - simd objects are passed via the stack
 *  - memory layout of `simd<_Tp, _Np>` is equivalent to `array<_Tp, _Np>`
 *  - alignment of `simd<_Tp, _Np>` is `_Np * sizeof(_Tp)` if _Np is __a
 *    power-of-2 value, otherwise `std::__bit_ceil(_Np * sizeof(_Tp))` (Note:
 *    if the alignment were to exceed the system/compiler maximum, it is
 *    bounded to that maximum)
 *  - simd_mask objects are passed like bitset<_Np>
 *  - memory layout of `simd_mask<_Tp, _Np>` is equivalent to `bitset<_Np>`
 *  - alignment of `simd_mask<_Tp, _Np>` is equal to the alignment of
 *    `bitset<_Np>`
 */
39 #ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
40 #define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
42 #if __cplusplus >= 201703L
46 _GLIBCXX_SIMD_BEGIN_NAMESPACE
48 // __simd_tuple_element {{{
49 template <size_t _I
, typename _Tp
>
50 struct __simd_tuple_element
;
52 template <typename _Tp
, typename _A0
, typename
... _As
>
53 struct __simd_tuple_element
<0, _SimdTuple
<_Tp
, _A0
, _As
...>>
54 { using type
= simd
<_Tp
, _A0
>; };
56 template <size_t _I
, typename _Tp
, typename _A0
, typename
... _As
>
57 struct __simd_tuple_element
<_I
, _SimdTuple
<_Tp
, _A0
, _As
...>>
60 typename __simd_tuple_element
<_I
- 1, _SimdTuple
<_Tp
, _As
...>>::type
;
63 template <size_t _I
, typename _Tp
>
64 using __simd_tuple_element_t
= typename __simd_tuple_element
<_I
, _Tp
>::type
;
67 // __simd_tuple_concat {{{
69 template <typename _Tp
, typename
... _A0s
, typename
... _A1s
>
70 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple
<_Tp
, _A0s
..., _A1s
...>
71 __simd_tuple_concat(const _SimdTuple
<_Tp
, _A0s
...>& __left
,
72 const _SimdTuple
<_Tp
, _A1s
...>& __right
)
74 if constexpr (sizeof...(_A0s
) == 0)
76 else if constexpr (sizeof...(_A1s
) == 0)
79 return {__left
.first
, __simd_tuple_concat(__left
.second
, __right
)};
82 template <typename _Tp
, typename _A10
, typename
... _A1s
>
83 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple
<_Tp
, simd_abi::scalar
, _A10
,
85 __simd_tuple_concat(const _Tp
& __left
,
86 const _SimdTuple
<_Tp
, _A10
, _A1s
...>& __right
)
87 { return {__left
, __right
}; }
90 // __simd_tuple_pop_front {{{
91 // Returns the next _SimdTuple in __x that has _Np elements less.
92 // Precondition: _Np must match the number of elements in __first (recursively)
93 template <size_t _Np
, typename _Tp
>
94 _GLIBCXX_SIMD_INTRINSIC
constexpr decltype(auto)
95 __simd_tuple_pop_front(_Tp
&& __x
)
97 if constexpr (_Np
== 0)
98 return static_cast<_Tp
&&>(__x
);
101 using _Up
= __remove_cvref_t
<_Tp
>;
102 static_assert(_Np
>= _Up::_S_first_size
);
103 return __simd_tuple_pop_front
<_Np
- _Up::_S_first_size
>(__x
.second
);
108 // __get_simd_at<_Np> {{{1
111 struct __as_simd_tuple
{};
113 template <typename _Tp
, typename _A0
, typename
... _Abis
>
114 _GLIBCXX_SIMD_INTRINSIC
constexpr simd
<_Tp
, _A0
>
115 __simd_tuple_get_impl(__as_simd
, const _SimdTuple
<_Tp
, _A0
, _Abis
...>& __t
,
117 { return {__private_init
, __t
.first
}; }
119 template <typename _Tp
, typename _A0
, typename
... _Abis
>
120 _GLIBCXX_SIMD_INTRINSIC
constexpr const auto&
121 __simd_tuple_get_impl(__as_simd_tuple
,
122 const _SimdTuple
<_Tp
, _A0
, _Abis
...>& __t
,
124 { return __t
.first
; }
126 template <typename _Tp
, typename _A0
, typename
... _Abis
>
127 _GLIBCXX_SIMD_INTRINSIC
constexpr auto&
128 __simd_tuple_get_impl(__as_simd_tuple
, _SimdTuple
<_Tp
, _A0
, _Abis
...>& __t
,
130 { return __t
.first
; }
132 template <typename _R
, size_t _Np
, typename _Tp
, typename
... _Abis
>
133 _GLIBCXX_SIMD_INTRINSIC
constexpr auto
134 __simd_tuple_get_impl(_R
, const _SimdTuple
<_Tp
, _Abis
...>& __t
,
136 { return __simd_tuple_get_impl(_R(), __t
.second
, _SizeConstant
<_Np
- 1>()); }
138 template <size_t _Np
, typename _Tp
, typename
... _Abis
>
139 _GLIBCXX_SIMD_INTRINSIC
constexpr auto&
140 __simd_tuple_get_impl(__as_simd_tuple
, _SimdTuple
<_Tp
, _Abis
...>& __t
,
143 return __simd_tuple_get_impl(__as_simd_tuple(), __t
.second
,
144 _SizeConstant
<_Np
- 1>());
147 template <size_t _Np
, typename _Tp
, typename
... _Abis
>
148 _GLIBCXX_SIMD_INTRINSIC
constexpr auto
149 __get_simd_at(const _SimdTuple
<_Tp
, _Abis
...>& __t
)
150 { return __simd_tuple_get_impl(__as_simd(), __t
, _SizeConstant
<_Np
>()); }
153 // __get_tuple_at<_Np> {{{
154 template <size_t _Np
, typename _Tp
, typename
... _Abis
>
155 _GLIBCXX_SIMD_INTRINSIC
constexpr auto
156 __get_tuple_at(const _SimdTuple
<_Tp
, _Abis
...>& __t
)
158 return __simd_tuple_get_impl(__as_simd_tuple(), __t
, _SizeConstant
<_Np
>());
161 template <size_t _Np
, typename _Tp
, typename
... _Abis
>
162 _GLIBCXX_SIMD_INTRINSIC
constexpr auto&
163 __get_tuple_at(_SimdTuple
<_Tp
, _Abis
...>& __t
)
165 return __simd_tuple_get_impl(__as_simd_tuple(), __t
, _SizeConstant
<_Np
>());
168 // __tuple_element_meta {{{1
169 template <typename _Tp
, typename _Abi
, size_t _Offset
>
170 struct __tuple_element_meta
: public _Abi::_SimdImpl
172 static_assert(is_same_v
<typename
_Abi::_SimdImpl::abi_type
,
173 _Abi
>); // this fails e.g. when _SimdImpl is an
174 // alias for _SimdImplBuiltin<_DifferentAbi>
175 using value_type
= _Tp
;
176 using abi_type
= _Abi
;
177 using _Traits
= _SimdTraits
<_Tp
, _Abi
>;
178 using _MaskImpl
= typename
_Abi::_MaskImpl
;
179 using _MaskMember
= typename
_Traits::_MaskMember
;
180 using simd_type
= simd
<_Tp
, _Abi
>;
181 static constexpr size_t _S_offset
= _Offset
;
182 static constexpr size_t _S_size() { return simd_size
<_Tp
, _Abi
>::value
; }
183 static constexpr _MaskImpl _S_mask_impl
= {};
185 template <size_t _Np
, bool _Sanitized
>
186 _GLIBCXX_SIMD_INTRINSIC
static auto
187 _S_submask(_BitMask
<_Np
, _Sanitized
> __bits
)
188 { return __bits
.template _M_extract
<_Offset
, _S_size()>(); }
190 template <size_t _Np
, bool _Sanitized
>
191 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
192 _S_make_mask(_BitMask
<_Np
, _Sanitized
> __bits
)
194 return _MaskImpl::template _S_convert
<_Tp
>(
195 __bits
.template _M_extract
<_Offset
, _S_size()>()._M_sanitized());
198 _GLIBCXX_SIMD_INTRINSIC
static _ULLong
199 _S_mask_to_shifted_ullong(_MaskMember __k
)
200 { return _MaskImpl::_S_to_bits(__k
).to_ullong() << _Offset
; }
203 template <size_t _Offset
, typename _Tp
, typename _Abi
, typename
... _As
>
204 __tuple_element_meta
<_Tp
, _Abi
, _Offset
>
205 __make_meta(const _SimdTuple
<_Tp
, _Abi
, _As
...>&)
209 // _WithOffset wrapper class {{{
210 template <size_t _Offset
, typename _Base
>
211 struct _WithOffset
: public _Base
213 static inline constexpr size_t _S_offset
= _Offset
;
215 _GLIBCXX_SIMD_INTRINSIC
char* _M_as_charptr()
217 return reinterpret_cast<char*>(this)
218 + _S_offset
* sizeof(typename
_Base::value_type
);
221 _GLIBCXX_SIMD_INTRINSIC
const char* _M_as_charptr() const
223 return reinterpret_cast<const char*>(this)
224 + _S_offset
* sizeof(typename
_Base::value_type
);
228 // make _WithOffset<_WithOffset> ill-formed to use:
229 template <size_t _O0
, size_t _O1
, typename _Base
>
230 struct _WithOffset
<_O0
, _WithOffset
<_O1
, _Base
>> {};
232 template <size_t _Offset
, typename _Tp
>
234 __add_offset(_Tp
& __base
)
235 { return static_cast<_WithOffset
<_Offset
, __remove_cvref_t
<_Tp
>>&>(__base
); }
237 template <size_t _Offset
, typename _Tp
>
239 __add_offset(const _Tp
& __base
)
241 return static_cast<const _WithOffset
<_Offset
, __remove_cvref_t
<_Tp
>>&>(
245 template <size_t _Offset
, size_t _ExistingOffset
, typename _Tp
>
247 __add_offset(_WithOffset
<_ExistingOffset
, _Tp
>& __base
)
249 return static_cast<_WithOffset
<_Offset
+ _ExistingOffset
, _Tp
>&>(
250 static_cast<_Tp
&>(__base
));
253 template <size_t _Offset
, size_t _ExistingOffset
, typename _Tp
>
255 __add_offset(const _WithOffset
<_ExistingOffset
, _Tp
>& __base
)
257 return static_cast<const _WithOffset
<_Offset
+ _ExistingOffset
, _Tp
>&>(
258 static_cast<const _Tp
&>(__base
));
261 template <typename _Tp
>
262 constexpr inline size_t __offset
= 0;
264 template <size_t _Offset
, typename _Tp
>
265 constexpr inline size_t __offset
<_WithOffset
<_Offset
, _Tp
>>
266 = _WithOffset
<_Offset
, _Tp
>::_S_offset
;
268 template <typename _Tp
>
269 constexpr inline size_t __offset
<const _Tp
> = __offset
<_Tp
>;
271 template <typename _Tp
>
272 constexpr inline size_t __offset
<_Tp
&> = __offset
<_Tp
>;
274 template <typename _Tp
>
275 constexpr inline size_t __offset
<_Tp
&&> = __offset
<_Tp
>;
278 // _SimdTuple specializations {{{1
280 template <typename _Tp
>
281 struct _SimdTuple
<_Tp
>
283 using value_type
= _Tp
;
284 static constexpr size_t _S_tuple_size
= 0;
285 static constexpr size_t _S_size() { return 0; }
288 // _SimdTupleData {{{2
289 template <typename _FirstType
, typename _SecondType
>
290 struct _SimdTupleData
295 _GLIBCXX_SIMD_INTRINSIC
296 constexpr bool _M_is_constprop() const
298 if constexpr (is_class_v
<_FirstType
>)
299 return first
._M_is_constprop() && second
._M_is_constprop();
301 return __builtin_constant_p(first
) && second
._M_is_constprop();
305 template <typename _FirstType
, typename _Tp
>
306 struct _SimdTupleData
<_FirstType
, _SimdTuple
<_Tp
>>
309 static constexpr _SimdTuple
<_Tp
> second
= {};
311 _GLIBCXX_SIMD_INTRINSIC
312 constexpr bool _M_is_constprop() const
314 if constexpr (is_class_v
<_FirstType
>)
315 return first
._M_is_constprop();
317 return __builtin_constant_p(first
);
322 template <typename _Tp
, typename _Abi0
, typename
... _Abis
>
323 struct _SimdTuple
<_Tp
, _Abi0
, _Abis
...>
324 : _SimdTupleData
<typename _SimdTraits
<_Tp
, _Abi0
>::_SimdMember
,
325 _SimdTuple
<_Tp
, _Abis
...>>
327 static_assert(!__is_fixed_size_abi_v
<_Abi0
>);
328 using value_type
= _Tp
;
329 using _FirstType
= typename _SimdTraits
<_Tp
, _Abi0
>::_SimdMember
;
330 using _FirstAbi
= _Abi0
;
331 using _SecondType
= _SimdTuple
<_Tp
, _Abis
...>;
332 static constexpr size_t _S_tuple_size
= sizeof...(_Abis
) + 1;
334 static constexpr size_t _S_size()
335 { return simd_size_v
<_Tp
, _Abi0
> + _SecondType::_S_size(); }
337 static constexpr size_t _S_first_size
= simd_size_v
<_Tp
, _Abi0
>;
338 static constexpr bool _S_is_homogeneous
= (is_same_v
<_Abi0
, _Abis
> && ...);
340 using _Base
= _SimdTupleData
<typename _SimdTraits
<_Tp
, _Abi0
>::_SimdMember
,
341 _SimdTuple
<_Tp
, _Abis
...>>;
345 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple() = default;
346 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple(const _SimdTuple
&) = default;
347 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple
& operator=(const _SimdTuple
&)
350 template <typename _Up
>
351 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple(_Up
&& __x
)
352 : _Base
{static_cast<_Up
&&>(__x
)} {}
354 template <typename _Up
, typename _Up2
>
355 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple(_Up
&& __x
, _Up2
&& __y
)
356 : _Base
{static_cast<_Up
&&>(__x
), static_cast<_Up2
&&>(__y
)} {}
358 template <typename _Up
>
359 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple(_Up
&& __x
, _SimdTuple
<_Tp
>)
360 : _Base
{static_cast<_Up
&&>(__x
)} {}
362 _GLIBCXX_SIMD_INTRINSIC
char* _M_as_charptr()
363 { return reinterpret_cast<char*>(this); }
365 _GLIBCXX_SIMD_INTRINSIC
const char* _M_as_charptr() const
366 { return reinterpret_cast<const char*>(this); }
368 template <size_t _Np
>
369 _GLIBCXX_SIMD_INTRINSIC
constexpr auto& _M_at()
371 if constexpr (_Np
== 0)
374 return second
.template _M_at
<_Np
- 1>();
377 template <size_t _Np
>
378 _GLIBCXX_SIMD_INTRINSIC
constexpr const auto& _M_at() const
380 if constexpr (_Np
== 0)
383 return second
.template _M_at
<_Np
- 1>();
386 template <size_t _Np
>
387 _GLIBCXX_SIMD_INTRINSIC
constexpr auto _M_simd_at() const
389 if constexpr (_Np
== 0)
390 return simd
<_Tp
, _Abi0
>(__private_init
, first
);
392 return second
.template _M_simd_at
<_Np
- 1>();
395 template <size_t _Offset
= 0, typename _Fp
>
396 _GLIBCXX_SIMD_INTRINSIC
static constexpr _SimdTuple
397 _S_generate(_Fp
&& __gen
, _SizeConstant
<_Offset
> = {})
399 auto&& __first
= __gen(__tuple_element_meta
<_Tp
, _Abi0
, _Offset
>());
400 if constexpr (_S_tuple_size
== 1)
404 _SecondType::_S_generate(
405 static_cast<_Fp
&&>(__gen
),
406 _SizeConstant
<_Offset
+ simd_size_v
<_Tp
, _Abi0
>>())};
409 template <size_t _Offset
= 0, typename _Fp
, typename
... _More
>
410 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
411 _M_apply_wrapped(_Fp
&& __fun
, const _More
&... __more
) const
414 = __fun(__make_meta
<_Offset
>(*this), first
, __more
.first
...);
415 if constexpr (_S_tuple_size
== 1)
420 second
.template _M_apply_wrapped
<_Offset
+ simd_size_v
<_Tp
, _Abi0
>>(
421 static_cast<_Fp
&&>(__fun
), __more
.second
...)};
424 template <typename _Tup
>
425 _GLIBCXX_SIMD_INTRINSIC
constexpr decltype(auto)
426 _M_extract_argument(_Tup
&& __tup
) const
428 using _TupT
= typename __remove_cvref_t
<_Tup
>::value_type
;
429 if constexpr (is_same_v
<_SimdTuple
, __remove_cvref_t
<_Tup
>>)
431 else if (__builtin_is_constant_evaluated())
432 return __fixed_size_storage_t
<_TupT
, _S_first_size
>::_S_generate([&](
433 auto __meta
) constexpr {
434 return __meta
._S_generator(
435 [&](auto __i
) constexpr { return __tup
[__i
]; },
436 static_cast<_TupT
*>(nullptr));
440 __fixed_size_storage_t
<_TupT
, _S_first_size
> __r
;
441 __builtin_memcpy(__r
._M_as_charptr(), __tup
._M_as_charptr(),
447 template <typename _Tup
>
448 _GLIBCXX_SIMD_INTRINSIC
constexpr auto&
449 _M_skip_argument(_Tup
&& __tup
) const
451 static_assert(_S_tuple_size
> 1);
452 using _Up
= __remove_cvref_t
<_Tup
>;
453 constexpr size_t __off
= __offset
<_Up
>;
454 if constexpr (_S_first_size
== _Up::_S_first_size
&& __off
== 0)
456 else if constexpr (_S_first_size
> _Up::_S_first_size
457 && _S_first_size
% _Up::_S_first_size
== 0
459 return __simd_tuple_pop_front
<_S_first_size
>(__tup
);
460 else if constexpr (_S_first_size
+ __off
< _Up::_S_first_size
)
461 return __add_offset
<_S_first_size
>(__tup
);
462 else if constexpr (_S_first_size
+ __off
== _Up::_S_first_size
)
465 __assert_unreachable
<_Tup
>();
468 template <size_t _Offset
, typename
... _More
>
469 _GLIBCXX_SIMD_INTRINSIC
constexpr void
470 _M_assign_front(const _SimdTuple
<_Tp
, _Abi0
, _More
...>& __x
) &
472 static_assert(_Offset
== 0);
474 if constexpr (sizeof...(_More
) > 0)
476 static_assert(sizeof...(_Abis
) >= sizeof...(_More
));
477 second
.template _M_assign_front
<0>(__x
.second
);
481 template <size_t _Offset
>
482 _GLIBCXX_SIMD_INTRINSIC
constexpr void
483 _M_assign_front(const _FirstType
& __x
) &
485 static_assert(_Offset
== 0);
489 template <size_t _Offset
, typename
... _As
>
490 _GLIBCXX_SIMD_INTRINSIC
constexpr void
491 _M_assign_front(const _SimdTuple
<_Tp
, _As
...>& __x
) &
493 __builtin_memcpy(_M_as_charptr() + _Offset
* sizeof(value_type
),
495 sizeof(_Tp
) * _SimdTuple
<_Tp
, _As
...>::_S_size());
499 * Iterate over the first objects in this _SimdTuple and call __fun for each
500 * of them. If additional arguments are passed via __more, chunk them into
501 * _SimdTuple or __vector_type_t objects of the same number of values.
503 template <typename _Fp
, typename
... _More
>
504 _GLIBCXX_SIMD_INTRINSIC
constexpr _SimdTuple
505 _M_apply_per_chunk(_Fp
&& __fun
, _More
&&... __more
) const
509 is_lvalue_reference
<_More
>,
510 negation
<is_const
<remove_reference_t
<_More
>>>>) )
512 // need to write back at least one of __more after calling __fun
513 auto&& __first
= [&](auto... __args
) constexpr
515 auto __r
= __fun(__tuple_element_meta
<_Tp
, _Abi0
, 0>(), first
,
517 [[maybe_unused
]] auto&& __ignore_me
= {(
518 [](auto&& __dst
, const auto& __src
) {
519 if constexpr (is_assignable_v
<decltype(__dst
),
522 __dst
.template _M_assign_front
<__offset
<decltype(__dst
)>>(
525 }(static_cast<_More
&&>(__more
), __args
),
529 (_M_extract_argument(__more
)...);
530 if constexpr (_S_tuple_size
== 1)
534 second
._M_apply_per_chunk(static_cast<_Fp
&&>(__fun
),
535 _M_skip_argument(__more
)...)};
539 auto&& __first
= __fun(__tuple_element_meta
<_Tp
, _Abi0
, 0>(), first
,
540 _M_extract_argument(__more
)...);
541 if constexpr (_S_tuple_size
== 1)
545 second
._M_apply_per_chunk(static_cast<_Fp
&&>(__fun
),
546 _M_skip_argument(__more
)...)};
550 template <typename _R
= _Tp
, typename _Fp
, typename
... _More
>
551 _GLIBCXX_SIMD_INTRINSIC
auto _M_apply_r(_Fp
&& __fun
,
552 const _More
&... __more
) const
554 auto&& __first
= __fun(__tuple_element_meta
<_Tp
, _Abi0
, 0>(), first
,
556 if constexpr (_S_tuple_size
== 1)
559 return __simd_tuple_concat
<_R
>(
560 __first
, second
.template _M_apply_r
<_R
>(static_cast<_Fp
&&>(__fun
),
564 template <typename _Fp
, typename
... _More
>
565 _GLIBCXX_SIMD_INTRINSIC
constexpr friend _SanitizedBitMask
<_S_size()>
566 _M_test(const _Fp
& __fun
, const _SimdTuple
& __x
, const _More
&... __more
)
568 const _SanitizedBitMask
<_S_first_size
> __first
569 = _Abi0::_MaskImpl::_S_to_bits(
570 __fun(__tuple_element_meta
<_Tp
, _Abi0
, 0>(), __x
.first
,
572 if constexpr (_S_tuple_size
== 1)
575 return _M_test(__fun
, __x
.second
, __more
.second
...)
576 ._M_prepend(__first
);
579 template <typename _Up
, _Up _I
>
580 _GLIBCXX_SIMD_INTRINSIC
constexpr _Tp
581 operator[](integral_constant
<_Up
, _I
>) const noexcept
583 if constexpr (_I
< simd_size_v
<_Tp
, _Abi0
>)
584 return _M_subscript_read(_I
);
586 return second
[integral_constant
<_Up
, _I
- simd_size_v
<_Tp
, _Abi0
>>()];
589 _Tp
operator[](size_t __i
) const noexcept
591 if constexpr (_S_tuple_size
== 1)
592 return _M_subscript_read(__i
);
595 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
596 return reinterpret_cast<const __may_alias
<_Tp
>*>(this)[__i
];
598 if constexpr (__is_scalar_abi
<_Abi0
>())
600 const _Tp
* ptr
= &first
;
604 return __i
< simd_size_v
<_Tp
, _Abi0
>
605 ? _M_subscript_read(__i
)
606 : second
[__i
- simd_size_v
<_Tp
, _Abi0
>];
611 void _M_set(size_t __i
, _Tp __val
) noexcept
613 if constexpr (_S_tuple_size
== 1)
614 return _M_subscript_write(__i
, __val
);
617 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
618 reinterpret_cast<__may_alias
<_Tp
>*>(this)[__i
] = __val
;
620 if (__i
< simd_size_v
<_Tp
, _Abi0
>)
621 _M_subscript_write(__i
, __val
);
623 second
._M_set(__i
- simd_size_v
<_Tp
, _Abi0
>, __val
);
629 // _M_subscript_read/_write {{{
630 _Tp
_M_subscript_read([[maybe_unused
]] size_t __i
) const noexcept
632 if constexpr (__is_vectorizable_v
<_FirstType
>)
638 void _M_subscript_write([[maybe_unused
]] size_t __i
, _Tp __y
) noexcept
640 if constexpr (__is_vectorizable_v
<_FirstType
>)
643 first
._M_set(__i
, __y
);
649 // __make_simd_tuple {{{1
650 template <typename _Tp
, typename _A0
>
651 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
<_Tp
, _A0
>
652 __make_simd_tuple(simd
<_Tp
, _A0
> __x0
)
653 { return {__data(__x0
)}; }
655 template <typename _Tp
, typename _A0
, typename
... _As
>
656 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
<_Tp
, _A0
, _As
...>
657 __make_simd_tuple(const simd
<_Tp
, _A0
>& __x0
, const simd
<_Tp
, _As
>&... __xs
)
658 { return {__data(__x0
), __make_simd_tuple(__xs
...)}; }
660 template <typename _Tp
, typename _A0
>
661 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
<_Tp
, _A0
>
662 __make_simd_tuple(const typename _SimdTraits
<_Tp
, _A0
>::_SimdMember
& __arg0
)
665 template <typename _Tp
, typename _A0
, typename _A1
, typename
... _Abis
>
666 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
<_Tp
, _A0
, _A1
, _Abis
...>
668 const typename _SimdTraits
<_Tp
, _A0
>::_SimdMember
& __arg0
,
669 const typename _SimdTraits
<_Tp
, _A1
>::_SimdMember
& __arg1
,
670 const typename _SimdTraits
<_Tp
, _Abis
>::_SimdMember
&... __args
)
671 { return {__arg0
, __make_simd_tuple
<_Tp
, _A1
, _Abis
...>(__arg1
, __args
...)}; }
673 // __to_simd_tuple {{{1
674 template <typename _Tp
, size_t _Np
, typename _V
, size_t _NV
, typename
... _VX
>
675 _GLIBCXX_SIMD_INTRINSIC
constexpr __fixed_size_storage_t
<_Tp
, _Np
>
676 __to_simd_tuple(const array
<_V
, _NV
>& __from
, const _VX
... __fromX
);
678 template <typename _Tp
, size_t _Np
,
679 size_t _Offset
= 0, // skip this many elements in __from0
680 typename _R
= __fixed_size_storage_t
<_Tp
, _Np
>, typename _V0
,
681 typename _V0VT
= _VectorTraits
<_V0
>, typename
... _VX
>
682 _GLIBCXX_SIMD_INTRINSIC _R
constexpr __to_simd_tuple(const _V0 __from0
,
683 const _VX
... __fromX
)
685 static_assert(is_same_v
<typename
_V0VT::value_type
, _Tp
>);
686 static_assert(_Offset
< _V0VT::_S_full_size
);
687 using _R0
= __vector_type_t
<_Tp
, _R::_S_first_size
>;
688 if constexpr (_R::_S_tuple_size
== 1)
690 if constexpr (_Np
== 1)
691 return _R
{__from0
[_Offset
]};
692 else if constexpr (_Offset
== 0 && _V0VT::_S_full_size
>= _Np
)
693 return _R
{__intrin_bitcast
<_R0
>(__from0
)};
694 else if constexpr (_Offset
* 2 == _V0VT::_S_full_size
695 && _V0VT::_S_full_size
/ 2 >= _Np
)
696 return _R
{__intrin_bitcast
<_R0
>(__extract_part
<1, 2>(__from0
))};
697 else if constexpr (_Offset
* 4 == _V0VT::_S_full_size
698 && _V0VT::_S_full_size
/ 4 >= _Np
)
699 return _R
{__intrin_bitcast
<_R0
>(__extract_part
<1, 4>(__from0
))};
701 __assert_unreachable
<_Tp
>();
705 if constexpr (1 == _R::_S_first_size
)
706 { // extract one scalar and recurse
707 if constexpr (_Offset
+ 1 < _V0VT::_S_full_size
)
708 return _R
{__from0
[_Offset
],
709 __to_simd_tuple
<_Tp
, _Np
- 1, _Offset
+ 1>(__from0
,
712 return _R
{__from0
[_Offset
],
713 __to_simd_tuple
<_Tp
, _Np
- 1, 0>(__fromX
...)};
716 // place __from0 into _R::first and recurse for __fromX -> _R::second
717 else if constexpr (_V0VT::_S_full_size
== _R::_S_first_size
720 __to_simd_tuple
<_Tp
, _Np
- _R::_S_first_size
>(__fromX
...)};
722 // place lower part of __from0 into _R::first and recurse with _Offset
723 else if constexpr (_V0VT::_S_full_size
> _R::_S_first_size
725 return _R
{__intrin_bitcast
<_R0
>(__from0
),
726 __to_simd_tuple
<_Tp
, _Np
- _R::_S_first_size
,
727 _R::_S_first_size
>(__from0
, __fromX
...)};
729 // place lower part of second quarter of __from0 into _R::first and
730 // recurse with _Offset
731 else if constexpr (_Offset
* 4 == _V0VT::_S_full_size
732 && _V0VT::_S_full_size
>= 4 * _R::_S_first_size
)
733 return _R
{__intrin_bitcast
<_R0
>(__extract_part
<2, 4>(__from0
)),
734 __to_simd_tuple
<_Tp
, _Np
- _R::_S_first_size
,
735 _Offset
+ _R::_S_first_size
>(__from0
,
738 // place lower half of high half of __from0 into _R::first and recurse
740 else if constexpr (_Offset
* 2 == _V0VT::_S_full_size
741 && _V0VT::_S_full_size
>= 4 * _R::_S_first_size
)
742 return _R
{__intrin_bitcast
<_R0
>(__extract_part
<2, 4>(__from0
)),
743 __to_simd_tuple
<_Tp
, _Np
- _R::_S_first_size
,
744 _Offset
+ _R::_S_first_size
>(__from0
,
747 // place high half of __from0 into _R::first and recurse with __fromX
748 else if constexpr (_Offset
* 2 == _V0VT::_S_full_size
749 && _V0VT::_S_full_size
/ 2 >= _R::_S_first_size
)
750 return _R
{__intrin_bitcast
<_R0
>(__extract_part
<1, 2>(__from0
)),
751 __to_simd_tuple
<_Tp
, _Np
- _R::_S_first_size
, 0>(
754 // ill-formed if some unforseen pattern is needed
756 __assert_unreachable
<_Tp
>();
760 template <typename _Tp
, size_t _Np
, typename _V
, size_t _NV
, typename
... _VX
>
761 _GLIBCXX_SIMD_INTRINSIC
constexpr __fixed_size_storage_t
<_Tp
, _Np
>
762 __to_simd_tuple(const array
<_V
, _NV
>& __from
, const _VX
... __fromX
)
764 if constexpr (is_same_v
<_Tp
, _V
>)
768 "An array of scalars must be the last argument to __to_simd_tuple");
769 return __call_with_subscripts(
771 make_index_sequence
<_NV
>(), [&](const auto... __args
) constexpr {
772 return __simd_tuple_concat(
773 _SimdTuple
<_Tp
, simd_abi::scalar
>{__args
}..., _SimdTuple
<_Tp
>());
777 return __call_with_subscripts(
779 make_index_sequence
<_NV
>(), [&](const auto... __args
) constexpr {
780 return __to_simd_tuple
<_Tp
, _Np
>(__args
..., __fromX
...);
// Maps any pack index to _Tp; used to repeat a type once per index.
template <size_t, typename _Tp>
  using __to_tuple_helper = _Tp;
787 template <typename _Tp
, typename _A0
, size_t _NOut
, size_t _Np
,
789 _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t
<_Tp
, _NOut
>
790 __to_simd_tuple_impl(index_sequence
<_Indexes
...>,
791 const array
<__vector_type_t
<_Tp
, simd_size_v
<_Tp
, _A0
>>, _Np
>& __args
)
793 return __make_simd_tuple
<_Tp
, __to_tuple_helper
<_Indexes
, _A0
>...>(
794 __args
[_Indexes
]...);
797 template <typename _Tp
, typename _A0
, size_t _NOut
, size_t _Np
,
798 typename _R
= __fixed_size_storage_t
<_Tp
, _NOut
>>
799 _GLIBCXX_SIMD_INTRINSIC _R
800 __to_simd_tuple_sized(
801 const array
<__vector_type_t
<_Tp
, simd_size_v
<_Tp
, _A0
>>, _Np
>& __args
)
803 static_assert(_Np
* simd_size_v
<_Tp
, _A0
> >= _NOut
);
804 return __to_simd_tuple_impl
<_Tp
, _A0
, _NOut
>(
805 make_index_sequence
<_R::_S_tuple_size
>(), __args
);
808 // __optimize_simd_tuple {{{1
809 template <typename _Tp
>
810 _GLIBCXX_SIMD_INTRINSIC _SimdTuple
<_Tp
>
811 __optimize_simd_tuple(const _SimdTuple
<_Tp
>)
814 template <typename _Tp
, typename _Ap
>
815 _GLIBCXX_SIMD_INTRINSIC
const _SimdTuple
<_Tp
, _Ap
>&
816 __optimize_simd_tuple(const _SimdTuple
<_Tp
, _Ap
>& __x
)
819 template <typename _Tp
, typename _A0
, typename _A1
, typename
... _Abis
,
820 typename _R
= __fixed_size_storage_t
<
821 _Tp
, _SimdTuple
<_Tp
, _A0
, _A1
, _Abis
...>::_S_size()>>
822 _GLIBCXX_SIMD_INTRINSIC _R
823 __optimize_simd_tuple(const _SimdTuple
<_Tp
, _A0
, _A1
, _Abis
...>& __x
)
825 using _Tup
= _SimdTuple
<_Tp
, _A0
, _A1
, _Abis
...>;
826 if constexpr (is_same_v
<_R
, _Tup
>)
828 else if constexpr (is_same_v
<typename
_R::_FirstType
,
829 typename
_Tup::_FirstType
>)
830 return {__x
.first
, __optimize_simd_tuple(__x
.second
)};
831 else if constexpr (__is_scalar_abi
<_A0
>()
832 || _A0::template _S_is_partial
<_Tp
>)
833 return {__generate_from_n_evaluations
<_R::_S_first_size
,
834 typename
_R::_FirstType
>(
835 [&](auto __i
) { return __x
[__i
]; }),
836 __optimize_simd_tuple(
837 __simd_tuple_pop_front
<_R::_S_first_size
>(__x
))};
838 else if constexpr (is_same_v
<_A0
, _A1
>
839 && _R::_S_first_size
== simd_size_v
<_Tp
, _A0
> + simd_size_v
<_Tp
, _A1
>)
840 return {__concat(__x
.template _M_at
<0>(), __x
.template _M_at
<1>()),
841 __optimize_simd_tuple(__x
.second
.second
)};
842 else if constexpr (sizeof...(_Abis
) >= 2
843 && _R::_S_first_size
== (4 * simd_size_v
<_Tp
, _A0
>)
844 && simd_size_v
<_Tp
, _A0
> == __simd_tuple_element_t
<
845 (sizeof...(_Abis
) >= 2 ? 3 : 0), _Tup
>::size())
847 __concat(__concat(__x
.template _M_at
<0>(), __x
.template _M_at
<1>()),
848 __concat(__x
.template _M_at
<2>(), __x
.template _M_at
<3>())),
849 __optimize_simd_tuple(__x
.second
.second
.second
.second
)};
852 static_assert(sizeof(_R
) == sizeof(__x
));
854 __builtin_memcpy(__r
._M_as_charptr(), __x
._M_as_charptr(),
855 sizeof(_Tp
) * _R::_S_size());
860 // __for_each(const _SimdTuple &, Fun) {{{1
861 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _Fp
>
862 _GLIBCXX_SIMD_INTRINSIC
constexpr void
863 __for_each(const _SimdTuple
<_Tp
, _A0
>& __t
, _Fp
&& __fun
)
864 { static_cast<_Fp
&&>(__fun
)(__make_meta
<_Offset
>(__t
), __t
.first
); }
866 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _A1
,
867 typename
... _As
, typename _Fp
>
868 _GLIBCXX_SIMD_INTRINSIC
constexpr void
869 __for_each(const _SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __t
, _Fp
&& __fun
)
871 __fun(__make_meta
<_Offset
>(__t
), __t
.first
);
872 __for_each
<_Offset
+ simd_size
<_Tp
, _A0
>::value
>(__t
.second
,
873 static_cast<_Fp
&&>(__fun
));
876 // __for_each(_SimdTuple &, Fun) {{{1
877 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _Fp
>
878 _GLIBCXX_SIMD_INTRINSIC
constexpr void
879 __for_each(_SimdTuple
<_Tp
, _A0
>& __t
, _Fp
&& __fun
)
880 { static_cast<_Fp
&&>(__fun
)(__make_meta
<_Offset
>(__t
), __t
.first
); }
882 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _A1
,
883 typename
... _As
, typename _Fp
>
884 _GLIBCXX_SIMD_INTRINSIC
constexpr void
885 __for_each(_SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __t
, _Fp
&& __fun
)
887 __fun(__make_meta
<_Offset
>(__t
), __t
.first
);
888 __for_each
<_Offset
+ simd_size
<_Tp
, _A0
>::value
>(__t
.second
,
889 static_cast<_Fp
&&>(__fun
));
892 // __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
893 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _Fp
>
894 _GLIBCXX_SIMD_INTRINSIC
constexpr void
895 __for_each(_SimdTuple
<_Tp
, _A0
>& __a
, const _SimdTuple
<_Tp
, _A0
>& __b
,
898 static_cast<_Fp
&&>(__fun
)(__make_meta
<_Offset
>(__a
), __a
.first
, __b
.first
);
901 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _A1
,
902 typename
... _As
, typename _Fp
>
903 _GLIBCXX_SIMD_INTRINSIC
constexpr void
904 __for_each(_SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __a
,
905 const _SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __b
, _Fp
&& __fun
)
907 __fun(__make_meta
<_Offset
>(__a
), __a
.first
, __b
.first
);
908 __for_each
<_Offset
+ simd_size
<_Tp
, _A0
>::value
>(__a
.second
, __b
.second
,
909 static_cast<_Fp
&&>(__fun
));
912 // __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
913 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _Fp
>
914 _GLIBCXX_SIMD_INTRINSIC
constexpr void
915 __for_each(const _SimdTuple
<_Tp
, _A0
>& __a
, const _SimdTuple
<_Tp
, _A0
>& __b
,
918 static_cast<_Fp
&&>(__fun
)(__make_meta
<_Offset
>(__a
), __a
.first
, __b
.first
);
921 template <size_t _Offset
= 0, typename _Tp
, typename _A0
, typename _A1
,
922 typename
... _As
, typename _Fp
>
923 _GLIBCXX_SIMD_INTRINSIC
constexpr void
924 __for_each(const _SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __a
,
925 const _SimdTuple
<_Tp
, _A0
, _A1
, _As
...>& __b
, _Fp
&& __fun
)
927 __fun(__make_meta
<_Offset
>(__a
), __a
.first
, __b
.first
);
928 __for_each
<_Offset
+ simd_size
<_Tp
, _A0
>::value
>(__a
.second
, __b
.second
,
929 static_cast<_Fp
&&>(__fun
));
933 // __extract_part(_SimdTuple) {{{
934 template <int _Index
, int _Total
, int _Combine
, typename _Tp
, typename _A0
,
936 _GLIBCXX_SIMD_INTRINSIC
auto // __vector_type_t or _SimdTuple
937 __extract_part(const _SimdTuple
<_Tp
, _A0
, _As
...>& __x
)
940 // (a) 4, 4, 4 => 3, 3, 3, 3 (_Total = 4)
941 // (b) 2, 2, 2 => 3, 3 (_Total = 2)
942 // (c) 4, 2 => 2, 2, 2 (_Total = 3)
943 using _Tuple
= _SimdTuple
<_Tp
, _A0
, _As
...>;
944 static_assert(_Index
+ _Combine
<= _Total
&& _Index
>= 0 && _Total
>= 1);
945 constexpr size_t _Np
= _Tuple::_S_size();
946 static_assert(_Np
>= _Total
&& _Np
% _Total
== 0);
947 constexpr size_t __values_per_part
= _Np
/ _Total
;
948 [[maybe_unused
]] constexpr size_t __values_to_skip
949 = _Index
* __values_per_part
;
950 constexpr size_t __return_size
= __values_per_part
* _Combine
;
951 using _RetAbi
= simd_abi::deduce_t
<_Tp
, __return_size
>;
953 // handle (optimize) the simple cases
954 if constexpr (_Index
== 0 && _Tuple::_S_first_size
== __return_size
)
955 return __x
.first
._M_data
;
956 else if constexpr (_Index
== 0 && _Total
== _Combine
)
958 else if constexpr (_Index
== 0 && _Tuple::_S_first_size
>= __return_size
)
959 return __intrin_bitcast
<__vector_type_t
<_Tp
, __return_size
>>(
960 __as_vector(__x
.first
));
962 // recurse to skip unused data members at the beginning of _SimdTuple
963 else if constexpr (__values_to_skip
>= _Tuple::_S_first_size
)
965 if constexpr (_Tuple::_S_first_size
% __values_per_part
== 0)
967 constexpr int __parts_in_first
968 = _Tuple::_S_first_size
/ __values_per_part
;
969 return __extract_part
<_Index
- __parts_in_first
,
970 _Total
- __parts_in_first
, _Combine
>(
974 return __extract_part
<__values_to_skip
- _Tuple::_S_first_size
,
975 _Np
- _Tuple::_S_first_size
, __return_size
>(
979 // extract from multiple _SimdTuple data members
980 else if constexpr (__return_size
> _Tuple::_S_first_size
- __values_to_skip
)
982 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
983 const __may_alias
<_Tp
>* const element_ptr
984 = reinterpret_cast<const __may_alias
<_Tp
>*>(&__x
) + __values_to_skip
;
985 return __as_vector(simd
<_Tp
, _RetAbi
>(element_ptr
, element_aligned
));
987 [[maybe_unused
]] constexpr size_t __offset
= __values_to_skip
;
988 return __as_vector(simd
<_Tp
, _RetAbi
>([&](auto __i
) constexpr {
989 constexpr _SizeConstant
<__i
+ __offset
> __k
;
995 // all of the return values are in __x.first
996 else if constexpr (_Tuple::_S_first_size
% __values_per_part
== 0)
997 return __extract_part
<_Index
, _Tuple::_S_first_size
/ __values_per_part
,
998 _Combine
>(__x
.first
);
1000 return __extract_part
<__values_to_skip
, _Tuple::_S_first_size
,
1001 _Combine
* __values_per_part
>(__x
.first
);
1005 // __fixed_size_storage_t<_Tp, _Np>{{{
1006 template <typename _Tp
, int _Np
, typename _Tuple
,
1007 typename _Next
= simd
<_Tp
, _AllNativeAbis::_BestAbi
<_Tp
, _Np
>>,
1008 int _Remain
= _Np
- int(_Next::size())>
1009 struct __fixed_size_storage_builder
;
1011 template <typename _Tp
, int _Np
>
1012 struct __fixed_size_storage
1013 : public __fixed_size_storage_builder
<_Tp
, _Np
, _SimdTuple
<_Tp
>> {};
1015 template <typename _Tp
, int _Np
, typename
... _As
, typename _Next
>
1016 struct __fixed_size_storage_builder
<_Tp
, _Np
, _SimdTuple
<_Tp
, _As
...>, _Next
,
1018 { using type
= _SimdTuple
<_Tp
, _As
..., typename
_Next::abi_type
>; };
1020 template <typename _Tp
, int _Np
, typename
... _As
, typename _Next
, int _Remain
>
1021 struct __fixed_size_storage_builder
<_Tp
, _Np
, _SimdTuple
<_Tp
, _As
...>, _Next
,
1024 using type
= typename __fixed_size_storage_builder
<
1025 _Tp
, _Remain
, _SimdTuple
<_Tp
, _As
..., typename
_Next::abi_type
>>::type
;
1029 // __autocvt_to_simd {{{
1030 template <typename _Tp
, bool = is_arithmetic_v
<__remove_cvref_t
<_Tp
>>>
1031 struct __autocvt_to_simd
1034 using _TT
= __remove_cvref_t
<_Tp
>;
1041 static_assert(is_lvalue_reference
<_Tp
>::value
, "");
1042 static_assert(!is_const
<_Tp
>::value
, "");
1048 static_assert(is_lvalue_reference
<_Tp
>::value
, "");
1049 static_assert(!is_const
<_Tp
>::value
, "");
1053 constexpr inline __autocvt_to_simd(_Tp dd
) : _M_data(dd
) {}
1055 template <typename _Abi
>
1056 operator simd
<typename
_TT::value_type
, _Abi
>()
1057 { return {__private_init
, _M_data
}; }
1059 template <typename _Abi
>
1060 operator simd
<typename
_TT::value_type
, _Abi
>&()
1062 return *reinterpret_cast<simd
<typename
_TT::value_type
, _Abi
>*>(
1066 template <typename _Abi
>
1067 operator simd
<typename
_TT::value_type
, _Abi
>*()
1069 return reinterpret_cast<simd
<typename
_TT::value_type
, _Abi
>*>(
1074 template <typename _Tp
>
1075 __autocvt_to_simd(_Tp
&&) -> __autocvt_to_simd
<_Tp
>;
1077 template <typename _Tp
>
1078 struct __autocvt_to_simd
<_Tp
, true>
1080 using _TT
= __remove_cvref_t
<_Tp
>;
1082 fixed_size_simd
<_TT
, 1> _M_fd
;
1084 constexpr inline __autocvt_to_simd(_Tp dd
) : _M_data(dd
), _M_fd(_M_data
) {}
1086 ~__autocvt_to_simd()
1087 { _M_data
= __data(_M_fd
).first
; }
1089 operator fixed_size_simd
<_TT
, 1>()
1092 operator fixed_size_simd
<_TT
, 1> &()
1094 static_assert(is_lvalue_reference
<_Tp
>::value
, "");
1095 static_assert(!is_const
<_Tp
>::value
, "");
1099 operator fixed_size_simd
<_TT
, 1> *()
1101 static_assert(is_lvalue_reference
<_Tp
>::value
, "");
1102 static_assert(!is_const
<_Tp
>::value
, "");
1109 struct _CommonImplFixedSize
;
1110 template <int _Np
> struct _SimdImplFixedSize
;
1111 template <int _Np
> struct _MaskImplFixedSize
;
1112 // simd_abi::_Fixed {{{
1114 struct simd_abi::_Fixed
1116 template <typename _Tp
> static constexpr size_t _S_size
= _Np
;
1117 template <typename _Tp
> static constexpr size_t _S_full_size
= _Np
;
1118 // validity traits {{{
1119 struct _IsValidAbiTag
: public __bool_constant
<(_Np
> 0)> {};
1121 template <typename _Tp
>
1122 struct _IsValidSizeFor
1123 : __bool_constant
<(_Np
<= simd_abi::max_fixed_size
<_Tp
>)> {};
1125 template <typename _Tp
>
1126 struct _IsValid
: conjunction
<_IsValidAbiTag
, __is_vectorizable
<_Tp
>,
1127 _IsValidSizeFor
<_Tp
>> {};
1129 template <typename _Tp
>
1130 static constexpr bool _S_is_valid_v
= _IsValid
<_Tp
>::value
;
1134 _GLIBCXX_SIMD_INTRINSIC
static constexpr _SanitizedBitMask
<_Np
>
1135 _S_masked(_BitMask
<_Np
> __x
)
1136 { return __x
._M_sanitized(); }
1138 _GLIBCXX_SIMD_INTRINSIC
static constexpr _SanitizedBitMask
<_Np
>
1139 _S_masked(_SanitizedBitMask
<_Np
> __x
)
1144 using _CommonImpl
= _CommonImplFixedSize
;
1145 using _SimdImpl
= _SimdImplFixedSize
<_Np
>;
1146 using _MaskImpl
= _MaskImplFixedSize
<_Np
>;
1150 template <typename _Tp
, bool = _S_is_valid_v
<_Tp
>>
1151 struct __traits
: _InvalidTraits
{};
1153 template <typename _Tp
>
1154 struct __traits
<_Tp
, true>
1156 using _IsValid
= true_type
;
1157 using _SimdImpl
= _SimdImplFixedSize
<_Np
>;
1158 using _MaskImpl
= _MaskImplFixedSize
<_Np
>;
1160 // simd and simd_mask member types {{{
1161 using _SimdMember
= __fixed_size_storage_t
<_Tp
, _Np
>;
1162 using _MaskMember
= _SanitizedBitMask
<_Np
>;
1164 static constexpr size_t _S_simd_align
1165 = std::__bit_ceil(_Np
* sizeof(_Tp
));
1167 static constexpr size_t _S_mask_align
= alignof(_MaskMember
);
1170 // _SimdBase / base class for simd, providing extra conversions {{{
1173 // The following ensures, function arguments are passed via the stack.
1174 // This is important for ABI compatibility across TU boundaries
1175 _SimdBase(const _SimdBase
&) {}
1176 _SimdBase() = default;
1178 explicit operator const _SimdMember
&() const
1179 { return static_cast<const simd
<_Tp
, _Fixed
>*>(this)->_M_data
; }
1181 explicit operator array
<_Tp
, _Np
>() const
1183 array
<_Tp
, _Np
> __r
;
1184 // _SimdMember can be larger because of higher alignment
1185 static_assert(sizeof(__r
) <= sizeof(_SimdMember
), "");
1186 __builtin_memcpy(__r
.data(), &static_cast<const _SimdMember
&>(*this),
1194 // empty. The bitset interface suffices
1195 struct _MaskBase
{};
1198 // _SimdCastType {{{
1199 struct _SimdCastType
1201 _SimdCastType(const array
<_Tp
, _Np
>&);
1202 _SimdCastType(const _SimdMember
& dd
) : _M_data(dd
) {}
1203 explicit operator const _SimdMember
&() const { return _M_data
; }
1206 const _SimdMember
& _M_data
;
1210 // _MaskCastType {{{
1213 _MaskCastType() = delete;
1221 // _CommonImplFixedSize {{{
1222 struct _CommonImplFixedSize
1225 template <typename _Tp
, typename
... _As
>
1226 _GLIBCXX_SIMD_INTRINSIC
static void
1227 _S_store(const _SimdTuple
<_Tp
, _As
...>& __x
, void* __addr
)
1229 constexpr size_t _Np
= _SimdTuple
<_Tp
, _As
...>::_S_size();
1230 __builtin_memcpy(__addr
, &__x
, _Np
* sizeof(_Tp
));
1237 // _SimdImplFixedSize {{{1
1238 // fixed_size should not inherit from _SimdMathFallback in order for
1239 // specializations in the used _SimdTuple Abis to get used
1241 struct _SimdImplFixedSize
1243 // member types {{{2
1244 using _MaskMember
= _SanitizedBitMask
<_Np
>;
1246 template <typename _Tp
>
1247 using _SimdMember
= __fixed_size_storage_t
<_Tp
, _Np
>;
1249 template <typename _Tp
>
1250 static constexpr size_t _S_tuple_size
= _SimdMember
<_Tp
>::_S_tuple_size
;
1252 template <typename _Tp
>
1253 using _Simd
= simd
<_Tp
, simd_abi::fixed_size
<_Np
>>;
1255 template <typename _Tp
>
1256 using _TypeTag
= _Tp
*;
1259 template <typename _Tp
>
1260 static constexpr inline _SimdMember
<_Tp
> _S_broadcast(_Tp __x
) noexcept
1262 return _SimdMember
<_Tp
>::_S_generate([&](auto __meta
) constexpr {
1263 return __meta
._S_broadcast(__x
);
1267 // _S_generator {{{2
1268 template <typename _Fp
, typename _Tp
>
1269 static constexpr inline _SimdMember
<_Tp
> _S_generator(_Fp
&& __gen
,
1272 return _SimdMember
<_Tp
>::_S_generate([&__gen
](auto __meta
) constexpr {
1273 return __meta
._S_generator(
1274 [&](auto __i
) constexpr {
1275 return __i
< _Np
? __gen(_SizeConstant
<__meta
._S_offset
+ __i
>())
1283 template <typename _Tp
, typename _Up
>
1284 static inline _SimdMember
<_Tp
> _S_load(const _Up
* __mem
,
1285 _TypeTag
<_Tp
>) noexcept
1287 return _SimdMember
<_Tp
>::_S_generate([&](auto __meta
) {
1288 return __meta
._S_load(&__mem
[__meta
._S_offset
], _TypeTag
<_Tp
>());
1292 // _S_masked_load {{{2
1293 template <typename _Tp
, typename
... _As
, typename _Up
>
1294 static inline _SimdTuple
<_Tp
, _As
...>
1295 _S_masked_load(const _SimdTuple
<_Tp
, _As
...>& __old
,
1296 const _MaskMember __bits
, const _Up
* __mem
) noexcept
1298 auto __merge
= __old
;
1299 __for_each(__merge
, [&](auto __meta
, auto& __native
) {
1300 if (__meta
._S_submask(__bits
).any())
1301 #pragma GCC diagnostic push
1302 // __mem + __mem._S_offset could be UB ([expr.add]/4.3, but it punts
1303 // the responsibility for avoiding UB to the caller of the masked load
1304 // via the mask. Consequently, the compiler may assume this branch is
1305 // unreachable, if the pointer arithmetic is UB.
1306 #pragma GCC diagnostic ignored "-Warray-bounds"
1308 = __meta
._S_masked_load(__native
, __meta
._S_make_mask(__bits
),
1309 __mem
+ __meta
._S_offset
);
1310 #pragma GCC diagnostic pop
1316 template <typename _Tp
, typename _Up
>
1317 static inline void _S_store(const _SimdMember
<_Tp
>& __v
, _Up
* __mem
,
1318 _TypeTag
<_Tp
>) noexcept
1320 __for_each(__v
, [&](auto __meta
, auto __native
) {
1321 __meta
._S_store(__native
, &__mem
[__meta
._S_offset
], _TypeTag
<_Tp
>());
1325 // _S_masked_store {{{2
1326 template <typename _Tp
, typename
... _As
, typename _Up
>
1327 static inline void _S_masked_store(const _SimdTuple
<_Tp
, _As
...>& __v
,
1329 const _MaskMember __bits
) noexcept
1331 __for_each(__v
, [&](auto __meta
, auto __native
) {
1332 if (__meta
._S_submask(__bits
).any())
1333 #pragma GCC diagnostic push
1334 // __mem + __mem._S_offset could be UB ([expr.add]/4.3, but it punts
1335 // the responsibility for avoiding UB to the caller of the masked
1336 // store via the mask. Consequently, the compiler may assume this
1337 // branch is unreachable, if the pointer arithmetic is UB.
1338 #pragma GCC diagnostic ignored "-Warray-bounds"
1339 __meta
._S_masked_store(__native
, __mem
+ __meta
._S_offset
,
1340 __meta
._S_make_mask(__bits
));
1341 #pragma GCC diagnostic pop
1346 template <typename _Tp
, typename
... _As
>
1347 static inline _MaskMember
1348 _S_negate(const _SimdTuple
<_Tp
, _As
...>& __x
) noexcept
1350 _MaskMember __bits
= 0;
1352 __x
, [&__bits
](auto __meta
, auto __native
) constexpr {
1354 |= __meta
._S_mask_to_shifted_ullong(__meta
._S_negate(__native
));
1360 template <typename _Tp
, typename _BinaryOperation
>
1361 static constexpr inline _Tp
_S_reduce(const _Simd
<_Tp
>& __x
,
1362 const _BinaryOperation
& __binary_op
)
1364 using _Tup
= _SimdMember
<_Tp
>;
1365 const _Tup
& __tup
= __data(__x
);
1366 if constexpr (_Tup::_S_tuple_size
== 1)
1367 return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
1368 __tup
.template _M_simd_at
<0>(), __binary_op
);
1369 else if constexpr (_Tup::_S_tuple_size
== 2 && _Tup::_S_size() > 2
1370 && _Tup::_SecondType::_S_size() == 1)
1372 return __binary_op(simd
<_Tp
, simd_abi::scalar
>(
1373 reduce(__tup
.template _M_simd_at
<0>(),
1375 __tup
.template _M_simd_at
<1>())[0];
1377 else if constexpr (_Tup::_S_tuple_size
== 2 && _Tup::_S_size() > 4
1378 && _Tup::_SecondType::_S_size() == 2)
1381 simd
<_Tp
, simd_abi::scalar
>(
1382 reduce(__tup
.template _M_simd_at
<0>(), __binary_op
)),
1383 simd
<_Tp
, simd_abi::scalar
>(
1384 reduce(__tup
.template _M_simd_at
<1>(), __binary_op
)))[0];
1388 const auto& __x2
= __call_with_n_evaluations
<
1389 __div_roundup(_Tup::_S_tuple_size
, 2)>(
1390 [](auto __first_simd
, auto... __remaining
) {
1391 if constexpr (sizeof...(__remaining
) == 0)
1392 return __first_simd
;
1397 typename
decltype(__first_simd
)::abi_type
,
1398 typename
decltype(__remaining
)::abi_type
...>;
1399 return fixed_size_simd
<_Tp
, _Tup2::_S_size()>(
1401 __make_simd_tuple(__first_simd
, __remaining
...));
1405 auto __left
= __tup
.template _M_simd_at
<2 * __i
>();
1406 if constexpr (2 * __i
+ 1 == _Tup::_S_tuple_size
)
1410 auto __right
= __tup
.template _M_simd_at
<2 * __i
+ 1>();
1411 using _LT
= decltype(__left
);
1412 using _RT
= decltype(__right
);
1413 if constexpr (_LT::size() == _RT::size())
1414 return __binary_op(__left
, __right
);
1417 _GLIBCXX_SIMD_USE_CONSTEXPR_API
1418 typename
_LT::mask_type
__k(
1420 [](auto __j
) constexpr { return __j
< _RT::size(); });
1421 _LT __ext_right
= __left
;
1422 where(__k
, __ext_right
)
1423 = __proposed::resizing_simd_cast
<_LT
>(__right
);
1424 where(__k
, __left
) = __binary_op(__left
, __ext_right
);
1429 return reduce(__x2
, __binary_op
);
1433 // _S_min, _S_max {{{2
1434 template <typename _Tp
, typename
... _As
>
1435 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1436 _S_min(const _SimdTuple
<_Tp
, _As
...>& __a
,
1437 const _SimdTuple
<_Tp
, _As
...>& __b
)
1439 return __a
._M_apply_per_chunk(
1440 [](auto __impl
, auto __aa
, auto __bb
) constexpr {
1441 return __impl
._S_min(__aa
, __bb
);
1446 template <typename _Tp
, typename
... _As
>
1447 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1448 _S_max(const _SimdTuple
<_Tp
, _As
...>& __a
,
1449 const _SimdTuple
<_Tp
, _As
...>& __b
)
1451 return __a
._M_apply_per_chunk(
1452 [](auto __impl
, auto __aa
, auto __bb
) constexpr {
1453 return __impl
._S_max(__aa
, __bb
);
1458 // _S_complement {{{2
1459 template <typename _Tp
, typename
... _As
>
1460 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1461 _S_complement(const _SimdTuple
<_Tp
, _As
...>& __x
) noexcept
1463 return __x
._M_apply_per_chunk([](auto __impl
, auto __xx
) constexpr {
1464 return __impl
._S_complement(__xx
);
1468 // _S_unary_minus {{{2
1469 template <typename _Tp
, typename
... _As
>
1470 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1471 _S_unary_minus(const _SimdTuple
<_Tp
, _As
...>& __x
) noexcept
1473 return __x
._M_apply_per_chunk([](auto __impl
, auto __xx
) constexpr {
1474 return __impl
._S_unary_minus(__xx
);
1478 // arithmetic operators {{{2
1480 #define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
1481 template <typename _Tp, typename... _As> \
1482 static inline constexpr _SimdTuple<_Tp, _As...> name_( \
1483 const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)\
1485 return __x._M_apply_per_chunk( \
1486 [](auto __impl, auto __xx, auto __yy) constexpr { \
1487 return __impl.name_(__xx, __yy); \
1492 _GLIBCXX_SIMD_FIXED_OP(_S_plus
, +)
1493 _GLIBCXX_SIMD_FIXED_OP(_S_minus
, -)
1494 _GLIBCXX_SIMD_FIXED_OP(_S_multiplies
, *)
1495 _GLIBCXX_SIMD_FIXED_OP(_S_divides
, /)
1496 _GLIBCXX_SIMD_FIXED_OP(_S_modulus
, %)
1497 _GLIBCXX_SIMD_FIXED_OP(_S_bit_and
, &)
1498 _GLIBCXX_SIMD_FIXED_OP(_S_bit_or
, |)
1499 _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor
, ^)
1500 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left
, <<)
1501 _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right
, >>)
1502 #undef _GLIBCXX_SIMD_FIXED_OP
1504 template <typename _Tp
, typename
... _As
>
1505 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1506 _S_bit_shift_left(const _SimdTuple
<_Tp
, _As
...>& __x
, int __y
)
1508 return __x
._M_apply_per_chunk([__y
](auto __impl
, auto __xx
) constexpr {
1509 return __impl
._S_bit_shift_left(__xx
, __y
);
1513 template <typename _Tp
, typename
... _As
>
1514 static inline constexpr _SimdTuple
<_Tp
, _As
...>
1515 _S_bit_shift_right(const _SimdTuple
<_Tp
, _As
...>& __x
, int __y
)
1517 return __x
._M_apply_per_chunk([__y
](auto __impl
, auto __xx
) constexpr {
1518 return __impl
._S_bit_shift_right(__xx
, __y
);
1523 #define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name) \
1524 template <typename _Tp, typename... _As, typename... _More> \
1525 static inline __fixed_size_storage_t<_RetTp, _Np> \
1526 _S_##__name(const _SimdTuple<_Tp, _As...>& __x, \
1527 const _More&... __more) \
1529 if constexpr (sizeof...(_More) == 0) \
1531 if constexpr (is_same_v<_Tp, _RetTp>) \
1532 return __x._M_apply_per_chunk( \
1533 [](auto __impl, auto __xx) constexpr { \
1534 using _V = typename decltype(__impl)::simd_type; \
1535 return __data(__name(_V(__private_init, __xx))); \
1538 return __optimize_simd_tuple( \
1539 __x.template _M_apply_r<_RetTp>([](auto __impl, auto __xx) { \
1540 return __impl._S_##__name(__xx); \
1543 else if constexpr ( \
1546 _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
1547 return __x._M_apply_per_chunk( \
1548 [](auto __impl, auto __xx, auto... __pack) constexpr { \
1549 using _V = typename decltype(__impl)::simd_type; \
1550 return __data(__name(_V(__private_init, __xx), \
1551 _V(__private_init, __pack)...)); \
1554 else if constexpr (is_same_v<_Tp, _RetTp>) \
1555 return __x._M_apply_per_chunk( \
1556 [](auto __impl, auto __xx, auto... __pack) constexpr { \
1557 using _V = typename decltype(__impl)::simd_type; \
1558 return __data(__name(_V(__private_init, __xx), \
1559 __autocvt_to_simd(__pack)...)); \
1563 __assert_unreachable<_Tp>(); \
1566 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, acos
)
1567 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, asin
)
1568 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, atan
)
1569 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, atan2
)
1570 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, cos
)
1571 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, sin
)
1572 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, tan
)
1573 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, acosh
)
1574 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, asinh
)
1575 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, atanh
)
1576 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, cosh
)
1577 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, sinh
)
1578 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, tanh
)
1579 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, exp
)
1580 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, exp2
)
1581 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, expm1
)
1582 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, ilogb
)
1583 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, log
)
1584 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, log10
)
1585 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, log1p
)
1586 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, log2
)
1587 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, logb
)
1588 // modf implemented in simd_math.h
1589 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
,
1590 scalbn
) // double scalbn(double x, int exp);
1591 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, scalbln
)
1592 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, cbrt
)
1593 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, abs
)
1594 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fabs
)
1595 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, pow
)
1596 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, sqrt
)
1597 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, erf
)
1598 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, erfc
)
1599 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, lgamma
)
1600 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, tgamma
)
1601 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, trunc
)
1602 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, ceil
)
1603 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, floor
)
1604 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, nearbyint
)
1606 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, rint
)
1607 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lrint
)
1608 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llrint
)
1610 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, round
)
1611 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lround
)
1612 _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llround
)
1614 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, ldexp
)
1615 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fmod
)
1616 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, remainder
)
1617 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, copysign
)
1618 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, nextafter
)
1619 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fdim
)
1620 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fmax
)
1621 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fmin
)
1622 _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp
, fma
)
1623 _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, fpclassify
)
1624 #undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
1626 template <typename _Tp
, typename
... _Abis
>
1627 static _SimdTuple
<_Tp
, _Abis
...> _S_remquo(
1628 const _SimdTuple
<_Tp
, _Abis
...>& __x
,
1629 const _SimdTuple
<_Tp
, _Abis
...>& __y
,
1630 __fixed_size_storage_t
<int, _SimdTuple
<_Tp
, _Abis
...>::_S_size()>* __z
)
1632 return __x
._M_apply_per_chunk(
1633 [](auto __impl
, const auto __xx
, const auto __yy
, auto& __zz
) {
1634 return __impl
._S_remquo(__xx
, __yy
, &__zz
);
1639 template <typename _Tp
, typename
... _As
>
1640 static inline _SimdTuple
<_Tp
, _As
...>
1641 _S_frexp(const _SimdTuple
<_Tp
, _As
...>& __x
,
1642 __fixed_size_storage_t
<int, _Np
>& __exp
) noexcept
1644 return __x
._M_apply_per_chunk(
1645 [](auto __impl
, const auto& __a
, auto& __b
) {
1647 frexp(typename
decltype(__impl
)::simd_type(__private_init
, __a
),
1648 __autocvt_to_simd(__b
)));
1653 #define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
1654 template <typename _Tp, typename... _As> \
1655 static inline _MaskMember \
1656 _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
1658 return _M_test([](auto __impl, \
1659 auto __xx) { return __impl._S_##name_(__xx); }, \
1663 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf
)
1664 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite
)
1665 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan
)
1666 _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal
)
1667 _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit
)
1668 #undef _GLIBCXX_SIMD_TEST_ON_TUPLE_
1670 // _S_increment & _S_decrement{{{2
1671 template <typename
... _Ts
>
1672 _GLIBCXX_SIMD_INTRINSIC
static constexpr void
1673 _S_increment(_SimdTuple
<_Ts
...>& __x
)
1676 __x
, [](auto __meta
, auto& native
) constexpr {
1677 __meta
._S_increment(native
);
1681 template <typename
... _Ts
>
1682 _GLIBCXX_SIMD_INTRINSIC
static constexpr void
1683 _S_decrement(_SimdTuple
<_Ts
...>& __x
)
1686 __x
, [](auto __meta
, auto& native
) constexpr {
1687 __meta
._S_decrement(native
);
1692 #define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp) \
1693 template <typename _Tp, typename... _As> \
1694 _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember \
1695 __cmp(const _SimdTuple<_Tp, _As...>& __x, \
1696 const _SimdTuple<_Tp, _As...>& __y) \
1699 [](auto __impl, auto __xx, auto __yy) constexpr { \
1700 return __impl.__cmp(__xx, __yy); \
1705 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to
)
1706 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to
)
1707 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less
)
1708 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal
)
1709 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless
)
1710 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal
)
1711 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater
)
1712 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal
)
1713 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater
)
1714 _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered
)
1715 #undef _GLIBCXX_SIMD_CMP_OPERATIONS
1717 // smart_reference access {{{2
1718 template <typename _Tp
, typename
... _As
, typename _Up
>
1719 _GLIBCXX_SIMD_INTRINSIC
static void _S_set(_SimdTuple
<_Tp
, _As
...>& __v
,
1720 int __i
, _Up
&& __x
) noexcept
1721 { __v
._M_set(__i
, static_cast<_Up
&&>(__x
)); }
1723 // _S_masked_assign {{{2
1724 template <typename _Tp
, typename
... _As
>
1725 _GLIBCXX_SIMD_INTRINSIC
static void
1726 _S_masked_assign(const _MaskMember __bits
, _SimdTuple
<_Tp
, _As
...>& __lhs
,
1727 const __type_identity_t
<_SimdTuple
<_Tp
, _As
...>>& __rhs
)
1731 [&](auto __meta
, auto& __native_lhs
, auto __native_rhs
) constexpr {
1732 __meta
._S_masked_assign(__meta
._S_make_mask(__bits
), __native_lhs
,
1737 // Optimization for the case where the RHS is a scalar. No need to broadcast
1738 // the scalar to a simd first.
1739 template <typename _Tp
, typename
... _As
>
1740 _GLIBCXX_SIMD_INTRINSIC
static void
1741 _S_masked_assign(const _MaskMember __bits
, _SimdTuple
<_Tp
, _As
...>& __lhs
,
1742 const __type_identity_t
<_Tp
> __rhs
)
1745 __lhs
, [&](auto __meta
, auto& __native_lhs
) constexpr {
1746 __meta
._S_masked_assign(__meta
._S_make_mask(__bits
), __native_lhs
,
1751 // _S_masked_cassign {{{2
1752 template <typename _Op
, typename _Tp
, typename
... _As
>
1753 static inline void _S_masked_cassign(const _MaskMember __bits
,
1754 _SimdTuple
<_Tp
, _As
...>& __lhs
,
1755 const _SimdTuple
<_Tp
, _As
...>& __rhs
,
1760 [&](auto __meta
, auto& __native_lhs
, auto __native_rhs
) constexpr {
1761 __meta
.template _S_masked_cassign(__meta
._S_make_mask(__bits
),
1762 __native_lhs
, __native_rhs
, __op
);
1766 // Optimization for the case where the RHS is a scalar. No need to broadcast
1767 // the scalar to a simd first.
1768 template <typename _Op
, typename _Tp
, typename
... _As
>
1769 static inline void _S_masked_cassign(const _MaskMember __bits
,
1770 _SimdTuple
<_Tp
, _As
...>& __lhs
,
1771 const _Tp
& __rhs
, _Op __op
)
1774 __lhs
, [&](auto __meta
, auto& __native_lhs
) constexpr {
1775 __meta
.template _S_masked_cassign(__meta
._S_make_mask(__bits
),
1776 __native_lhs
, __rhs
, __op
);
1780 // _S_masked_unary {{{2
1781 template <template <typename
> class _Op
, typename _Tp
, typename
... _As
>
1782 static inline _SimdTuple
<_Tp
, _As
...>
1783 _S_masked_unary(const _MaskMember __bits
, const _SimdTuple
<_Tp
, _As
...>& __v
)
1785 return __v
._M_apply_wrapped([&__bits
](auto __meta
,
1786 auto __native
) constexpr {
1787 return __meta
.template _S_masked_unary
<_Op
>(__meta
._S_make_mask(
1796 // _MaskImplFixedSize {{{1
1798 struct _MaskImplFixedSize
1801 sizeof(_ULLong
) * __CHAR_BIT__
>= _Np
,
1802 "The fixed_size implementation relies on one _ULLong being able to store "
1803 "all boolean elements."); // required in load & store
1806 using _Abi
= simd_abi::fixed_size
<_Np
>;
1808 using _MaskMember
= _SanitizedBitMask
<_Np
>;
1810 template <typename _Tp
>
1811 using _FirstAbi
= typename __fixed_size_storage_t
<_Tp
, _Np
>::_FirstAbi
;
1813 template <typename _Tp
>
1814 using _TypeTag
= _Tp
*;
1819 _GLIBCXX_SIMD_INTRINSIC
static constexpr _MaskMember
1820 _S_broadcast(bool __x
)
1821 { return __x
? ~_MaskMember() : _MaskMember(); }
1826 _GLIBCXX_SIMD_INTRINSIC
static constexpr _MaskMember
1827 _S_load(const bool* __mem
)
1829 using _Ip
= __int_for_sizeof_t
<bool>;
1830 // the following load uses element_aligned and relies on __mem already
1831 // carrying alignment information from when this load function was
1833 const simd
<_Ip
, _Abi
> __bools(reinterpret_cast<const __may_alias
<_Ip
>*>(
1836 return __data(__bools
!= 0);
1841 template <bool _Sanitized
>
1842 _GLIBCXX_SIMD_INTRINSIC
static constexpr _SanitizedBitMask
<_Np
>
1843 _S_to_bits(_BitMask
<_Np
, _Sanitized
> __x
)
1845 if constexpr (_Sanitized
)
1848 return __x
._M_sanitized();
1853 template <typename _Tp
, typename _Up
, typename _UAbi
>
1854 _GLIBCXX_SIMD_INTRINSIC
static constexpr _MaskMember
1855 _S_convert(simd_mask
<_Up
, _UAbi
> __x
)
1857 return _UAbi::_MaskImpl::_S_to_bits(__data(__x
))
1858 .template _M_extract
<0, _Np
>();
1862 // _S_from_bitmask {{{2
1863 template <typename _Tp
>
1864 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1865 _S_from_bitmask(_MaskMember __bits
, _TypeTag
<_Tp
>) noexcept
1869 static inline _MaskMember
_S_load(const bool* __mem
) noexcept
1871 // TODO: _UChar is not necessarily the best type to use here. For smaller
1872 // _Np _UShort, _UInt, _ULLong, float, and double can be more efficient.
1874 using _Vs
= __fixed_size_storage_t
<_UChar
, _Np
>;
1875 __for_each(_Vs
{}, [&](auto __meta
, auto) {
1876 __r
|= __meta
._S_mask_to_shifted_ullong(
1877 __meta
._S_mask_impl
._S_load(&__mem
[__meta
._S_offset
],
1878 _SizeConstant
<__meta
._S_size()>()));
1883 // _S_masked_load {{{2
1884 static inline _MaskMember
_S_masked_load(_MaskMember __merge
,
1886 const bool* __mem
) noexcept
1888 _BitOps::_S_bit_iteration(__mask
.to_ullong(), [&](auto __i
) {
1889 __merge
.set(__i
, __mem
[__i
]);
1895 static inline void _S_store(const _MaskMember __bitmask
,
1896 bool* __mem
) noexcept
1898 if constexpr (_Np
== 1)
1899 __mem
[0] = __bitmask
[0];
1901 _FirstAbi
<_UChar
>::_CommonImpl::_S_store_bool_array(__bitmask
, __mem
);
1904 // _S_masked_store {{{2
1905 static inline void _S_masked_store(const _MaskMember __v
, bool* __mem
,
1906 const _MaskMember __k
) noexcept
1908 _BitOps::_S_bit_iteration(__k
, [&](auto __i
) { __mem
[__i
] = __v
[__i
]; });
1911 // logical and bitwise operators {{{2
1912 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1913 _S_logical_and(const _MaskMember
& __x
, const _MaskMember
& __y
) noexcept
1914 { return __x
& __y
; }
1916 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1917 _S_logical_or(const _MaskMember
& __x
, const _MaskMember
& __y
) noexcept
1918 { return __x
| __y
; }
1920 _GLIBCXX_SIMD_INTRINSIC
static constexpr _MaskMember
1921 _S_bit_not(const _MaskMember
& __x
) noexcept
1924 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1925 _S_bit_and(const _MaskMember
& __x
, const _MaskMember
& __y
) noexcept
1926 { return __x
& __y
; }
1928 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1929 _S_bit_or(const _MaskMember
& __x
, const _MaskMember
& __y
) noexcept
1930 { return __x
| __y
; }
1932 _GLIBCXX_SIMD_INTRINSIC
static _MaskMember
1933 _S_bit_xor(const _MaskMember
& __x
, const _MaskMember
& __y
) noexcept
1934 { return __x
^ __y
; }
1936 // smart_reference access {{{2
1937 _GLIBCXX_SIMD_INTRINSIC
static void _S_set(_MaskMember
& __k
, int __i
,
1939 { __k
.set(__i
, __x
); }
1941 // _S_masked_assign {{{2
1942 _GLIBCXX_SIMD_INTRINSIC
static void
1943 _S_masked_assign(const _MaskMember __k
, _MaskMember
& __lhs
,
1944 const _MaskMember __rhs
)
1945 { __lhs
= (__lhs
& ~__k
) | (__rhs
& __k
); }
1947 // Optimization for the case where the RHS is a scalar.
1948 _GLIBCXX_SIMD_INTRINSIC
static void _S_masked_assign(const _MaskMember __k
,
1960 template <typename _Tp
>
1961 _GLIBCXX_SIMD_INTRINSIC
static bool _S_all_of(simd_mask
<_Tp
, _Abi
> __k
)
1962 { return __data(__k
).all(); }
1966 template <typename _Tp
>
1967 _GLIBCXX_SIMD_INTRINSIC
static bool _S_any_of(simd_mask
<_Tp
, _Abi
> __k
)
1968 { return __data(__k
).any(); }
1972 template <typename _Tp
>
1973 _GLIBCXX_SIMD_INTRINSIC
static bool _S_none_of(simd_mask
<_Tp
, _Abi
> __k
)
1974 { return __data(__k
).none(); }
1978 template <typename _Tp
>
1979 _GLIBCXX_SIMD_INTRINSIC
static bool
1980 _S_some_of([[maybe_unused
]] simd_mask
<_Tp
, _Abi
> __k
)
1982 if constexpr (_Np
== 1)
1985 return __data(__k
).any() && !__data(__k
).all();
1990 template <typename _Tp
>
1991 _GLIBCXX_SIMD_INTRINSIC
static int _S_popcount(simd_mask
<_Tp
, _Abi
> __k
)
1992 { return __data(__k
).count(); }
1995 // _S_find_first_set {{{
1996 template <typename _Tp
>
1997 _GLIBCXX_SIMD_INTRINSIC
static int
1998 _S_find_first_set(simd_mask
<_Tp
, _Abi
> __k
)
1999 { return std::__countr_zero(__data(__k
).to_ullong()); }
2002 // _S_find_last_set {{{
2003 template <typename _Tp
>
2004 _GLIBCXX_SIMD_INTRINSIC
static int
2005 _S_find_last_set(simd_mask
<_Tp
, _Abi
> __k
)
2006 { return std::__bit_width(__data(__k
).to_ullong()) - 1; }
2012 _GLIBCXX_SIMD_END_NAMESPACE
2013 #endif // __cplusplus >= 201703L
2014 #endif // _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
2016 // vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80