/* ACLE support for AArch64 SVE (function shapes)
   Copyright (C) 2018-2020 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "memmodel.h"
#include "insn-codes.h"
#include "optabs.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"

/* In the comments below, _t0 represents the first type suffix and _t1
   represents the second.  Square brackets enclose characters that are
   present in only the full name, not the overloaded name.  Governing
   predicate arguments and predicate suffixes are not shown, since they
   depend on the predication type, which is a separate piece of
   information from the shape.

   Non-overloaded functions may have additional suffixes beyond the
   ones shown, if those suffixes don't affect the types in the type
   signature.  E.g. the predicate form of svtrn1 has a _b<bits> suffix,
   but this does not affect the prototype, which is always
   "svbool_t (svbool_t, svbool_t)".  */

namespace aarch64_sve {

/* Return a representation of "const T *".  */
static tree
build_const_pointer (tree t)
{
  return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
}

/* If INSTANCE has a governing predicate, add it to the list of argument
   types in ARGUMENT_TYPES.  RETURN_TYPE is the type returned by the
   function.  */
static void
apply_predication (const function_instance &instance, tree return_type,
                   vec<tree> &argument_types)
{
  if (instance.pred != PRED_none)
    {
      argument_types.quick_insert (0, get_svbool_t ());
      /* For unary merge operations, the first argument is a vector with
         the same type as the result.  For unary_convert_narrowt it also
         provides the "bottom" half of active elements, and is present
         for all types of predication.  */
      if ((argument_types.length () == 2 && instance.pred == PRED_m)
          || instance.shape == shapes::unary_convert_narrowt)
        argument_types.quick_insert (0, return_type);
    }
}

/* Parse and move past an element type in FORMAT and return it as a type
   suffix.  The format is:

   [01]    - the element type in type suffix 0 or 1 of INSTANCE
   f<bits> - a floating-point type with the given number of bits
   f[01]   - a floating-point type with the same width as type suffix 0 or 1
   h<elt>  - a half-sized version of <elt>
   p       - a predicate (represented as TYPE_SUFFIX_b)
   q<elt>  - a quarter-sized version of <elt>
   s<bits> - a signed type with the given number of bits
   s[01]   - a signed type with the same width as type suffix 0 or 1
   u<bits> - an unsigned type with the given number of bits
   u[01]   - an unsigned type with the same width as type suffix 0 or 1
   w<elt>  - a 64-bit version of <elt> if <elt> is integral, otherwise <elt>

   where <elt> is another element type.  */
static type_suffix_index
parse_element_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == 'f' || ch == 's' || ch == 'u')
    {
      type_class_index tclass = (ch == 'f' ? TYPE_float
                                 : ch == 's' ? TYPE_signed
                                 : TYPE_unsigned);
      char *end;
      unsigned int bits = strtol (format, &end, 10);
      format = end;
      if (bits == 0 || bits == 1)
        bits = instance.type_suffix (bits).element_bits;
      return find_type_suffix (tclass, bits);
    }

  if (ch == 'w')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      if (type_suffixes[suffix].integer_p)
        return find_type_suffix (type_suffixes[suffix].tclass, 64);
      return suffix;
    }

  if (ch == 'p')
    return TYPE_SUFFIX_b;

  if (ch == 'q')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return find_type_suffix (type_suffixes[suffix].tclass,
                               type_suffixes[suffix].element_bits / 4);
    }

  if (ch == 'h')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      /* Widening and narrowing doesn't change the type for predicates;
         everything's still an svbool_t.  */
      if (suffix == TYPE_SUFFIX_b)
        return suffix;
      return find_type_suffix (type_suffixes[suffix].tclass,
                               type_suffixes[suffix].element_bits / 2);
    }

  if (ch == '0' || ch == '1')
    return instance.type_suffix_ids[ch - '0'];

  gcc_unreachable ();
}
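
/* For example, if type suffix 0 of INSTANCE is _s16, then "0" parses to
   TYPE_SUFFIX_s16, "h0" to TYPE_SUFFIX_s8, "w0" to TYPE_SUFFIX_s64 and
   "u0" to TYPE_SUFFIX_u16, following the rules above.  */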

/* Read and return a type from FORMAT for function INSTANCE.  Advance
   FORMAT beyond the type string.  The format is:

   _       - void
   al      - array pointer for loads
   ap      - array pointer for prefetches
   as      - array pointer for stores
   b       - base vector type (from a _<m0>base suffix)
   d       - displacement vector type (from a _<m1>index or _<m1>offset suffix)
   e<name> - an enum with the given name
   s<elt>  - a scalar type with the given element suffix
   t<elt>  - a vector or tuple type with given element suffix [*1]
   v<elt>  - a vector with the given element suffix

   where <elt> has the format described above parse_element_type

   [*1] the vectors_per_tuple function indicates whether the type should
        be a tuple, and if so, how many vectors it should contain.  */
static tree
parse_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == '_')
    return void_type_node;

  if (ch == 'a')
    {
      ch = *format++;
      if (ch == 'l')
        return build_const_pointer (instance.memory_scalar_type ());
      if (ch == 'p')
        return const_ptr_type_node;
      if (ch == 's')
        return build_pointer_type (instance.memory_scalar_type ());
      gcc_unreachable ();
    }

  if (ch == 'b')
    return instance.base_vector_type ();

  if (ch == 'd')
    return instance.displacement_vector_type ();

  if (ch == 'e')
    {
      if (strncmp (format, "pattern", 7) == 0)
        {
          format += 7;
          return acle_svpattern;
        }
      if (strncmp (format, "prfop", 5) == 0)
        {
          format += 5;
          return acle_svprfop;
        }
      gcc_unreachable ();
    }

  if (ch == 's')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return scalar_types[type_suffixes[suffix].vector_type];
    }

  if (ch == 't')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      vector_type_index vector_type = type_suffixes[suffix].vector_type;
      unsigned int num_vectors = instance.vectors_per_tuple ();
      return acle_vector_types[num_vectors - 1][vector_type];
    }

  if (ch == 'v')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return acle_vector_types[0][type_suffixes[suffix].vector_type];
    }

  gcc_unreachable ();
}
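
/* For example, for an instance of svld1_s16, "al" parses to
   "const int16_t *" (the in-memory type) and "t0" to svint16_t;
   for a multi-vector load such as svld2_s16, "t0" would instead
   parse to svint16x2_t.  */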

/* Read and move past any argument count at FORMAT for the function
   signature of INSTANCE.  The counts are:

   *q: one argument per element in a 128-bit quadword (as for svdupq)
   *t: one argument per vector in a tuple (as for svcreate)

   Otherwise the count is 1.  */
static unsigned int
parse_count (const function_instance &instance, const char *&format)
{
  if (format[0] == '*' && format[1] == 'q')
    {
      format += 2;
      return instance.elements_per_vq (0);
    }
  if (format[0] == '*' && format[1] == 't')
    {
      format += 2;
      return instance.vectors_per_tuple ();
    }
  return 1;
}

/* Read a type signature for INSTANCE from FORMAT.  Add the argument types
   to ARGUMENT_TYPES and return the return type.

   The format is a comma-separated list of types (as for parse_type),
   with the first type being the return type and the rest being the
   argument types.  Each argument type can be followed by an optional
   count (as for parse_count).  */
static tree
parse_signature (const function_instance &instance, const char *format,
                 vec<tree> &argument_types)
{
  tree return_type = parse_type (instance, format);
  while (format[0] == ',')
    {
      format += 1;
      tree argument_type = parse_type (instance, format);
      unsigned int count = parse_count (instance, format);
      for (unsigned int i = 0; i < count; ++i)
        argument_types.quick_push (argument_type);
    }
  gcc_assert (format[0] == 0);
  return return_type;
}
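
/* For example, given an instance whose type suffix 0 is _s32, the
   signature "v0,v0,s0" produces a return type of svint32_t and argument
   types {svint32_t, int32_t}; apply_predication then inserts any
   governing predicate.  */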

/* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
   the type suffixes at index TI and the predication suffix at index PI.
   The other arguments are as for build_all.  */
static void
build_one (function_builder &b, const char *signature,
           const function_group_info &group, mode_suffix_index mode_suffix_id,
           unsigned int ti, unsigned int pi, bool force_direct_overloads)
{
  /* Byte forms of svdupq take 16 arguments.  */
  auto_vec<tree, 16> argument_types;
  function_instance instance (group.base_name, *group.base, *group.shape,
                              mode_suffix_id, group.types[ti],
                              group.preds[pi]);
  tree return_type = parse_signature (instance, signature, argument_types);
  apply_predication (instance, return_type, argument_types);
  b.add_unique_function (instance, return_type, argument_types,
                         group.required_extensions, force_direct_overloads);
}

/* GROUP describes some sort of gather or scatter operation.  There are
   two cases:

   - If the function has any type suffixes (as for loads and stores), the
     first function type suffix specifies either a 32-bit or a 64-bit type,
     which in turn selects either MODE32 or MODE64 as the addressing mode.
     Add a function instance for every type and predicate combination
     in GROUP for which the associated addressing mode is not MODE_none.

   - If the function has no type suffixes (as for prefetches), add one
     MODE32 form and one MODE64 form for each predication type.

   The other arguments are as for build_all.  */
static void
build_32_64 (function_builder &b, const char *signature,
             const function_group_info &group, mode_suffix_index mode32,
             mode_suffix_index mode64, bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    if (group.types[0][0] == NUM_TYPE_SUFFIXES)
      {
        gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
        build_one (b, signature, group, mode32, 0, pi,
                   force_direct_overloads);
        build_one (b, signature, group, mode64, 0, pi,
                   force_direct_overloads);
      }
    else
      for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES;
           ++ti)
        {
          unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
          gcc_assert (bits == 32 || bits == 64);
          mode_suffix_index mode = bits == 32 ? mode32 : mode64;
          if (mode != MODE_none)
            build_one (b, signature, group, mode, ti, pi,
                       force_direct_overloads);
        }
}

/* For every type and predicate combination in GROUP, add one function
   that takes a scalar (pointer) base and a signed vector array index,
   and another that instead takes an unsigned vector array index.
   The vector array index has the same element size as the first
   function type suffix.  SIGNATURE is as for build_all.  */
static void
build_sv_index (function_builder &b, const char *signature,
                const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32index, MODE_s64index);
  build_32_64 (b, signature, group, MODE_u32index, MODE_u64index);
}

/* Like build_sv_index, but only handle 64-bit types.  */
static void
build_sv_index64 (function_builder &b, const char *signature,
                  const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64index);
  build_32_64 (b, signature, group, MODE_none, MODE_u64index);
}

/* Like build_sv_index, but taking vector byte offsets instead of vector
   array indices.  */
static void
build_sv_offset (function_builder &b, const char *signature,
                 const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32offset, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* Like build_sv_offset, but exclude offsets that must be interpreted
   as signed (i.e. s32offset).  */
static void
build_sv_uint_offset (function_builder &b, const char *signature,
                      const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* For every type and predicate combination in GROUP, add a function
   that takes a vector base address and no displacement.  The vector
   base has the same element size as the first type suffix.

   The other arguments are as for build_all.  */
static void
build_v_base (function_builder &b, const char *signature,
              const function_group_info &group,
              bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base, MODE_u64base,
               force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar array
   index.  */
static void
build_vs_index (function_builder &b, const char *signature,
                const function_group_info &group,
                bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_index, MODE_u64base_index,
               force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar byte
   offset.  */
static void
build_vs_offset (function_builder &b, const char *signature,
                 const function_group_info &group,
                 bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_offset, MODE_u64base_offset,
               force_direct_overloads);
}

/* Add a function instance for every type and predicate combination
   in GROUP.  Take the function base name from GROUP and the mode suffix
   from MODE_SUFFIX_ID.  Use SIGNATURE to construct the function signature
   without a governing predicate, then use apply_predication to add in the
   predicate.  FORCE_DIRECT_OVERLOADS is true if there is a one-to-one
   mapping between "short" and "full" names, and if standard overload
   resolution therefore isn't necessary.  */
static void
build_all (function_builder &b, const char *signature,
           const function_group_info &group, mode_suffix_index mode_suffix_id,
           bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    for (unsigned int ti = 0;
         ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
      build_one (b, signature, group, mode_suffix_id, ti, pi,
                 force_direct_overloads);
}

/* TYPE is the largest type suffix associated with the arguments of R,
   but the result is twice as wide.  Return the associated type suffix
   if it exists, otherwise report an appropriate error and return
   NUM_TYPE_SUFFIXES.  */
static type_suffix_index
long_type_suffix (function_resolver &r, type_suffix_index type)
{
  unsigned int element_bits = type_suffixes[type].element_bits;
  if (type_suffixes[type].integer_p && element_bits < 64)
    return find_type_suffix (type_suffixes[type].tclass, element_bits * 2);

  r.report_no_such_form (type);
  return NUM_TYPE_SUFFIXES;
}
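
/* For example, long_type_suffix maps _s8 to _s16 and _u32 to _u64.
   _s64, _u64 and the floating-point suffixes have no wider integer
   form, so they are reported as errors.  */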

/* Declare the function shape NAME, pointing it to an instance
   of class <NAME>_def.  */
#define SHAPE(NAME) \
  static CONSTEXPR const NAME##_def NAME##_obj; \
  namespace shapes { const function_shape *const NAME = &NAME##_obj; }

/* Base class for functions that are not overloaded.  */
struct nonoverloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int) const OVERRIDE
  {
    return true;
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    gcc_unreachable ();
  }
};

/* Base class for overloaded functions.  Bit N of EXPLICIT_MASK is true
   if type suffix N appears in the overloaded name.  */
template<unsigned int EXPLICIT_MASK>
struct overloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int i) const OVERRIDE
  {
    return (EXPLICIT_MASK >> i) & 1;
  }
};

/* Base class for adr_index and adr_offset.  */
struct adr_base : public overloaded_base<0>
{
  /* The function takes two arguments: a vector base and a vector displacement
     (either an index or an offset).  Resolve based on them both.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
        || (mode = r.resolve_adr_address (0)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode);
  }
};

/* Base class for narrowing bottom binary functions that take an
   immediate second operand.  The result is half the size of input
   and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }
};

/* The top equivalent of binary_imm_narrowb_base.  It takes three arguments,
   with the first being the values of the even elements, which are typically
   the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE)
        || !r.require_integer_immediate (i + 2))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for long (i.e. narrow op narrow -> wide) binary functions
   that take an immediate second operand.  The type suffix specifies
   the wider type.  */
struct binary_imm_long_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,vh0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_integer_immediate (i + 1)
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }
};

/* Base class for inc_dec and inc_dec_pat.  */
struct inc_dec_base : public overloaded_base<0>
{
  CONSTEXPR inc_dec_base (bool pat_p) : m_pat_p (pat_p) {}

  /* Resolve based on the first argument only, which must be either a
     scalar or a vector.  If it's a scalar, it must be a 32-bit or
     64-bit integer.  */
  tree
  resolve (function_resolver &r) const
  {
    unsigned int i, nargs;
    if (!r.check_gp_argument (m_pat_p ? 3 : 2, i, nargs)
        || !r.require_vector_or_scalar_type (i))
      return error_mark_node;

    mode_suffix_index mode;
    type_suffix_index type;
    if (r.scalar_argument_p (i))
      {
        mode = MODE_n;
        type = r.infer_integer_scalar_type (i);
      }
    else
      {
        mode = MODE_none;
        type = r.infer_vector_type (i);
      }
    if (type == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    for (++i; i < nargs; ++i)
      if (!r.require_integer_immediate (i))
        return error_mark_node;

    return r.resolve_to (mode, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_range (m_pat_p ? 2 : 1, 1, 16);
  }

  bool m_pat_p;
};

/* Base class for load and load_replicate.  */
struct load_contiguous_base : public overloaded_base<0>
{
  /* Resolve a call based purely on a pointer argument.  The other arguments
     are a governing predicate and (for MODE_vnum) a vnum offset.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool vnum_p = r.mode_suffix_id == MODE_vnum;
    gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);

    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (vnum_p ? 2 : 1, i, nargs)
        || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
        || (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for gather loads that take a scalar base and a vector
   displacement (either an offset or an index).  */
struct load_gather_sv_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_pointer_type (i, true)) == NUM_TYPE_SUFFIXES
        || (mode = r.resolve_sv_displacement (i + 1, type, true),
            mode == MODE_none))
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for load_ext_gather_index and load_ext_gather_offset,
   which differ only in the units of the displacement.  */
struct load_ext_gather_base : public overloaded_base<1>
{
  /* Resolve a gather load that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The function has an explicit type suffix that determines the type
     of the loaded data.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    /* No resolution is needed for a vector base with no displacement;
       there's a one-to-one mapping between short and long names.  */
    gcc_assert (r.displacement_units () != UNITS_none);

    type_suffix_index type = r.type_suffix_ids[0];

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
        || (mode = r.resolve_gather_address (i, type, true)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for prefetch_gather_index and prefetch_gather_offset,
   which differ only in the units of the displacement.  */
struct prefetch_gather_base : public overloaded_base<0>
{
  /* Resolve a gather prefetch that takes one of:

     - a scalar pointer base (const void *) and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The prefetch operation is the final argument.  This is purely a
     mode-based resolution; there are no type suffixes.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
        || (mode = r.resolve_gather_address (i, NUM_TYPE_SUFFIXES,
                                             false)) == MODE_none
        || !r.require_integer_immediate (nargs - 1))
      return error_mark_node;

    return r.resolve_to (mode);
  }
};

/* Wraps BASE to provide a narrowing shift right function.  Argument N
   is an immediate shift amount in the range [1, sizeof (<t0>_t) * 4].  */
template<typename BASE, unsigned int N>
struct shift_right_imm_narrow_wrapper : public BASE
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits / 2;
    return c.require_immediate_range (N, 1, bits);
  }
};
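
/* For example, when type suffix 0 is _s16, the narrowed elements are
   8 bits wide, so the immediate shift amount must be in the range
   [1, 8].  */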

/* Base class for store_scatter_index and store_scatter_offset,
   which differ only in the units of the displacement.  */
struct store_scatter_base : public overloaded_base<0>
{
  /* Resolve a scatter store that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The stored data is the final argument, and it determines the
     type suffix.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
        || (type = r.infer_sd_vector_type (nargs - 1)) == NUM_TYPE_SUFFIXES
        || (mode = r.resolve_gather_address (i, type, false)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for ternary operations in which the final argument is an
   immediate shift amount.  The derived class should check the range.  */
struct ternary_shift_imm_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }
};

/* Base class for ternary operations in which the first argument has the
   same element type as the result, and in which the second and third
   arguments have an element type that is derived from the first.  MODIFIER
   is the number of element bits in the second and third arguments,
   or a function_resolver modifier that says how this precision is
   derived from the first argument's elements.  */
template<unsigned int MODIFIER>
struct ternary_resize2_opt_n_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                           MODIFIER))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i, type, r.SAME_TYPE_CLASS,
                                      MODIFIER);
  }
};

/* Like ternary_resize2_opt_n_base, but for functions that take a final
   lane argument.  */
template<unsigned int MODIFIER>
struct ternary_resize2_lane_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (4, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                           MODIFIER)
        || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
                                           MODIFIER)
        || !r.require_integer_immediate (i + 3))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for narrowing bottom unary functions.  The result is half
   the size of input and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (CLASS, r.HALF_SIZE);
  }
};

/* The top equivalent of unary_narrowb_base.  All forms take the values
   of the even elements as an extra argument, before any governing predicate.
   These even elements are typically the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
                   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* sv<m0>_t svfoo[_m0base]_[m1]index(sv<m0>_t, sv<m1>_t)

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_index_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_all (b, "b,b,d", group, MODE_u32base_s32index);
    build_all (b, "b,b,d", group, MODE_u32base_u32index);
    build_all (b, "b,b,d", group, MODE_u64base_s64index);
    build_all (b, "b,b,d", group, MODE_u64base_u64index);
  }
};
SHAPE (adr_index)

/* sv<m0>_t svfoo[_m0base]_[m1]offset(sv<m0>_t, sv<m1>_t)

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_offset_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_all (b, "b,b,d", group, MODE_u32base_s32offset);
    build_all (b, "b,b,d", group, MODE_u32base_u32offset);
    build_all (b, "b,b,d", group, MODE_u64base_s64offset);
    build_all (b, "b,b,d", group, MODE_u64base_u64offset);
  }
};
SHAPE (adr_offset)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)

   i.e. a binary operation with uniform types, but with no scalar form.  */
struct binary_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary)
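
/* For example, permutes such as svzip1 are registered with this shape:
   the _s32 form has the prototype "svint32_t (svint32_t, svint32_t)"
   and the overloaded name svzip1.  */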

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:int>_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always a signed integer.  */
struct binary_int_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vs0", group, MODE_none);
    build_all (b, "v0,v0,ss0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_signed);
  }
};
SHAPE (binary_int_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 16 / sizeof (<t0>_t) - 1].  */
struct binary_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_lane)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 32 / sizeof (<t0>_t) - 1].  */
struct binary_long_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_matching_vector_type (i + 1, type)
        || !r.require_integer_immediate (i + 2)
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_long_lane)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, <t0:half>_t).  */
struct binary_long_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0", group, MODE_none);
    build_all (b, "v0,vh0,sh0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
                                      r.SAME_SIZE, result_type);
  }
};
SHAPE (binary_long_opt_n)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. a binary operation in which the final argument is always a scalar
   rather than a vector.  */
struct binary_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_scalar_type (i + 1, r.SAME_TYPE_CLASS))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. a version of binary_opt_n in which the output elements are half the
   width of the input elements.  */
struct binary_narrowb_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_narrowb_opt_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, <t0>_t)

   This is the "top" counterpart to binary_narrowb_opt_n.  */
struct binary_narrowt_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i, i + 1, type, r.SAME_TYPE_CLASS,
                                           r.HALF_SIZE))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i + 1, type);
  }
};
SHAPE (binary_narrowt_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. the standard shape for binary operations that operate on
   uniform types.  */
struct binary_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
    /* _b functions do not have an _n form, but are classified as
       binary_opt_n so that they can be overloaded with vector
       functions.  */
    if (group.types[0][0] == TYPE_SUFFIX_b)
      gcc_assert (group.types[0][1] == NUM_TYPE_SUFFIXES);
    else
      build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_opt_n)
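
/* For example, svadd uses this shape: svadd_s32_m takes two svint32_t
   inputs, svadd_n_s32_m takes an svint32_t and a scalar int32_t, and
   both are reachable from the overloaded name svadd_m.  */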

/* svbool_t svfoo(svbool_t, svbool_t).  */
struct binary_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,v0,v0", group, MODE_none);
  }
};
SHAPE (binary_pred)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be 90 or 270.  */
struct binary_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_either_or (2, 90, 270);
  }
};
SHAPE (binary_rotate)

/* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t)

   i.e. a binary function that takes two scalars and returns a vector.
   An explicit type suffix is required.  */
struct binary_scalar_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,s0,s0", group, MODE_none);
  }
};
SHAPE (binary_scalar)

/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)

   i.e. a version of "binary" that returns unsigned integers.  */
struct binary_to_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vu0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary_to_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)

   i.e. a version of "binary" in which the final argument is always an
   unsigned integer.  */
struct binary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of binary_n in which the final argument is always an
   unsigned integer.  */
struct binary_uint_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_scalar_type (i + 1, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always an unsigned integer.  */
struct binary_uint_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned);
  }
};
SHAPE (binary_uint_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)

   i.e. a version of binary_n in which the final argument is always
   a 64-bit unsigned integer.  */
struct binary_uint64_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_scalar_type (i + 1, "uint64_t"))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint64_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svuint64_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always a uint64_t.  */
struct binary_uint64_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu64", group, MODE_none);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned, 64);
  }
};
SHAPE (binary_uint64_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t).  */
struct binary_wide_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                           r.HALF_SIZE))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_wide)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:half>_t).  */
struct binary_wide_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0", group, MODE_none);
    build_all (b, "v0,v0,sh0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
                                      r.HALF_SIZE);
  }
};
SHAPE (binary_wide_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   <t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t).  */
struct clast_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
    build_all (b, "s0,s0,v0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    if (!r.check_gp_argument (2, i, nargs)
        || !r.require_vector_or_scalar_type (i))
      return error_mark_node;

    if (r.scalar_argument_p (i))
      {
        type_suffix_index type;
        if (!r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
            || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
          return error_mark_node;
        return r.resolve_to (MODE_n, type);
      }
    else
      {
        type_suffix_index type;
        if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
            || !r.require_matching_vector_type (i + 1, type))
          return error_mark_node;
        return r.resolve_to (MODE_none, type);
      }
  }
};
SHAPE (clast)

/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t).  */
struct compare_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (compare)

/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   svbool_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. a comparison between two vectors, or between a vector and a scalar.  */
struct compare_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,v0,v0", group, MODE_none);
    build_all (b, "vp,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (compare_opt_n)

/* svbool_t svfoo[_t0](const <t0>_t *, const <t0>_t *).  */
struct compare_ptr_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,al,al", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_matching_pointer_type (i + 1, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (compare_ptr)

/* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)

   where _t0 is a _b<bits> suffix that describes the predicate result.
   There is no direct relationship between the element sizes of _t0
   and _t1.  */
struct compare_scalar_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,s1,s1", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_matching_integer_scalar_type (i + 1, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
  }
};
SHAPE (compare_scalar)
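
/* For example, svwhilelt_b32[_s64] has this shape: the _b32 suffix
   describes the predicate result, while the optional _s64 suffix
   describes the two scalar arguments.  */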

/* svbool_t svfoo[_t0](sv<t0>_t, svint64_t)  (for signed t0)
   svbool_t svfoo[_n_t0](sv<t0>_t, int64_t)  (for signed t0)
   svbool_t svfoo[_t0](sv<t0>_t, svuint64_t)  (for unsigned t0)
   svbool_t svfoo[_n_t0](sv<t0>_t, uint64_t)  (for unsigned t0)

   i.e. a comparison in which the second argument is 64 bits.  */
struct compare_wide_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,v0,vw0", group, MODE_none);
    build_all (b, "vp,v0,sw0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS, 64);
  }
};
SHAPE (compare_wide_opt_n)

/* uint64_t svfoo().  */
struct count_inherent_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64", group, MODE_none);
  }
};
SHAPE (count_inherent)

/* uint64_t svfoo(enum svpattern).  */
struct count_pat_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64,epattern", group, MODE_none);
  }
};
SHAPE (count_pat)

/* uint64_t svfoo(svbool_t).  */
struct count_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64,vp", group, MODE_none);
  }
};
SHAPE (count_pred)

/* uint64_t svfoo[_t0](sv<t0>_t).  */
struct count_vector_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "su64,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (count_vector)

/* sv<t0>xN_t svfoo[_t0](sv<t0>_t, ..., sv<t0>_t)

   where there are N arguments in total.  */
struct create_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,v0*t", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (r.vectors_per_tuple ());
  }
};
SHAPE (create)
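
/* For example, svcreate2[_s32] uses this shape with N == 2 and has the
   prototype "svint32x2_t (svint32_t, svint32_t)".  */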

/* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)

   where there are enough arguments to fill 128 bits of data (or to
   control 128 bits of data in the case of predicates).  */
struct dupq_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The "_n" suffix is optional; the full name has it, but the short
       name doesn't.  */
    build_all (b, "v0,s0*q", group, MODE_n, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short forms just make "_n" implicit, so no resolution is
       needed.  */
    gcc_unreachable ();
  }
};
SHAPE (dupq)
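
/* For example, svdupq[_n]_s8 takes 16 int8_t arguments and
   svdupq[_n]_s64 takes 2 int64_t arguments, while svdupq[_n]_b8
   takes 16 bool arguments that each control one byte.  */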

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression that when
   multiplied by the number of bytes in t0 is in the range [0, 255].  */
struct ext_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bytes = c.type_suffix (0).element_bytes;
    return c.require_immediate_range (2, 0, 256 / bytes - 1);
  }
};
SHAPE (ext)
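
/* For example, for svext[_s32] the immediate must be in the range
   [0, 63]: there are 4 bytes per element, so 256 / 4 - 1 == 63.  */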

/* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t).  */
struct fold_left_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "s0,s0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || !r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
        || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (fold_left)

/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, uint64_t)

   where the final argument is an integer constant expression in
   the range [0, N - 1].  */
struct get_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,t0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_integer_immediate (i + 1))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int nvectors = c.vectors_per_tuple ();
    return c.require_immediate_range (1, 0, nvectors - 1);
  }
};
SHAPE (get)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
   <t0>_t svfoo[_n_t0](<t0>_t, uint64_t)

   where the t0 in the vector form is a signed or unsigned integer
   whose size is tied to the [bhwd] suffix of "svfoo".  */
struct inc_dec_def : public inc_dec_base
{
  CONSTEXPR inc_dec_def () : inc_dec_base (false) {}

  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* These functions are unusual in that the type suffixes for
       the scalar and vector forms are not related.  The vector
       form always has exactly two potential suffixes while the
       scalar form always has four.  */
    if (group.types[2][0] == NUM_TYPE_SUFFIXES)
      build_all (b, "v0,v0,su64", group, MODE_none);
    else
      build_all (b, "s0,s0,su64", group, MODE_n);
  }
};
SHAPE (inc_dec)
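
/* For example, svqinch follows this shape: the vector forms are
   svqinch[_s16] and svqinch[_u16], while the scalar forms are
   svqinch[_n_s32], svqinch[_n_s64], svqinch[_n_u32] and
   svqinch[_n_u64].  */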

/* sv<t0>_t svfoo[_t0](sv<t0>_t, enum svpattern, uint64_t)
   <t0>_t svfoo[_n_t0](<t0>_t, enum svpattern, uint64_t)

   where the t0 in the vector form is a signed or unsigned integer
   whose size is tied to the [bhwd] suffix of "svfoo".  */
struct inc_dec_pat_def : public inc_dec_base
{
  CONSTEXPR inc_dec_pat_def () : inc_dec_base (true) {}

  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* These functions are unusual in that the type suffixes for
       the scalar and vector forms are not related.  The vector
       form always has exactly two potential suffixes while the
       scalar form always has four.  */
    if (group.types[2][0] == NUM_TYPE_SUFFIXES)
      build_all (b, "v0,v0,epattern,su64", group, MODE_none);
    else
      build_all (b, "s0,s0,epattern,su64", group, MODE_n);
  }
};
SHAPE (inc_dec_pat)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbool_t).  */
struct inc_dec_pred_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vp", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (inc_dec_pred)

/* <t0>_t svfoo[_n_t0]_t1(<t0>_t, svbool_t)

   where _t1 is a _b<bits> suffix that describes the svbool_t argument.  */
struct inc_dec_pred_scalar_def : public overloaded_base<2>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "s0,s0,vp", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type, r.type_suffix_ids[1]);
  }
};
SHAPE (inc_dec_pred_scalar)

/* sv<t0>[xN]_t svfoo_t0().  */
struct inherent_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "t0", group, MODE_none);
  }
};
SHAPE (inherent)

/* svbool_t svfoo[_b]().  */
struct inherent_b_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The "_b" suffix is optional; the full name has it, but the short
       name doesn't.  */
    build_all (b, "v0", group, MODE_none, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short forms just make "_b" implicit, so no resolution is
       needed.  */
    gcc_unreachable ();
  }
};
SHAPE (inherent_b)
/* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
   sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t).  */
struct load_def : public load_contiguous_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_vnum);
    build_all (b, "t0,al", group, MODE_none);
    build_all (b, "t0,al,ss64", group, MODE_vnum);
  }
};
SHAPE (load)

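/* For example (illustrative): svld1 follows this shape.  With implicit
   predication the built functions gain a leading governing predicate:

     svint8_t svld1[_s8] (svbool_t pg, const int8_t *base)
     svint8_t svld1_vnum[_s8] (svbool_t pg, const int8_t *base,
                               int64_t vnum)  */
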
/* sv<t0>_t svfoo_t0(const <X>_t *)
   sv<t0>_t svfoo_vnum_t0(const <X>_t *, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "t0,al", group, MODE_none);
    build_all (b, "t0,al,ss64", group, MODE_vnum);
  }
};
SHAPE (load_ext)

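/* For instance (illustrative): the extending load svld1sb has this shape,
   with <X> fixed at int8 by the "sb" part of the base name:

     svint32_t svld1sb_s32 (svbool_t pg, const int8_t *base)  */
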
/* sv<t0>_t svfoo_[s32]index_t0(const <X>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]index_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_gather_index_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "t0,al,d", group);
    build_vs_index (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_index)

/* sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  This is
   load_ext_gather_index that doesn't support 32-bit vector indices.  */
struct load_ext_gather_index_restricted_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index64 (b, "t0,al,d", group);
    build_vs_index (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_index_restricted)

/* sv<t0>_t svfoo_[s32]offset_t0(const <X>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_gather_offset_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "t0,al,d", group);
    build_v_base (b, "t0,b", group, true);
    build_vs_offset (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_offset)

/* sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  This is
   load_ext_gather_offset without the s32 vector offset form.  */
struct load_ext_gather_offset_restricted_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_uint_offset (b, "t0,al,d", group);
    build_v_base (b, "t0,b", group, true);
    build_vs_offset (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_offset_restricted)

/* sv<t0>_t svfoo_[s32]index[_t0](const <t0>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)

   sv<t0>_t svfoo_[s32]offset[_t0](const <t0>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t).  */
struct load_gather_sv_def : public load_gather_sv_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_index (b, "t0,al,d", group);
    build_sv_offset (b, "t0,al,d", group);
  }
};
SHAPE (load_gather_sv)

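/* For example (illustrative): svld1_gather provides, among others,

     svint32_t svld1_gather_[s32]index[_s32] (svbool_t pg,
                                              const int32_t *base,
                                              svint32_t indices)

   with the governing predicate added by the implicit predication.  */
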
/* sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)

   sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t)

   This is load_gather_sv without the 32-bit vector index forms and
   without the s32 vector offset form.  */
struct load_gather_sv_restricted_def : public load_gather_sv_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_index64 (b, "t0,al,d", group);
    build_sv_uint_offset (b, "t0,al,d", group);
  }
};
SHAPE (load_gather_sv_restricted)

/* sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t).  */
struct load_gather_vs_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The base vector mode is optional; the full name has it but the
       short name doesn't.  There is no ambiguity with SHAPE_load_gather_sv
       because the latter uses an implicit type suffix.  */
    build_v_base (b, "t0,b", group, true);
    build_vs_index (b, "t0,b,ss64", group, true);
    build_vs_offset (b, "t0,b,ss64", group, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short name just makes the base vector mode implicit;
       no resolution is needed.  */
    gcc_unreachable ();
  }
};
SHAPE (load_gather_vs)

/* sv<t0>_t svfoo[_t0](const <t0>_t *)

   The only difference from "load" is that this shape has no vnum form.  */
struct load_replicate_def : public load_contiguous_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,al", group, MODE_none);
  }
};
SHAPE (load_replicate)

/* svbool_t svfoo(enum svpattern).  */
struct pattern_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "vp,epattern", group, MODE_none);
  }
};
SHAPE (pattern_pred)

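/* For instance (illustrative): svptrue_pat_b8 is built from this shape:

     svbool_t svptrue_pat_b8 (enum svpattern pattern)  */
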
/* void svfoo(const void *, svprfop)
   void svfoo_vnum(const void *, int64_t, svprfop).  */
struct prefetch_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "_,ap,eprfop", group, MODE_none);
    build_all (b, "_,ap,ss64,eprfop", group, MODE_vnum);
  }
};
SHAPE (prefetch)

/* void svfoo_[s32]index(const void *, svint32_t, svprfop)
   void svfoo_[s64]index(const void *, svint64_t, svprfop)
   void svfoo_[u32]index(const void *, svuint32_t, svprfop)
   void svfoo_[u64]index(const void *, svuint64_t, svprfop)

   void svfoo[_u32base](svuint32_t, svprfop)
   void svfoo[_u64base](svuint64_t, svprfop)

   void svfoo[_u32base]_index(svuint32_t, int64_t, svprfop)
   void svfoo[_u64base]_index(svuint64_t, int64_t, svprfop).  */
struct prefetch_gather_index_def : public prefetch_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "_,ap,d,eprfop", group);
    build_v_base (b, "_,b,eprfop", group);
    build_vs_index (b, "_,b,ss64,eprfop", group);
  }
};
SHAPE (prefetch_gather_index)

/* void svfoo_[s32]offset(const void *, svint32_t, svprfop)
   void svfoo_[s64]offset(const void *, svint64_t, svprfop)
   void svfoo_[u32]offset(const void *, svuint32_t, svprfop)
   void svfoo_[u64]offset(const void *, svuint64_t, svprfop)

   void svfoo[_u32base](svuint32_t, svprfop)
   void svfoo[_u64base](svuint64_t, svprfop)

   void svfoo[_u32base]_offset(svuint32_t, int64_t, svprfop)
   void svfoo[_u64base]_offset(svuint64_t, int64_t, svprfop).  */
struct prefetch_gather_offset_def : public prefetch_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "_,ap,d,eprfop", group);
    build_v_base (b, "_,b,eprfop", group);
    build_vs_offset (b, "_,b,ss64,eprfop", group);
  }
};
SHAPE (prefetch_gather_offset)

/* bool svfoo(svbool_t).  */
struct ptest_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "sp,vp", group, MODE_none);
  }
};
SHAPE (ptest)

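/* For example (illustrative): svptest_any is built from this shape.  The
   svbool_t shown above is the operand being tested; the governing
   predicate supplied by the predication scheme comes first:

     bool svptest_any (svbool_t pg, svbool_t op)  */
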
/* svbool_t svfoo().  */
struct rdffr_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "vp", group, MODE_none);
  }
};
SHAPE (rdffr)

/* <t0>_t svfoo[_t0](sv<t0>_t).  */
struct reduction_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "s0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (reduction)

/* int64_t svfoo[_t0](sv<t0>_t) (for signed t0)
   uint64_t svfoo[_t0](sv<t0>_t) (for unsigned t0)
   <t0>_t svfoo[_t0](sv<t0>_t) (for floating-point t0)

   i.e. a version of "reduction" in which the return type for integers
   always has 64 bits.  */
struct reduction_wide_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "sw0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (reduction_wide)

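/* To contrast the two reduction shapes (illustrative): svmaxv uses
   "reduction" while svaddv uses "reduction_wide", so for s32 inputs:

     int32_t svmaxv[_s32] (svbool_t pg, svint32_t op)
     int64_t svaddv[_s32] (svbool_t pg, svint32_t op)  */
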
/* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)

   where the second argument is an integer constant expression in the
   range [0, N - 1].  */
struct set_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,t0,su64,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_integer_immediate (i + 1)
        || !r.require_derived_vector_type (i + 2, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int nvectors = c.vectors_per_tuple ();
    return c.require_immediate_range (1, 0, nvectors - 1);
  }
};
SHAPE (set)

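/* For example (illustrative): svset2 is an instance of this shape:

     svint32x2_t svset2[_s32] (svint32x2_t tuple, uint64_t imm_index,
                               svint32_t x)

   with imm_index required to be 0 or 1.  */
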
/* void svfoo().  */
struct setffr_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "_", group, MODE_none);
  }
};
SHAPE (setffr)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct shift_left_imm_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (1, 0, bits - 1);
  }
};
SHAPE (shift_left_imm)

/* sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 4 - 1].  */
struct shift_left_imm_long_def : public binary_imm_long_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits / 2;
    return c.require_immediate_range (1, 0, bits - 1);
  }
};
SHAPE (shift_left_imm_long)

/* sv<t0:uint>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct shift_left_imm_to_uint_def : public shift_left_imm_def
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "vu0,v0,su64", group, MODE_n);
  }
};
SHAPE (shift_left_imm_to_uint)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 8].  */
struct shift_right_imm_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (1, 1, bits);
  }
};
SHAPE (shift_right_imm)

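/* For instance (illustrative): svasrd follows this shape, so for 32-bit
   elements the shift amount must be in [1, 32]; with merging predication
   the built function is

     svint32_t svasrd[_n_s32]_m (svbool_t pg, svint32_t op1,
                                 uint64_t imm2)  */
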
/* sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base<>, 1>
  shift_right_imm_narrowb_def;
SHAPE (shift_right_imm_narrowb)

/* sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base<>, 2>
  shift_right_imm_narrowt_def;
SHAPE (shift_right_imm_narrowt)

/* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef binary_imm_narrowb_base<TYPE_unsigned>
  binary_imm_narrowb_base_unsigned;
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base_unsigned, 1>
  shift_right_imm_narrowb_to_uint_def;
SHAPE (shift_right_imm_narrowb_to_uint)

/* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0:uint:half>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef binary_imm_narrowt_base<TYPE_unsigned>
  binary_imm_narrowt_base_unsigned;
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base_unsigned, 2>
  shift_right_imm_narrowt_to_uint_def;
SHAPE (shift_right_imm_narrowt_to_uint)

/* void svfoo[_t0](<X>_t *, sv<t0>[xN]_t)
   void svfoo_vnum[_t0](<X>_t *, int64_t, sv<t0>[xN]_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
struct store_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_vnum);
    build_all (b, "_,as,t0", group, MODE_none);
    build_all (b, "_,as,ss64,t0", group, MODE_vnum);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool vnum_p = r.mode_suffix_id == MODE_vnum;
    gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);

    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (vnum_p ? 3 : 2, i, nargs)
        || !r.require_pointer_type (i)
        || (vnum_p && !r.require_scalar_type (i + 1, "int64_t"))
        || ((type = r.infer_tuple_type (nargs - 1)) == NUM_TYPE_SUFFIXES))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (store)

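/* For example (illustrative): svst1 keeps <X> tied to <t0>, whereas the
   truncating svst1b takes it from the base name:

     void svst1[_s32] (svbool_t pg, int32_t *base, svint32_t data)
     void svst1b[_s32] (svbool_t pg, int8_t *base, svint32_t data)  */
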
/* void svfoo_[s32]index[_t0](<X>_t *, svint32_t, sv<t0>_t)
   void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]index[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
struct store_scatter_index_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "_,as,d,t0", group);
    build_vs_index (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_index)

/* void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)

   i.e. a version of store_scatter_index that doesn't support 32-bit
   vector indices.  */
struct store_scatter_index_restricted_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index64 (b, "_,as,d,t0", group);
    build_vs_index (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_index_restricted)

/* void svfoo_[s32]offset[_t0](<X>_t *, svint32_t, sv<t0>_t)
   void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
   void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
struct store_scatter_offset_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "_,as,d,t0", group);
    build_v_base (b, "_,b,t0", group);
    build_vs_offset (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_offset)

/* void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
   void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)

   i.e. a version of store_scatter_offset that doesn't support svint32_t
   offsets.  */
struct store_scatter_offset_restricted_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_uint_offset (b, "_,as,d,t0", group);
    build_v_base (b, "_,b,t0", group);
    build_vs_offset (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_offset_restricted)

/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, sv<t0:uint>_t).  */
struct tbl_tuple_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,t0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
        || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (tbl_tuple)

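/* For example (illustrative): the SVE2 function svtbl2 has this shape:

     svint32_t svtbl2[_s32] (svint32x2_t data, svuint32_t indices)  */
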
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3);
  }
};
SHAPE (ternary_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t, uint64_t)

   where the penultimate argument is an integer constant expression in
   the range [0, 8 / sizeof (<t0>_t) - 1] and where the final argument
   is an integer constant expression in {0, 90, 180, 270}.  */
struct ternary_lane_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 2);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return (c.require_immediate_lane_index (3, 2)
            && c.require_immediate_one_of (4, 0, 90, 180, 270));
  }
};
SHAPE (ternary_lane_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t, uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 32 / sizeof (<t0>_t) - 1].  */
struct ternary_long_lane_def
  : public ternary_resize2_lane_base<function_resolver::HALF_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0,vh0,su64", group, MODE_none);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3);
  }
};
SHAPE (ternary_long_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:half>_t, <t0:half>_t)

   i.e. a version of the standard ternary shape ternary_opt_n in which
   the element type of the last two arguments is the half-sized
   equivalent of <t0>.  */
struct ternary_long_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::HALF_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0,vh0", group, MODE_none);
    build_all (b, "v0,v0,vh0,sh0", group, MODE_n);
  }
};
SHAPE (ternary_long_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, <t0>_t)

   i.e. the standard shape for ternary operations that operate on
   uniform types.  */
struct ternary_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0", group, MODE_none);
    build_all (b, "v0,v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (3);
  }
};
SHAPE (ternary_opt_n)

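/* For example (illustrative): svmla follows this shape; with merging
   predication the built functions include

     svint32_t svmla[_s32]_m (svbool_t pg, svint32_t op1, svint32_t op2,
                              svint32_t op3)
     svint32_t svmla[_n_s32]_m (svbool_t pg, svint32_t op1, svint32_t op2,
                                int32_t op3)  */
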
/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
                       uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_qq_lane_def
  : public ternary_resize2_lane_base<function_resolver::QUARTER_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3, 4);
  }
};
SHAPE (ternary_qq_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
                       uint64_t, uint64_t)

   where the penultimate argument is an integer constant expression in
   the range [0, 16 / sizeof (<t0>_t) - 1] and where the final argument
   is an integer constant expression in {0, 90, 180, 270}.  */
struct ternary_qq_lane_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (5, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                           r.QUARTER_SIZE)
        || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
                                           r.QUARTER_SIZE)
        || !r.require_integer_immediate (i + 3)
        || !r.require_integer_immediate (i + 4))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return (c.require_immediate_lane_index (3, 4)
            && c.require_immediate_one_of (4, 0, 90, 180, 270));
  }
};
SHAPE (ternary_qq_lane_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)

   i.e. a version of the standard ternary shape ternary_opt_n in which
   the element type of the last two arguments is the quarter-sized
   equivalent of <t0>.  */
struct ternary_qq_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
    build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
  }
};
SHAPE (ternary_qq_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
                       uint64_t)

   where the final argument is an integer constant expression in
   {0, 90, 180, 270}.  */
struct ternary_qq_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (4, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                           r.QUARTER_SIZE)
        || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
                                           r.QUARTER_SIZE)
        || !r.require_integer_immediate (i + 3))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_one_of (3, 0, 90, 180, 270);
  }
};
SHAPE (ternary_qq_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in
   {0, 90, 180, 270}.  */
struct ternary_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_one_of (3, 0, 90, 180, 270);
  }
};
SHAPE (ternary_rotate)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct ternary_shift_left_imm_def : public ternary_shift_imm_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (2, 0, bits - 1);
  }
};
SHAPE (ternary_shift_left_imm)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 8].  */
struct ternary_shift_right_imm_def : public ternary_shift_imm_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (2, 1, bits);
  }
};
SHAPE (ternary_shift_right_imm)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0:uint>_t).  */
struct ternary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || !r.require_matching_vector_type (i + 1, type)
        || !r.require_derived_vector_type (i + 2, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (ternary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 7].  */
struct tmad_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_range (2, 0, 7);
  }
};
SHAPE (tmad)

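/* For example (illustrative): svtmad fits this shape:

     svfloat32_t svtmad[_f32] (svfloat32_t op1, svfloat32_t op2,
                               uint64_t imm3)

   with imm3 in the range [0, 7].  */
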
/* sv<t0>_t svfoo[_t0](sv<t0>_t)

   i.e. the standard shape for unary operations that operate on
   uniform types.  */
struct unary_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary ();
  }
};
SHAPE (unary)

/* sv<t0>_t svfoo_t0[_t1](sv<t1>_t)

   where the target type <t0> must be specified explicitly but the source
   type <t1> can be inferred.  */
struct unary_convert_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v1", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (r.type_suffix (0).tclass,
                            r.type_suffix (0).element_bits);
  }
};
SHAPE (unary_convert)

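/* For example (illustrative): svcvt uses this shape, so the target suffix
   is always explicit while the source suffix can be deduced:

     svfloat32_t svcvt_f32[_s32]_m (svfloat32_t inactive, svbool_t pg,
                                    svint32_t op)  */
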
/* sv<t0>_t svfoo_t0[_t1](sv<t0>_t, sv<t1>_t)

   This is a version of unary_convert in which the even-indexed
   elements are passed in as a first parameter, before any governing
   predicate.  */
struct unary_convert_narrowt_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v1", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (r.type_suffix (0).tclass,
                            r.type_suffix (0).element_bits, true);
  }
};
SHAPE (unary_convert_narrowt)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t).  */
struct unary_long_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (1, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
        || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }
};
SHAPE (unary_long)

/* sv<t0>_t svfoo[_n]_t0(<t0>_t).  */
struct unary_n_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The "_n" suffix is optional; the full name has it, but the short
       name doesn't.  */
    build_all (b, "v0,s0", group, MODE_n, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short forms just make "_n" implicit, so no resolution is
       needed.  */
    gcc_unreachable ();
  }
};
SHAPE (unary_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0>_t).  */
typedef unary_narrowb_base<> unary_narrowb_def;
SHAPE (unary_narrowb)

/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t).  */
typedef unary_narrowt_base<> unary_narrowt_def;
SHAPE (unary_narrowt)

/* sv<t0:uint:half>_t svfoo[_t0](sv<t0>_t).  */
typedef unary_narrowb_base<TYPE_unsigned> unary_narrowb_to_uint_def;
SHAPE (unary_narrowb_to_uint)

/* sv<t0:uint:half>_t svfoo[_t0](sv<t0:uint:half>_t, sv<t0>_t).  */
typedef unary_narrowt_base<TYPE_unsigned> unary_narrowt_to_uint_def;
SHAPE (unary_narrowt_to_uint)

/* svbool_t svfoo(svbool_t).  */
struct unary_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,v0", group, MODE_none);
  }
};
SHAPE (unary_pred)

/* sv<t0:int>_t svfoo[_t0](sv<t0>_t)

   i.e. a version of "unary" in which the returned vector contains
   signed integers.  */
struct unary_to_int_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vs0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (TYPE_signed);
  }
};
SHAPE (unary_to_int)

/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t)

   i.e. a version of "unary" in which the returned vector contains
   unsigned integers.  */
struct unary_to_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vu0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (TYPE_unsigned);
  }
};
SHAPE (unary_to_uint)

/* sv<t0>_t svfoo[_t0](sv<t0:uint>_t)

   where <t0> always belongs to a certain type class, and where <t0:uint>
   therefore uniquely determines <t0>.  */
struct unary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (1, i, nargs)
        || (type = r.infer_unsigned_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* Search for a valid suffix with the same number of bits as TYPE.  */
    unsigned int element_bits = type_suffixes[type].element_bits;
    if (type_suffixes[type].unsigned_p)
      for (unsigned int j = 0; j < NUM_TYPE_SUFFIXES; ++j)
        if (type_suffixes[j].element_bits == element_bits)
          if (tree res = r.lookup_form (r.mode_suffix_id,
                                        type_suffix_index (j)))
            return res;

    return r.report_no_such_form (type);
  }
};
SHAPE (unary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t)

   i.e. a version of "unary" in which the source elements are half the
   size of the destination elements, but have the same type class.  */
struct unary_widen_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (1, i, nargs)
        || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* There is only a single form for predicates.  */
    if (type == TYPE_SUFFIX_b)
      return r.resolve_to (r.mode_suffix_id, type);

    if (type_suffixes[type].integer_p
        && type_suffixes[type].element_bits < 64)
      {
        type_suffix_index wide_suffix
          = find_type_suffix (type_suffixes[type].tclass,
                              type_suffixes[type].element_bits * 2);
        if (tree res = r.lookup_form (r.mode_suffix_id, wide_suffix))
          return res;
      }

    return r.report_no_such_form (type);
  }
};
SHAPE (unary_widen)

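/* For instance (illustrative): svunpklo follows this shape:

     svint32_t svunpklo[_s32] (svint16_t op)

   where the s32 suffix names the wider result type, so overload
   resolution infers it from the narrower argument.  */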