1 /* ACLE support for AArch64 SVE (function shapes)
2 Copyright (C) 2018-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "memmodel.h"
28 #include "insn-codes.h"
29 #include "optabs.h"
30 #include "aarch64-sve-builtins.h"
31 #include "aarch64-sve-builtins-shapes.h"
32
33 /* In the comments below, _t0 represents the first type suffix and _t1
34 represents the second. Square brackets enclose characters that are
35 present in only the full name, not the overloaded name. Governing
36 predicate arguments and predicate suffixes are not shown, since they
37 depend on the predication type, which is a separate piece of
38 information from the shape.
39
40 Non-overloaded functions may have additional suffixes beyond the
41 ones shown, if those suffixes don't affect the types in the type
42 signature. E.g. the predicate form of svtrn1 has a _b<bits> suffix,
43 but this does not affect the prototype, which is always
44 "svbool_t(svbool_t, svbool_t)". */
45
46 namespace aarch64_sve {
47
48 /* Return a representation of "const T *". */
49 static tree
50 build_const_pointer (tree t)
51 {
52 return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
53 }
54
55 /* If INSTANCE has a governing predicate, add it to the list of argument
56 types in ARGUMENT_TYPES. RETURN_TYPE is the type returned by the
57 function. */
58 static void
59 apply_predication (const function_instance &instance, tree return_type,
60 vec<tree> &argument_types)
61 {
62 if (instance.pred != PRED_none)
63 {
64 argument_types.quick_insert (0, get_svbool_t ());
65 /* For unary merge operations, the first argument is a vector with
66 the same type as the result. For unary_convert_narrowt it also
67 provides the "bottom" half of active elements, and is present
68 for all types of predication. */
69 if ((argument_types.length () == 2 && instance.pred == PRED_m)
70 || instance.shape == shapes::unary_convert_narrowt)
71 argument_types.quick_insert (0, return_type);
72 }
73 }
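
/* For example, with _m predication a unary operation such as svabs gains
   both a merge ("inactive") argument and the governing predicate, giving
   the ACLE prototype:

     svint32_t svabs_s32_m (svint32_t inactive, svbool_t pg, svint32_t op);

   whereas the _x and _z forms gain only the predicate.  */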
74
75 /* Parse and move past an element type in FORMAT and return it as a type
76 suffix. The format is:
77
78 [01] - the element type in type suffix 0 or 1 of INSTANCE
79 f<bits> - a floating-point type with the given number of bits
80 f[01] - a floating-point type with the same width as type suffix 0 or 1
81 h<elt> - a half-sized version of <elt>
82 p - a predicate (represented as TYPE_SUFFIX_b)
83 q<elt> - a quarter-sized version of <elt>
84 s<bits> - a signed type with the given number of bits
85 s[01] - a signed type with the same width as type suffix 0 or 1
86 u<bits> - an unsigned type with the given number of bits
87 u[01] - an unsigned type with the same width as type suffix 0 or 1
88 w<elt> - a 64-bit version of <elt> if <elt> is integral, otherwise <elt>
89
90 where <elt> is another element type. */
91 static type_suffix_index
92 parse_element_type (const function_instance &instance, const char *&format)
93 {
94 int ch = *format++;
95
96 if (ch == 'f' || ch == 's' || ch == 'u')
97 {
98 type_class_index tclass = (ch == 'f' ? TYPE_float
99 : ch == 's' ? TYPE_signed
100 : TYPE_unsigned);
101 char *end;
102 unsigned int bits = strtol (format, &end, 10);
103 format = end;
104 if (bits == 0 || bits == 1)
105 bits = instance.type_suffix (bits).element_bits;
106 return find_type_suffix (tclass, bits);
107 }
108
109 if (ch == 'w')
110 {
111 type_suffix_index suffix = parse_element_type (instance, format);
112 if (type_suffixes[suffix].integer_p)
113 return find_type_suffix (type_suffixes[suffix].tclass, 64);
114 return suffix;
115 }
116
117 if (ch == 'p')
118 return TYPE_SUFFIX_b;
119
120 if (ch == 'q')
121 {
122 type_suffix_index suffix = parse_element_type (instance, format);
123 return find_type_suffix (type_suffixes[suffix].tclass,
124 type_suffixes[suffix].element_bits / 4);
125 }
126
127 if (ch == 'h')
128 {
129 type_suffix_index suffix = parse_element_type (instance, format);
130 /* Widening and narrowing doesn't change the type for predicates;
131 everything's still an svbool_t. */
132 if (suffix == TYPE_SUFFIX_b)
133 return suffix;
134 return find_type_suffix (type_suffixes[suffix].tclass,
135 type_suffixes[suffix].element_bits / 2);
136 }
137
138 if (ch == '0' || ch == '1')
139 return instance.type_suffix_ids[ch - '0'];
140
141 gcc_unreachable ();
142 }
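
/* For instance, for a function instance whose type suffix 0 is _s32:
   "0" parses to s32, "h0" to s16, "q0" to s8, "w0" to s64, "f0" to f32,
   "hu0" to u16 (the unsigned type of half the width) and "p" to the
   predicate suffix b.  */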
143
144 /* Read and return a type from FORMAT for function INSTANCE. Advance
145 FORMAT beyond the type string. The format is:
146
147 _ - void
148 al - array pointer for loads
149 ap - array pointer for prefetches
150 as - array pointer for stores
151 b - base vector type (from a _<m0>base suffix)
152 d - displacement vector type (from a _<m1>index or _<m1>offset suffix)
153 e<name> - an enum with the given name
154 s<elt> - a scalar type with the given element suffix
155 t<elt> - a vector or tuple type with given element suffix [*1]
156 v<elt> - a vector with the given element suffix
157
158 where <elt> has the format described in the comment above parse_element_type
159
160 [*1] the vectors_per_tuple function indicates whether the type should
161 be a tuple, and if so, how many vectors it should contain. */
162 static tree
163 parse_type (const function_instance &instance, const char *&format)
164 {
165 int ch = *format++;
166
167 if (ch == '_')
168 return void_type_node;
169
170 if (ch == 'a')
171 {
172 ch = *format++;
173 if (ch == 'l')
174 return build_const_pointer (instance.memory_scalar_type ());
175 if (ch == 'p')
176 return const_ptr_type_node;
177 if (ch == 's')
178 return build_pointer_type (instance.memory_scalar_type ());
179 gcc_unreachable ();
180 }
181
182 if (ch == 'b')
183 return instance.base_vector_type ();
184
185 if (ch == 'd')
186 return instance.displacement_vector_type ();
187
188 if (ch == 'e')
189 {
190 if (strncmp (format, "pattern", 7) == 0)
191 {
192 format += 7;
193 return acle_svpattern;
194 }
195 if (strncmp (format, "prfop", 5) == 0)
196 {
197 format += 5;
198 return acle_svprfop;
199 }
200 gcc_unreachable ();
201 }
202
203 if (ch == 's')
204 {
205 type_suffix_index suffix = parse_element_type (instance, format);
206 return scalar_types[type_suffixes[suffix].vector_type];
207 }
208
209 if (ch == 't')
210 {
211 type_suffix_index suffix = parse_element_type (instance, format);
212 vector_type_index vector_type = type_suffixes[suffix].vector_type;
213 unsigned int num_vectors = instance.vectors_per_tuple ();
214 return acle_vector_types[num_vectors - 1][vector_type];
215 }
216
217 if (ch == 'v')
218 {
219 type_suffix_index suffix = parse_element_type (instance, format);
220 return acle_vector_types[0][type_suffixes[suffix].vector_type];
221 }
222
223 gcc_unreachable ();
224 }
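
/* Continuing the example above, for an instance whose type suffix 0 is
   _s32: "v0" resolves to svint32_t, "vh0" to svint16_t, "s0" to int32_t,
   "su64" to uint64_t, "vp" to svbool_t, and "t0" to svint32_t or an
   svint32xN_t tuple, as determined by vectors_per_tuple.  */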
225
226 /* Read and move past any argument count at FORMAT for the function
227 signature of INSTANCE. The counts are:
228
229 *q: one argument per element in a 128-bit quadword (as for svdupq)
230 *t: one argument per vector in a tuple (as for svcreate)
231
232 Otherwise the count is 1. */
233 static unsigned int
234 parse_count (const function_instance &instance, const char *&format)
235 {
236 if (format[0] == '*' && format[1] == 'q')
237 {
238 format += 2;
239 return instance.elements_per_vq (0);
240 }
241 if (format[0] == '*' && format[1] == 't')
242 {
243 format += 2;
244 return instance.vectors_per_tuple ();
245 }
246 return 1;
247 }
248
249 /* Read a type signature for INSTANCE from FORMAT. Add the argument types
250 to ARGUMENT_TYPES and return the return type.
251
252 The format is a comma-separated list of types (as for parse_type),
253 with the first type being the return type and the rest being the
254 argument types. Each argument type can be followed by an optional
255 count (as for parse_count). */
256 static tree
257 parse_signature (const function_instance &instance, const char *format,
258 vec<tree> &argument_types)
259 {
260 tree return_type = parse_type (instance, format);
261 while (format[0] == ',')
262 {
263 format += 1;
264 tree argument_type = parse_type (instance, format);
265 unsigned int count = parse_count (instance, format);
266 for (unsigned int i = 0; i < count; ++i)
267 argument_types.quick_push (argument_type);
268 }
269 gcc_assert (format[0] == 0);
270 return return_type;
271 }
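
/* As a worked example, the signature "v0,v0,su64" for an _s32 instance
   gives a return type of svint32_t and argument types (svint32_t,
   uint64_t); apply_predication then adds the governing predicate (and,
   for _m predication, the merge argument) on top of that.  */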
272
273 /* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
274 the type suffixes at index TI and the predication suffix at index PI.
275 The other arguments are as for build_all. */
276 static void
277 build_one (function_builder &b, const char *signature,
278 const function_group_info &group, mode_suffix_index mode_suffix_id,
279 unsigned int ti, unsigned int pi, bool force_direct_overloads)
280 {
281 /* Byte forms of svdupq take 16 arguments. */
282 auto_vec<tree, 16> argument_types;
283 function_instance instance (group.base_name, *group.base, *group.shape,
284 mode_suffix_id, group.types[ti],
285 group.preds[pi]);
286 tree return_type = parse_signature (instance, signature, argument_types);
287 apply_predication (instance, return_type, argument_types);
288 b.add_unique_function (instance, return_type, argument_types,
289 group.required_extensions, force_direct_overloads);
290 }
291
292 /* GROUP describes some sort of gather or scatter operation. There are
293 two cases:
294
295 - If the function has any type suffixes (as for loads and stores), the
296 first function type suffix specifies either a 32-bit or a 64-bit type,
297 which in turn selects either MODE32 or MODE64 as the addressing mode.
298 Add a function instance for every type and predicate combination
299 in GROUP for which the associated addressing mode is not MODE_none.
300
301 - If the function has no type suffixes (as for prefetches), add one
302 MODE32 form and one MODE64 form for each predication type.
303
304 The other arguments are as for build_all. */
305 static void
306 build_32_64 (function_builder &b, const char *signature,
307 const function_group_info &group, mode_suffix_index mode32,
308 mode_suffix_index mode64, bool force_direct_overloads = false)
309 {
310 for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
311 if (group.types[0][0] == NUM_TYPE_SUFFIXES)
312 {
313 gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
314 build_one (b, signature, group, mode32, 0, pi,
315 force_direct_overloads);
316 build_one (b, signature, group, mode64, 0, pi,
317 force_direct_overloads);
318 }
319 else
320 for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
321 {
322 unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
323 gcc_assert (bits == 32 || bits == 64);
324 mode_suffix_index mode = bits == 32 ? mode32 : mode64;
325 if (mode != MODE_none)
326 build_one (b, signature, group, mode, ti, pi,
327 force_direct_overloads);
328 }
329 }
330
331 /* For every type and predicate combination in GROUP, add one function
332 that takes a scalar (pointer) base and a signed vector array index,
333 and another that instead takes an unsigned vector array index.
334 The vector array index has the same element size as the first
335 function type suffix. SIGNATURE is as for build_all. */
336 static void
337 build_sv_index (function_builder &b, const char *signature,
338 const function_group_info &group)
339 {
340 build_32_64 (b, signature, group, MODE_s32index, MODE_s64index);
341 build_32_64 (b, signature, group, MODE_u32index, MODE_u64index);
342 }
343
344 /* Like build_sv_index, but only handle 64-bit types. */
345 static void
346 build_sv_index64 (function_builder &b, const char *signature,
347 const function_group_info &group)
348 {
349 build_32_64 (b, signature, group, MODE_none, MODE_s64index);
350 build_32_64 (b, signature, group, MODE_none, MODE_u64index);
351 }
352
353 /* Like build_sv_index, but taking vector byte offsets instead of vector
354 array indices. */
355 static void
356 build_sv_offset (function_builder &b, const char *signature,
357 const function_group_info &group)
358 {
359 build_32_64 (b, signature, group, MODE_s32offset, MODE_s64offset);
360 build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
361 }
362
363 /* Like build_sv_offset, but exclude offsets that must be interpreted
364 as signed (i.e. s32offset). */
365 static void
366 build_sv_uint_offset (function_builder &b, const char *signature,
367 const function_group_info &group)
368 {
369 build_32_64 (b, signature, group, MODE_none, MODE_s64offset);
370 build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
371 }
372
373 /* For every type and predicate combination in GROUP, add a function
374 that takes a vector base address and no displacement. The vector
375 base has the same element size as the first type suffix.
376
377 The other arguments are as for build_all. */
378 static void
379 build_v_base (function_builder &b, const char *signature,
380 const function_group_info &group,
381 bool force_direct_overloads = false)
382 {
383 build_32_64 (b, signature, group, MODE_u32base, MODE_u64base,
384 force_direct_overloads);
385 }
386
387 /* Like build_v_base, but for functions that also take a scalar array
388 index. */
389 static void
390 build_vs_index (function_builder &b, const char *signature,
391 const function_group_info &group,
392 bool force_direct_overloads = false)
393 {
394 build_32_64 (b, signature, group, MODE_u32base_index, MODE_u64base_index,
395 force_direct_overloads);
396 }
397
398 /* Like build_v_base, but for functions that also take a scalar byte
399 offset. */
400 static void
401 build_vs_offset (function_builder &b, const char *signature,
402 const function_group_info &group,
403 bool force_direct_overloads = false)
404 {
405 build_32_64 (b, signature, group, MODE_u32base_offset, MODE_u64base_offset,
406 force_direct_overloads);
407 }
408
409 /* Add a function instance for every type and predicate combination
410 in GROUP. Take the function base name from GROUP and the mode suffix
411 from MODE_SUFFIX_ID. Use SIGNATURE to construct the function signature
412 without a governing predicate, then use apply_predication to add in the
413 predicate. FORCE_DIRECT_OVERLOADS is true if there is a one-to-one
414 mapping between "short" and "full" names, and if standard overload
415 resolution therefore isn't necessary. */
416 static void
417 build_all (function_builder &b, const char *signature,
418 const function_group_info &group, mode_suffix_index mode_suffix_id,
419 bool force_direct_overloads = false)
420 {
421 for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
422 for (unsigned int ti = 0;
423 ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
424 build_one (b, signature, group, mode_suffix_id, ti, pi,
425 force_direct_overloads);
426 }
427
428 /* TYPE is the largest type suffix associated with the arguments of R,
429 but the result is twice as wide. Return the associated type suffix
430 if it exists, otherwise report an appropriate error and return
431 NUM_TYPE_SUFFIXES. */
432 static type_suffix_index
433 long_type_suffix (function_resolver &r, type_suffix_index type)
434 {
435 unsigned int element_bits = type_suffixes[type].element_bits;
436 if (type_suffixes[type].integer_p && element_bits < 64)
437 return find_type_suffix (type_suffixes[type].tclass, element_bits * 2);
438
439 r.report_no_such_form (type);
440 return NUM_TYPE_SUFFIXES;
441 }
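
/* For example, s16 maps to s32 and u32 to u64, whereas 64-bit and
   floating-point suffixes have no wider form and are reported as
   errors.  */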
442
443 /* Declare the function shape NAME, pointing it to an instance
444 of class <NAME>_def. */
445 #define SHAPE(NAME) \
446 static CONSTEXPR const NAME##_def NAME##_obj; \
447 namespace shapes { const function_shape *const NAME = &NAME##_obj; }
448
449 /* Base class for functions that are not overloaded. */
450 struct nonoverloaded_base : public function_shape
451 {
452 bool
453 explicit_type_suffix_p (unsigned int) const OVERRIDE
454 {
455 return true;
456 }
457
458 tree
459 resolve (function_resolver &) const OVERRIDE
460 {
461 gcc_unreachable ();
462 }
463 };
464
465 /* Base class for overloaded functions. Bit N of EXPLICIT_MASK is true
466 if type suffix N appears in the overloaded name. */
467 template<unsigned int EXPLICIT_MASK>
468 struct overloaded_base : public function_shape
469 {
470 bool
471 explicit_type_suffix_p (unsigned int i) const OVERRIDE
472 {
473 return (EXPLICIT_MASK >> i) & 1;
474 }
475 };
476
477 /* Base class for adr_index and adr_offset. */
478 struct adr_base : public overloaded_base<0>
479 {
480 /* The function takes two arguments: a vector base and a vector displacement
481 (either an index or an offset). Resolve based on them both. */
482 tree
483 resolve (function_resolver &r) const OVERRIDE
484 {
485 unsigned int i, nargs;
486 mode_suffix_index mode;
487 if (!r.check_gp_argument (2, i, nargs)
488 || (mode = r.resolve_adr_address (0)) == MODE_none)
489 return error_mark_node;
490
491 return r.resolve_to (mode);
492 };
493 };
494
495 /* Base class for narrowing bottom binary functions that take an
496 immediate second operand. The result is half the size of the input
497 and has class CLASS. */
498 template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
499 struct binary_imm_narrowb_base : public overloaded_base<0>
500 {
501 void
502 build (function_builder &b, const function_group_info &group) const OVERRIDE
503 {
504 b.add_overloaded_functions (group, MODE_n);
505 STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
506 || CLASS == TYPE_unsigned);
507 if (CLASS == TYPE_unsigned)
508 build_all (b, "vhu0,v0,su64", group, MODE_n);
509 else
510 build_all (b, "vh0,v0,su64", group, MODE_n);
511 }
512
513 tree
514 resolve (function_resolver &r) const OVERRIDE
515 {
516 return r.resolve_uniform (1, 1);
517 }
518 };
519
520 /* The top equivalent of binary_imm_narrowb_base. It takes three arguments,
521 with the first being the values of the even elements, which are typically
522 the result of the narrowb operation. */
523 template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
524 struct binary_imm_narrowt_base : public overloaded_base<0>
525 {
526 void
527 build (function_builder &b, const function_group_info &group) const OVERRIDE
528 {
529 b.add_overloaded_functions (group, MODE_n);
530 STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
531 || CLASS == TYPE_unsigned);
532 if (CLASS == TYPE_unsigned)
533 build_all (b, "vhu0,vhu0,v0,su64", group, MODE_n);
534 else
535 build_all (b, "vh0,vh0,v0,su64", group, MODE_n);
536 }
537
538 tree
539 resolve (function_resolver &r) const OVERRIDE
540 {
541 unsigned int i, nargs;
542 type_suffix_index type;
543 if (!r.check_gp_argument (3, i, nargs)
544 || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
545 || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE)
546 || !r.require_integer_immediate (i + 2))
547 return error_mark_node;
548
549 return r.resolve_to (r.mode_suffix_id, type);
550 }
551 };
552
553 /* Base class for long (i.e. narrow op narrow -> wide) binary functions
554 that take an immediate second operand. The type suffix specifies
555 the wider type. */
556 struct binary_imm_long_base : public overloaded_base<0>
557 {
558 void
559 build (function_builder &b, const function_group_info &group) const OVERRIDE
560 {
561 b.add_overloaded_functions (group, MODE_n);
562 build_all (b, "v0,vh0,su64", group, MODE_n);
563 }
564
565 tree
566 resolve (function_resolver &r) const OVERRIDE
567 {
568 unsigned int i, nargs;
569 type_suffix_index type, result_type;
570 if (!r.check_gp_argument (2, i, nargs)
571 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
572 || !r.require_integer_immediate (i + 1)
573 || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
574 return error_mark_node;
575
576 if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
577 return res;
578
579 return r.report_no_such_form (type);
580 }
581 };
582
583 /* Base class for inc_dec and inc_dec_pat. */
584 struct inc_dec_base : public overloaded_base<0>
585 {
586 CONSTEXPR inc_dec_base (bool pat_p) : m_pat_p (pat_p) {}
587
588 /* Resolve based on the first argument only, which must be either a
589 scalar or a vector. If it's a scalar, it must be a 32-bit or
590 64-bit integer. */
591 tree
592 resolve (function_resolver &r) const
593 {
594 unsigned int i, nargs;
595 if (!r.check_gp_argument (m_pat_p ? 3 : 2, i, nargs)
596 || !r.require_vector_or_scalar_type (i))
597 return error_mark_node;
598
599 mode_suffix_index mode;
600 type_suffix_index type;
601 if (r.scalar_argument_p (i))
602 {
603 mode = MODE_n;
604 type = r.infer_integer_scalar_type (i);
605 }
606 else
607 {
608 mode = MODE_none;
609 type = r.infer_vector_type (i);
610 }
611 if (type == NUM_TYPE_SUFFIXES)
612 return error_mark_node;
613
614 for (++i; i < nargs; ++i)
615 if (!r.require_integer_immediate (i))
616 return error_mark_node;
617
618 return r.resolve_to (mode, type);
619 }
620
621 bool
622 check (function_checker &c) const OVERRIDE
623 {
624 return c.require_immediate_range (m_pat_p ? 2 : 1, 1, 16);
625 }
626
627 bool m_pat_p;
628 };
629
630 /* Base class for load and load_replicate. */
631 struct load_contiguous_base : public overloaded_base<0>
632 {
633 /* Resolve a call based purely on a pointer argument. The other arguments
634 are a governing predicate and (for MODE_vnum) a vnum offset. */
635 tree
636 resolve (function_resolver &r) const OVERRIDE
637 {
638 bool vnum_p = r.mode_suffix_id == MODE_vnum;
639 gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
640
641 unsigned int i, nargs;
642 type_suffix_index type;
643 if (!r.check_gp_argument (vnum_p ? 2 : 1, i, nargs)
644 || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
645 || (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
646 return error_mark_node;
647
648 return r.resolve_to (r.mode_suffix_id, type);
649 }
650 };
651
652 /* Base class for gather loads that take a scalar base and a vector
653 displacement (either an offset or an index). */
654 struct load_gather_sv_base : public overloaded_base<0>
655 {
656 tree
657 resolve (function_resolver &r) const OVERRIDE
658 {
659 unsigned int i, nargs;
660 mode_suffix_index mode;
661 type_suffix_index type;
662 if (!r.check_gp_argument (2, i, nargs)
663 || (type = r.infer_pointer_type (i, true)) == NUM_TYPE_SUFFIXES
664 || (mode = r.resolve_sv_displacement (i + 1, type, true),
665 mode == MODE_none))
666 return error_mark_node;
667
668 return r.resolve_to (mode, type);
669 }
670 };
671
672 /* Base class for load_ext_gather_index and load_ext_gather_offset,
673 which differ only in the units of the displacement. */
674 struct load_ext_gather_base : public overloaded_base<1>
675 {
676 /* Resolve a gather load that takes one of:
677
678 - a scalar pointer base and a vector displacement
679 - a vector base with no displacement or
680 - a vector base and a scalar displacement
681
682 The function has an explicit type suffix that determines the type
683 of the loaded data. */
684 tree
685 resolve (function_resolver &r) const OVERRIDE
686 {
687 /* No resolution is needed for a vector base with no displacement;
688 there's a one-to-one mapping between short and long names. */
689 gcc_assert (r.displacement_units () != UNITS_none);
690
691 type_suffix_index type = r.type_suffix_ids[0];
692
693 unsigned int i, nargs;
694 mode_suffix_index mode;
695 if (!r.check_gp_argument (2, i, nargs)
696 || (mode = r.resolve_gather_address (i, type, true)) == MODE_none)
697 return error_mark_node;
698
699 return r.resolve_to (mode, type);
700 }
701 };
702
703 /* Base class for prefetch_gather_index and prefetch_gather_offset,
704 which differ only in the units of the displacement. */
705 struct prefetch_gather_base : public overloaded_base<0>
706 {
707 /* Resolve a gather prefetch that takes one of:
708
709 - a scalar pointer base (const void *) and a vector displacement
710 - a vector base with no displacement or
711 - a vector base and a scalar displacement
712
713 The prefetch operation is the final argument. This is purely a
714 mode-based resolution; there are no type suffixes. */
715 tree
716 resolve (function_resolver &r) const OVERRIDE
717 {
718 bool has_displacement_p = r.displacement_units () != UNITS_none;
719
720 unsigned int i, nargs;
721 mode_suffix_index mode;
722 if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
723 || (mode = r.resolve_gather_address (i, NUM_TYPE_SUFFIXES,
724 false)) == MODE_none
725 || !r.require_integer_immediate (nargs - 1))
726 return error_mark_node;
727
728 return r.resolve_to (mode);
729 }
730 };
731
732 /* Wraps BASE to provide a narrowing shift right function. Argument N
733 is an immediate shift amount in the range [1, sizeof(<t0>_t) * 4]. */
734 template<typename BASE, unsigned int N>
735 struct shift_right_imm_narrow_wrapper : public BASE
736 {
737 bool
738 check (function_checker &c) const OVERRIDE
739 {
740 unsigned int bits = c.type_suffix (0).element_bits / 2;
741 return c.require_immediate_range (N, 1, bits);
742 }
743 };
744
745 /* Base class for store_scatter_index and store_scatter_offset,
746 which differ only in the units of the displacement. */
747 struct store_scatter_base : public overloaded_base<0>
748 {
749 /* Resolve a scatter store that takes one of:
750
751 - a scalar pointer base and a vector displacement
752 - a vector base with no displacement or
753 - a vector base and a scalar displacement
754
755 The stored data is the final argument, and it determines the
756 type suffix. */
757 tree
758 resolve (function_resolver &r) const OVERRIDE
759 {
760 bool has_displacement_p = r.displacement_units () != UNITS_none;
761
762 unsigned int i, nargs;
763 mode_suffix_index mode;
764 type_suffix_index type;
765 if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
766 || (type = r.infer_sd_vector_type (nargs - 1)) == NUM_TYPE_SUFFIXES
767 || (mode = r.resolve_gather_address (i, type, false)) == MODE_none)
768 return error_mark_node;
769
770 return r.resolve_to (mode, type);
771 }
772 };
773
774 /* Base class for ternary operations in which the final argument is an
775 immediate shift amount. The derived class should check the range. */
776 struct ternary_shift_imm_base : public overloaded_base<0>
777 {
778 void
779 build (function_builder &b, const function_group_info &group) const OVERRIDE
780 {
781 b.add_overloaded_functions (group, MODE_n);
782 build_all (b, "v0,v0,v0,su64", group, MODE_n);
783 }
784
785 tree
786 resolve (function_resolver &r) const OVERRIDE
787 {
788 return r.resolve_uniform (2, 1);
789 }
790 };
791
792 /* Base class for ternary operations in which the first argument has the
793 same element type as the result, and in which the second and third
794 arguments have an element type that is derived from the first. MODIFIER
795 is the number of element bits in the second and third arguments,
796 or a function_resolver modifier that says how this precision is
797 derived from the first argument's elements. */
798 template<unsigned int MODIFIER>
799 struct ternary_resize2_opt_n_base : public overloaded_base<0>
800 {
801 tree
802 resolve (function_resolver &r) const OVERRIDE
803 {
804 unsigned int i, nargs;
805 type_suffix_index type;
806 if (!r.check_gp_argument (3, i, nargs)
807 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
808 || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
809 MODIFIER))
810 return error_mark_node;
811
812 return r.finish_opt_n_resolution (i + 2, i, type, r.SAME_TYPE_CLASS,
813 MODIFIER);
814 }
815 };
816
817 /* Like ternary_resize2_opt_n_base, but for functions that take a final
818 lane argument. */
819 template<unsigned int MODIFIER>
820 struct ternary_resize2_lane_base : public overloaded_base<0>
821 {
822 tree
823 resolve (function_resolver &r) const OVERRIDE
824 {
825 unsigned int i, nargs;
826 type_suffix_index type;
827 if (!r.check_gp_argument (4, i, nargs)
828 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
829 || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
830 MODIFIER)
831 || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
832 MODIFIER)
833 || !r.require_integer_immediate (i + 3))
834 return error_mark_node;
835
836 return r.resolve_to (r.mode_suffix_id, type);
837 }
838 };
839
840 /* Base class for narrowing bottom unary functions. The result is half
841 the size of the input and has class CLASS. */
842 template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
843 struct unary_narrowb_base : public overloaded_base<0>
844 {
845 void
846 build (function_builder &b, const function_group_info &group) const OVERRIDE
847 {
848 b.add_overloaded_functions (group, MODE_none);
849 STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
850 || CLASS == TYPE_unsigned);
851 if (CLASS == TYPE_unsigned)
852 build_all (b, "vhu0,v0", group, MODE_none);
853 else
854 build_all (b, "vh0,v0", group, MODE_none);
855 }
856
857 tree
858 resolve (function_resolver &r) const OVERRIDE
859 {
860 return r.resolve_unary (CLASS, r.HALF_SIZE);
861 }
862 };
863
864 /* The top equivalent of unary_narrowb_base. All forms take the values
865 of the even elements as an extra argument, before any governing predicate.
866 These even elements are typically the result of the narrowb operation. */
867 template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
868 struct unary_narrowt_base : public overloaded_base<0>
869 {
870 void
871 build (function_builder &b, const function_group_info &group) const OVERRIDE
872 {
873 b.add_overloaded_functions (group, MODE_none);
874 STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
875 || CLASS == TYPE_unsigned);
876 if (CLASS == TYPE_unsigned)
877 build_all (b, "vhu0,vhu0,v0", group, MODE_none);
878 else
879 build_all (b, "vh0,vh0,v0", group, MODE_none);
880 }
881
882 tree
883 resolve (function_resolver &r) const OVERRIDE
884 {
885 unsigned int i, nargs;
886 type_suffix_index type;
887 if (!r.check_gp_argument (2, i, nargs)
888 || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
889 || !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE))
890 return error_mark_node;
891
892 return r.resolve_to (r.mode_suffix_id, type);
893 }
894 };
895
896 /* sv<m0>_t svfoo[_m0base]_[m1]index(sv<m0>_t, sv<m1>_t)
897
898 for all valid combinations of vector base type <m0> and vector
899 displacement type <m1>. */
900 struct adr_index_def : public adr_base
901 {
902 void
903 build (function_builder &b, const function_group_info &group) const OVERRIDE
904 {
905 b.add_overloaded_functions (group, MODE_index);
906 build_all (b, "b,b,d", group, MODE_u32base_s32index);
907 build_all (b, "b,b,d", group, MODE_u32base_u32index);
908 build_all (b, "b,b,d", group, MODE_u64base_s64index);
909 build_all (b, "b,b,d", group, MODE_u64base_u64index);
910 }
911 };
912 SHAPE (adr_index)
913
914 /* sv<m0>_t svfoo[_m0base]_[m1]offset(sv<m0>_t, sv<m1>_t).
915
916 for all valid combinations of vector base type <m0> and vector
917 displacement type <m1>. */
918 struct adr_offset_def : public adr_base
919 {
920 void
921 build (function_builder &b, const function_group_info &group) const OVERRIDE
922 {
923 b.add_overloaded_functions (group, MODE_offset);
924 build_all (b, "b,b,d", group, MODE_u32base_s32offset);
925 build_all (b, "b,b,d", group, MODE_u32base_u32offset);
926 build_all (b, "b,b,d", group, MODE_u64base_s64offset);
927 build_all (b, "b,b,d", group, MODE_u64base_u64offset);
928 }
929 };
930 SHAPE (adr_offset)
931
932 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
933
934 i.e. a binary operation with uniform types, but with no scalar form. */
935 struct binary_def : public overloaded_base<0>
936 {
937 void
938 build (function_builder &b, const function_group_info &group) const OVERRIDE
939 {
940 b.add_overloaded_functions (group, MODE_none);
941 build_all (b, "v0,v0,v0", group, MODE_none);
942 }
943
944 tree
945 resolve (function_resolver &r) const OVERRIDE
946 {
947 return r.resolve_uniform (2);
948 }
949 };
950 SHAPE (binary)
951
952 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int>_t)
953 sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:int>_t).
954
955 i.e. a version of the standard binary shape binary_opt_n in which
956 the final argument is always a signed integer. */
957 struct binary_int_opt_n_def : public overloaded_base<0>
958 {
959 void
960 build (function_builder &b, const function_group_info &group) const OVERRIDE
961 {
962 b.add_overloaded_functions (group, MODE_none);
963 build_all (b, "v0,v0,vs0", group, MODE_none);
964 build_all (b, "v0,v0,ss0", group, MODE_n);
965 }
966
967 tree
968 resolve (function_resolver &r) const OVERRIDE
969 {
970 unsigned int i, nargs;
971 type_suffix_index type;
972 if (!r.check_gp_argument (2, i, nargs)
973 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
974 return error_mark_node;
975
976 return r.finish_opt_n_resolution (i + 1, i, type, TYPE_signed);
977 }
978 };
979 SHAPE (binary_int_opt_n)
980
981 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
982
983 where the final argument is an integer constant expression in the
984 range [0, 16 / sizeof (<t0>_t) - 1]. */
985 struct binary_lane_def : public overloaded_base<0>
986 {
987 void
988 build (function_builder &b, const function_group_info &group) const OVERRIDE
989 {
990 b.add_overloaded_functions (group, MODE_none);
991 build_all (b, "v0,v0,v0,su64", group, MODE_none);
992 }
993
994 tree
995 resolve (function_resolver &r) const OVERRIDE
996 {
997 return r.resolve_uniform (2, 1);
998 }
999
1000 bool
1001 check (function_checker &c) const OVERRIDE
1002 {
1003 return c.require_immediate_lane_index (2);
1004 }
1005 };
1006 SHAPE (binary_lane)
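
/* For binary_lane with 32-bit elements, for example, the lane index must
   be in the range [0, 3], since four 32-bit lanes fit in each 128-bit
   quadword.  */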
1007
1008 /* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t, uint64_t).
1009
1010 where the final argument is an integer constant expression in the
1011 range [0, 32 / sizeof (<t0>_t) - 1]. */
1012 struct binary_long_lane_def : public overloaded_base<0>
1013 {
1014 void
1015 build (function_builder &b, const function_group_info &group) const OVERRIDE
1016 {
1017 b.add_overloaded_functions (group, MODE_none);
1018 build_all (b, "v0,vh0,vh0,su64", group, MODE_none);
1019 }
1020
1021 tree
1022 resolve (function_resolver &r) const OVERRIDE
1023 {
1024 unsigned int i, nargs;
1025 type_suffix_index type, result_type;
1026 if (!r.check_gp_argument (3, i, nargs)
1027 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1028 || !r.require_matching_vector_type (i + 1, type)
1029 || !r.require_integer_immediate (i + 2)
1030 || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
1031 return error_mark_node;
1032
1033 if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
1034 return res;
1035
1036 return r.report_no_such_form (type);
1037 }
1038
1039 bool
1040 check (function_checker &c) const OVERRIDE
1041 {
1042 return c.require_immediate_lane_index (2);
1043 }
1044 };
1045 SHAPE (binary_long_lane)
1046
1047 /* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t)
1048 sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, <t0:half>_t). */
1049 struct binary_long_opt_n_def : public overloaded_base<0>
1050 {
1051 void
1052 build (function_builder &b, const function_group_info &group) const OVERRIDE
1053 {
1054 b.add_overloaded_functions (group, MODE_none);
1055 build_all (b, "v0,vh0,vh0", group, MODE_none);
1056 build_all (b, "v0,vh0,sh0", group, MODE_n);
1057 }
1058
1059 tree
1060 resolve (function_resolver &r) const OVERRIDE
1061 {
1062 unsigned int i, nargs;
1063 type_suffix_index type, result_type;
1064 if (!r.check_gp_argument (2, i, nargs)
1065 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1066 || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
1067 return error_mark_node;
1068
1069 return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
1070 r.SAME_SIZE, result_type);
1071 }
1072 };
1073 SHAPE (binary_long_opt_n)
1074
1075 /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t).
1076
1077 i.e. a binary operation in which the final argument is always a scalar
1078 rather than a vector. */
1079 struct binary_n_def : public overloaded_base<0>
1080 {
1081 void
1082 build (function_builder &b, const function_group_info &group) const OVERRIDE
1083 {
1084 b.add_overloaded_functions (group, MODE_n);
1085 build_all (b, "v0,v0,s0", group, MODE_n);
1086 }
1087
1088 tree
1089 resolve (function_resolver &r) const OVERRIDE
1090 {
1091 unsigned int i, nargs;
1092 type_suffix_index type;
1093 if (!r.check_gp_argument (2, i, nargs)
1094 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1095 || !r.require_derived_scalar_type (i + 1, r.SAME_TYPE_CLASS))
1096 return error_mark_node;
1097
1098 return r.resolve_to (r.mode_suffix_id, type);
1099 }
1100 };
1101 SHAPE (binary_n)
1102
1103 /* sv<t0:half>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1104 sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
1105
1106 i.e. a version of binary_opt_n in which the output elements are half the
1107 width of the input elements. */
1108 struct binary_narrowb_opt_n_def : public overloaded_base<0>
1109 {
1110 void
1111 build (function_builder &b, const function_group_info &group) const OVERRIDE
1112 {
1113 b.add_overloaded_functions (group, MODE_none);
1114 build_all (b, "vh0,v0,v0", group, MODE_none);
1115 build_all (b, "vh0,v0,s0", group, MODE_n);
1116 }
1117
1118 tree
1119 resolve (function_resolver &r) const OVERRIDE
1120 {
1121 return r.resolve_uniform_opt_n (2);
1122 }
1123 };
1124 SHAPE (binary_narrowb_opt_n)
1125
1126 /* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t, sv<t0>_t)
1127 sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, <t0>_t)
1128
1129 This is the "top" counterpart to binary_narrowb_opt_n. */
1130 struct binary_narrowt_opt_n_def : public overloaded_base<0>
1131 {
1132 void
1133 build (function_builder &b, const function_group_info &group) const OVERRIDE
1134 {
1135 b.add_overloaded_functions (group, MODE_none);
1136 build_all (b, "vh0,vh0,v0,v0", group, MODE_none);
1137 build_all (b, "vh0,vh0,v0,s0", group, MODE_n);
1138 }
1139
1140 tree
1141 resolve (function_resolver &r) const OVERRIDE
1142 {
1143 unsigned int i, nargs;
1144 type_suffix_index type;
1145 if (!r.check_gp_argument (3, i, nargs)
1146 || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
1147 || !r.require_derived_vector_type (i, i + 1, type, r.SAME_TYPE_CLASS,
1148 r.HALF_SIZE))
1149 return error_mark_node;
1150
1151 return r.finish_opt_n_resolution (i + 2, i + 1, type);
1152 }
1153 };
1154 SHAPE (binary_narrowt_opt_n)
1155
1156 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1157 sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
1158
1159 i.e. the standard shape for binary operations that operate on
1160 uniform types. */
1161 struct binary_opt_n_def : public overloaded_base<0>
1162 {
1163 void
1164 build (function_builder &b, const function_group_info &group) const OVERRIDE
1165 {
1166 b.add_overloaded_functions (group, MODE_none);
1167 build_all (b, "v0,v0,v0", group, MODE_none);
1168 /* _b functions do not have an _n form, but are classified as
1169 binary_opt_n so that they can be overloaded with vector
1170 functions. */
1171 if (group.types[0][0] == TYPE_SUFFIX_b)
1172 gcc_assert (group.types[0][1] == NUM_TYPE_SUFFIXES);
1173 else
1174 build_all (b, "v0,v0,s0", group, MODE_n);
1175 }
1176
1177 tree
1178 resolve (function_resolver &r) const OVERRIDE
1179 {
1180 return r.resolve_uniform_opt_n (2);
1181 }
1182 };
1183 SHAPE (binary_opt_n)
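
/* For instance, svadd uses the binary_opt_n shape, so (ignoring the
   governing predicate and predication suffix) the _s32 forms are
   svint32_t svadd[_s32](svint32_t, svint32_t) and
   svint32_t svadd[_n_s32](svint32_t, int32_t).  */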
1184
1185 /* svbool_t svfoo(svbool_t, svbool_t). */
1186 struct binary_pred_def : public nonoverloaded_base
1187 {
1188 void
1189 build (function_builder &b, const function_group_info &group) const OVERRIDE
1190 {
1191 build_all (b, "v0,v0,v0", group, MODE_none);
1192 }
1193 };
1194 SHAPE (binary_pred)
1195
1196 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
1197
1198 where the final argument must be 90 or 270. */
1199 struct binary_rotate_def : public overloaded_base<0>
1200 {
1201 void
1202 build (function_builder &b, const function_group_info &group) const OVERRIDE
1203 {
1204 b.add_overloaded_functions (group, MODE_none);
1205 build_all (b, "v0,v0,v0,su64", group, MODE_none);
1206 }
1207
1208 tree
1209 resolve (function_resolver &r) const OVERRIDE
1210 {
1211 return r.resolve_uniform (2, 1);
1212 }
1213
1214 bool
1215 check (function_checker &c) const OVERRIDE
1216 {
1217 return c.require_immediate_either_or (2, 90, 270);
1218 }
1219 };
1220 SHAPE (binary_rotate)
1221
1222 /* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t)
1223
1224 i.e. a binary function that takes two scalars and returns a vector.
1225 An explicit type suffix is required. */
1226 struct binary_scalar_def : public nonoverloaded_base
1227 {
1228 void
1229 build (function_builder &b, const function_group_info &group) const OVERRIDE
1230 {
1231 build_all (b, "v0,s0,s0", group, MODE_none);
1232 }
1233 };
1234 SHAPE (binary_scalar)
1235
1236 /* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t).
1237
1238 i.e. a version of "binary" that returns unsigned integers. */
1239 struct binary_to_uint_def : public overloaded_base<0>
1240 {
1241 void
1242 build (function_builder &b, const function_group_info &group) const OVERRIDE
1243 {
1244 b.add_overloaded_functions (group, MODE_none);
1245 build_all (b, "vu0,v0,v0", group, MODE_none);
1246 }
1247
1248 tree
1249 resolve (function_resolver &r) const OVERRIDE
1250 {
1251 return r.resolve_uniform (2);
1252 }
1253 };
1254 SHAPE (binary_to_uint)
1255
1256 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
1257
1258 i.e. a version of "binary" in which the final argument is always an
1259 unsigned integer. */
1260 struct binary_uint_def : public overloaded_base<0>
1261 {
1262 void
1263 build (function_builder &b, const function_group_info &group) const OVERRIDE
1264 {
1265 b.add_overloaded_functions (group, MODE_none);
1266 build_all (b, "v0,v0,vu0", group, MODE_none);
1267 }
1268
1269 tree
1270 resolve (function_resolver &r) const OVERRIDE
1271 {
1272 unsigned int i, nargs;
1273 type_suffix_index type;
1274 if (!r.check_gp_argument (2, i, nargs)
1275 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1276 || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
1277 return error_mark_node;
1278
1279 return r.resolve_to (r.mode_suffix_id, type);
1280 }
1281 };
1282 SHAPE (binary_uint)
1283
1284 /* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0:uint>_t)
1285
1286 i.e. a version of binary_n in which the final argument is always an
1287 unsigned integer. */
1288 struct binary_uint_n_def : public overloaded_base<0>
1289 {
1290 void
1291 build (function_builder &b, const function_group_info &group) const OVERRIDE
1292 {
1293 b.add_overloaded_functions (group, MODE_none);
1294 build_all (b, "v0,v0,su0", group, MODE_none);
1295 }
1296
1297 tree
1298 resolve (function_resolver &r) const OVERRIDE
1299 {
1300 unsigned int i, nargs;
1301 type_suffix_index type;
1302 if (!r.check_gp_argument (2, i, nargs)
1303 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1304 || !r.require_derived_scalar_type (i + 1, TYPE_unsigned))
1305 return error_mark_node;
1306
1307 return r.resolve_to (r.mode_suffix_id, type);
1308 }
1309 };
1310 SHAPE (binary_uint_n)
1311
1312 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
1313 sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:uint>_t)
1314
1315 i.e. a version of the standard binary shape binary_opt_n in which
1316 the final argument is always an unsigned integer. */
1317 struct binary_uint_opt_n_def : public overloaded_base<0>
1318 {
1319 void
1320 build (function_builder &b, const function_group_info &group) const OVERRIDE
1321 {
1322 b.add_overloaded_functions (group, MODE_none);
1323 build_all (b, "v0,v0,vu0", group, MODE_none);
1324 build_all (b, "v0,v0,su0", group, MODE_n);
1325 }
1326
1327 tree
1328 resolve (function_resolver &r) const OVERRIDE
1329 {
1330 unsigned int i, nargs;
1331 type_suffix_index type;
1332 if (!r.check_gp_argument (2, i, nargs)
1333 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1334 return error_mark_node;
1335
1336 return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned);
1337 }
1338 };
1339 SHAPE (binary_uint_opt_n)
1340
1341 /* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t).
1342
1343 i.e. a version of binary_n in which the final argument is always
1344 a 64-bit unsigned integer. */
1345 struct binary_uint64_n_def : public overloaded_base<0>
1346 {
1347 void
1348 build (function_builder &b, const function_group_info &group) const OVERRIDE
1349 {
1350 b.add_overloaded_functions (group, MODE_none);
1351 build_all (b, "v0,v0,su64", group, MODE_none);
1352 }
1353
1354 tree
1355 resolve (function_resolver &r) const OVERRIDE
1356 {
1357 unsigned int i, nargs;
1358 type_suffix_index type;
1359 if (!r.check_gp_argument (2, i, nargs)
1360 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1361 || !r.require_scalar_type (i + 1, "uint64_t"))
1362 return error_mark_node;
1363
1364 return r.resolve_to (r.mode_suffix_id, type);
1365 }
1366 };
1367 SHAPE (binary_uint64_n)
1368
1369 /* sv<t0>_t svfoo[_t0](sv<t0>_t, svuint64_t)
1370 sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
1371
1372 i.e. a version of the standard binary shape binary_opt_n in which
1373 the final argument is always a uint64_t. */
1374 struct binary_uint64_opt_n_def : public overloaded_base<0>
1375 {
1376 void
1377 build (function_builder &b, const function_group_info &group) const OVERRIDE
1378 {
1379 b.add_overloaded_functions (group, MODE_none);
1380 build_all (b, "v0,v0,vu64", group, MODE_none);
1381 build_all (b, "v0,v0,su64", group, MODE_n);
1382 }
1383
1384 tree
1385 resolve (function_resolver &r) const OVERRIDE
1386 {
1387 unsigned int i, nargs;
1388 type_suffix_index type;
1389 if (!r.check_gp_argument (2, i, nargs)
1390 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1391 return error_mark_node;
1392
1393 return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned, 64);
1394 }
1395 };
1396 SHAPE (binary_uint64_opt_n)
1397
1398 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t). */
1399 struct binary_wide_def : public overloaded_base<0>
1400 {
1401 void
1402 build (function_builder &b, const function_group_info &group) const OVERRIDE
1403 {
1404 b.add_overloaded_functions (group, MODE_none);
1405 build_all (b, "v0,v0,vh0", group, MODE_none);
1406 }
1407
1408 tree
1409 resolve (function_resolver &r) const OVERRIDE
1410 {
1411 unsigned int i, nargs;
1412 type_suffix_index type;
1413 if (!r.check_gp_argument (2, i, nargs)
1414 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1415 || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
1416 r.HALF_SIZE))
1417 return error_mark_node;
1418
1419 return r.resolve_to (r.mode_suffix_id, type);
1420 }
1421 };
1422 SHAPE (binary_wide)
1423
1424 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t)
1425 sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:half>_t). */
1426 struct binary_wide_opt_n_def : public overloaded_base<0>
1427 {
1428 void
1429 build (function_builder &b, const function_group_info &group) const OVERRIDE
1430 {
1431 b.add_overloaded_functions (group, MODE_none);
1432 build_all (b, "v0,v0,vh0", group, MODE_none);
1433 build_all (b, "v0,v0,sh0", group, MODE_n);
1434 }
1435
1436 tree
1437 resolve (function_resolver &r) const OVERRIDE
1438 {
1439 unsigned int i, nargs;
1440 type_suffix_index type;
1441 if (!r.check_gp_argument (2, i, nargs)
1442 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1443 return error_mark_node;
1444
1445 return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
1446 r.HALF_SIZE);
1447 }
1448 };
1449 SHAPE (binary_wide_opt_n)
1450
1451 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1452 <t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t). */
1453 struct clast_def : public overloaded_base<0>
1454 {
1455 void
1456 build (function_builder &b, const function_group_info &group) const OVERRIDE
1457 {
1458 b.add_overloaded_functions (group, MODE_none);
1459 build_all (b, "v0,v0,v0", group, MODE_none);
1460 build_all (b, "s0,s0,v0", group, MODE_n);
1461 }
1462
1463 tree
1464 resolve (function_resolver &r) const OVERRIDE
1465 {
1466 unsigned int i, nargs;
1467 if (!r.check_gp_argument (2, i, nargs)
1468 || !r.require_vector_or_scalar_type (i))
1469 return error_mark_node;
1470
1471 if (r.scalar_argument_p (i))
1472 {
1473 type_suffix_index type;
1474 if (!r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
1475 || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
1476 return error_mark_node;
1477 return r.resolve_to (MODE_n, type);
1478 }
1479 else
1480 {
1481 type_suffix_index type;
1482 if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1483 || !r.require_matching_vector_type (i + 1, type))
1484 return error_mark_node;
1485 return r.resolve_to (MODE_none, type);
1486 }
1487 }
1488 };
1489 SHAPE (clast)
1490
1491 /* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t). */
1492 struct compare_def : public overloaded_base<0>
1493 {
1494 void
1495 build (function_builder &b, const function_group_info &group) const OVERRIDE
1496 {
1497 b.add_overloaded_functions (group, MODE_none);
1498 build_all (b, "vp,v0,v0", group, MODE_none);
1499 }
1500
1501 tree
1502 resolve (function_resolver &r) const OVERRIDE
1503 {
1504 return r.resolve_uniform (2);
1505 }
1506 };
1507 SHAPE (compare)
1508
1509 /* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1510 svbool_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
1511
1512 i.e. a comparison between two vectors, or between a vector and a scalar. */
1513 struct compare_opt_n_def : public overloaded_base<0>
1514 {
1515 void
1516 build (function_builder &b, const function_group_info &group) const OVERRIDE
1517 {
1518 b.add_overloaded_functions (group, MODE_none);
1519 build_all (b, "vp,v0,v0", group, MODE_none);
1520 build_all (b, "vp,v0,s0", group, MODE_n);
1521 }
1522
1523 tree
1524 resolve (function_resolver &r) const OVERRIDE
1525 {
1526 return r.resolve_uniform_opt_n (2);
1527 }
1528 };
1529 SHAPE (compare_opt_n)
1530
1531 /* svbool_t svfoo[_t0](const <t0>_t *, const <t0>_t *). */
1532 struct compare_ptr_def : public overloaded_base<0>
1533 {
1534 void
1535 build (function_builder &b, const function_group_info &group) const OVERRIDE
1536 {
1537 b.add_overloaded_functions (group, MODE_none);
1538 build_all (b, "vp,al,al", group, MODE_none);
1539 }
1540
1541 tree
1542 resolve (function_resolver &r) const OVERRIDE
1543 {
1544 unsigned int i, nargs;
1545 type_suffix_index type;
1546 if (!r.check_gp_argument (2, i, nargs)
1547 || (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
1548 || !r.require_matching_pointer_type (i + 1, i, type))
1549 return error_mark_node;
1550
1551 return r.resolve_to (r.mode_suffix_id, type);
1552 }
1553 };
1554 SHAPE (compare_ptr)
1555
1556 /* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)
1557
1558 where _t0 is a _b<bits> suffix that describes the predicate result.
1559 There is no direct relationship between the element sizes of _t0
1560 and _t1. */
1561 struct compare_scalar_def : public overloaded_base<1>
1562 {
1563 void
1564 build (function_builder &b, const function_group_info &group) const OVERRIDE
1565 {
1566 b.add_overloaded_functions (group, MODE_none);
1567 build_all (b, "vp,s1,s1", group, MODE_none);
1568 }
1569
1570 tree
1571 resolve (function_resolver &r) const OVERRIDE
1572 {
1573 unsigned int i, nargs;
1574 type_suffix_index type;
1575 if (!r.check_gp_argument (2, i, nargs)
1576 || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
1577 || !r.require_matching_integer_scalar_type (i + 1, i, type))
1578 return error_mark_node;
1579
1580 return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
1581 }
1582 };
1583 SHAPE (compare_scalar)
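
/* For example, svwhilelt uses this shape:
   svbool_t svwhilelt_b32[_s64](int64_t, int64_t), where _b32 gives the
   element size controlled by the predicate result and _s64 the type of
   the scalar arguments.  */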
1584
1585 /* svbool_t svfoo[_t0](sv<t0>_t, svint64_t) (for signed t0)
1586 svbool_t svfoo[_n_t0](sv<t0>_t, int64_t) (for signed t0)
1587 svbool_t svfoo[_t0](sv<t0>_t, svuint64_t) (for unsigned t0)
1588 svbool_t svfoo[_n_t0](sv<t0>_t, uint64_t) (for unsigned t0)
1589
1590 i.e. a comparison in which the second argument is 64 bits. */
1591 struct compare_wide_opt_n_def : public overloaded_base<0>
1592 {
1593 void
1594 build (function_builder &b, const function_group_info &group) const OVERRIDE
1595 {
1596 b.add_overloaded_functions (group, MODE_none);
1597 build_all (b, "vp,v0,vw0", group, MODE_none);
1598 build_all (b, "vp,v0,sw0", group, MODE_n);
1599 }
1600
1601 tree
1602 resolve (function_resolver &r) const OVERRIDE
1603 {
1604 unsigned int i, nargs;
1605 type_suffix_index type;
1606 if (!r.check_gp_argument (2, i, nargs)
1607 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
1608 return error_mark_node;
1609
1610 return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS, 64);
1611 }
1612 };
1613 SHAPE (compare_wide_opt_n)
1614
1615 /* uint64_t svfoo(). */
1616 struct count_inherent_def : public nonoverloaded_base
1617 {
1618 void
1619 build (function_builder &b, const function_group_info &group) const OVERRIDE
1620 {
1621 build_all (b, "su64", group, MODE_none);
1622 }
1623 };
1624 SHAPE (count_inherent)
1625
1626 /* uint64_t svfoo(enum svpattern). */
1627 struct count_pat_def : public nonoverloaded_base
1628 {
1629 void
1630 build (function_builder &b, const function_group_info &group) const OVERRIDE
1631 {
1632 build_all (b, "su64,epattern", group, MODE_none);
1633 }
1634 };
1635 SHAPE (count_pat)
1636
1637 /* uint64_t svfoo(svbool_t). */
1638 struct count_pred_def : public nonoverloaded_base
1639 {
1640 void
1641 build (function_builder &b, const function_group_info &group) const OVERRIDE
1642 {
1643 build_all (b, "su64,vp", group, MODE_none);
1644 }
1645 };
1646 SHAPE (count_pred)
1647
1648 /* uint64_t svfoo[_t0](sv<t0>_t). */
1649 struct count_vector_def : public overloaded_base<0>
1650 {
1651 void
1652 build (function_builder &b, const function_group_info &group) const OVERRIDE
1653 {
1654 b.add_overloaded_functions (group, MODE_none);
1655 build_all (b, "su64,v0", group, MODE_none);
1656 }
1657
1658 tree
1659 resolve (function_resolver &r) const OVERRIDE
1660 {
1661 return r.resolve_uniform (1);
1662 }
1663 };
1664 SHAPE (count_vector)
1665
1666 /* sv<t0>xN_t svfoo[_t0](sv<t0>_t, ..., sv<t0>_t)
1667
1668 where there are N arguments in total. */
1669 struct create_def : public overloaded_base<0>
1670 {
1671 void
1672 build (function_builder &b, const function_group_info &group) const OVERRIDE
1673 {
1674 b.add_overloaded_functions (group, MODE_none);
1675 build_all (b, "t0,v0*t", group, MODE_none);
1676 }
1677
1678 tree
1679 resolve (function_resolver &r) const OVERRIDE
1680 {
1681 return r.resolve_uniform (r.vectors_per_tuple ());
1682 }
1683 };
1684 SHAPE (create)
1685
1686 /* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)
1687
1688 where there are enough arguments to fill 128 bits of data (or to
1689 control 128 bits of data in the case of predicates). */
1690 struct dupq_def : public overloaded_base<1>
1691 {
1692 void
1693 build (function_builder &b, const function_group_info &group) const OVERRIDE
1694 {
1695 /* The "_n" suffix is optional; the full name has it, but the short
1696 name doesn't. */
1697 build_all (b, "v0,s0*q", group, MODE_n, true);
1698 }
1699
1700 tree
1701 resolve (function_resolver &) const OVERRIDE
1702 {
1703 /* The short forms just make "_n" implicit, so no resolution is needed. */
1704 gcc_unreachable ();
1705 }
1706 };
1707 SHAPE (dupq)
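
/* For example, svdupq[_n]_s32 takes four int32_t arguments (one per
   32-bit element in a 128-bit quadword), while the byte form
   svdupq[_n]_s8 takes sixteen.  */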
1708
1709 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
1710
1711 where the final argument is an integer constant expression that when
1712 multiplied by the number of bytes in t0 is in the range [0, 255]. */
1713 struct ext_def : public overloaded_base<0>
1714 {
1715 void
1716 build (function_builder &b, const function_group_info &group) const OVERRIDE
1717 {
1718 b.add_overloaded_functions (group, MODE_none);
1719 build_all (b, "v0,v0,v0,su64", group, MODE_none);
1720 }
1721
1722 tree
1723 resolve (function_resolver &r) const OVERRIDE
1724 {
1725 return r.resolve_uniform (2, 1);
1726 }
1727
1728 bool
1729 check (function_checker &c) const OVERRIDE
1730 {
1731 unsigned int bytes = c.type_suffix (0).element_bytes;
1732 return c.require_immediate_range (2, 0, 256 / bytes - 1);
1733 }
1734 };
1735 SHAPE (ext)
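
/* For example, svext[_s32] has 4-byte elements, so its index must be an
   integer constant expression in the range [0, 63]; 63 * 4 = 252 still
   lies within the [0, 255] byte range above.  */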
1736
1737 /* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t). */
1738 struct fold_left_def : public overloaded_base<0>
1739 {
1740 void
1741 build (function_builder &b, const function_group_info &group) const OVERRIDE
1742 {
1743 b.add_overloaded_functions (group, MODE_none);
1744 build_all (b, "s0,s0,v0", group, MODE_none);
1745 }
1746
1747 tree
1748 resolve (function_resolver &r) const OVERRIDE
1749 {
1750 unsigned int i, nargs;
1751 type_suffix_index type;
1752 if (!r.check_gp_argument (2, i, nargs)
1753 || !r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
1754 || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
1755 return error_mark_node;
1756
1757 return r.resolve_to (r.mode_suffix_id, type);
1758 }
1759 };
1760 SHAPE (fold_left)
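
/* For example, assuming svadda uses this shape, the _f32 instance would
   be (illustrative only; the governing predicate comes from the
   predication type rather than the shape):

     float32_t svadda[_f32] (svbool_t pg, float32_t initial,
                             svfloat32_t op);  */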
1761
1762 /* sv<t0>_t svfoo[_t0](sv<t0>xN_t, uint64_t)
1763
1764 where the final argument is an integer constant expression in
1765 the range [0, N - 1]. */
1766 struct get_def : public overloaded_base<0>
1767 {
1768 void
1769 build (function_builder &b, const function_group_info &group) const OVERRIDE
1770 {
1771 b.add_overloaded_functions (group, MODE_none);
1772 build_all (b, "v0,t0,su64", group, MODE_none);
1773 }
1774
1775 tree
1776 resolve (function_resolver &r) const OVERRIDE
1777 {
1778 unsigned int i, nargs;
1779 type_suffix_index type;
1780 if (!r.check_gp_argument (2, i, nargs)
1781 || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
1782 || !r.require_integer_immediate (i + 1))
1783 return error_mark_node;
1784
1785 return r.resolve_to (r.mode_suffix_id, type);
1786 }
1787
1788 bool
1789 check (function_checker &c) const OVERRIDE
1790 {
1791 unsigned int nvectors = c.vectors_per_tuple ();
1792 return c.require_immediate_range (1, 0, nvectors - 1);
1793 }
1794 };
1795 SHAPE (get)
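
/* For example, assuming svget2 uses this shape with N == 2, the _s32
   instance would be (illustrative only):

     svint32_t svget2[_s32] (svint32x2_t tuple, uint64_t imm_index);

   with imm_index required to be 0 or 1.  */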
1796
1797 /* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
1798 <t0>_t svfoo[_n_t0](<t0>_t, uint64_t)
1799
1800 where the t0 in the vector form is a signed or unsigned integer
1801 whose size is tied to the [bhwd] suffix of "svfoo". */
1802 struct inc_dec_def : public inc_dec_base
1803 {
1804 CONSTEXPR inc_dec_def () : inc_dec_base (false) {}
1805
1806 void
1807 build (function_builder &b, const function_group_info &group) const OVERRIDE
1808 {
1809 b.add_overloaded_functions (group, MODE_none);
1810 /* These functions are unusual in that the type suffixes for
1811 the scalar and vector forms are not related. The vector
1812 form always has exactly two potential suffixes while the
1813 scalar form always has four. */
1814 if (group.types[2][0] == NUM_TYPE_SUFFIXES)
1815 build_all (b, "v0,v0,su64", group, MODE_none);
1816 else
1817 build_all (b, "s0,s0,su64", group, MODE_n);
1818 }
1819 };
1820 SHAPE (inc_dec)
1821
1822 /* sv<t0>_t svfoo[_t0](sv<t0>_t, enum svpattern, uint64_t)
1823 <t0>_t svfoo[_n_t0](<t0>_t, enum svpattern, uint64_t)
1824
1825 where the t0 in the vector form is a signed or unsigned integer
1826 whose size is tied to the [bhwd] suffix of "svfoo". */
1827 struct inc_dec_pat_def : public inc_dec_base
1828 {
1829 CONSTEXPR inc_dec_pat_def () : inc_dec_base (true) {}
1830
1831 void
1832 build (function_builder &b, const function_group_info &group) const OVERRIDE
1833 {
1834 b.add_overloaded_functions (group, MODE_none);
1835 /* These functions are unusual in that the type suffixes for
1836 the scalar and vector forms are not related. The vector
1837 form always has exactly two potential suffixes while the
1838 scalar form always has four. */
1839 if (group.types[2][0] == NUM_TYPE_SUFFIXES)
1840 build_all (b, "v0,v0,epattern,su64", group, MODE_none);
1841 else
1842 build_all (b, "s0,s0,epattern,su64", group, MODE_n);
1843 }
1844 };
1845 SHAPE (inc_dec_pat)
1846
1847 /* sv<t0>_t svfoo[_t0](sv<t0>_t, svbool_t). */
1848 struct inc_dec_pred_def : public overloaded_base<0>
1849 {
1850 void
1851 build (function_builder &b, const function_group_info &group) const OVERRIDE
1852 {
1853 b.add_overloaded_functions (group, MODE_none);
1854 build_all (b, "v0,v0,vp", group, MODE_none);
1855 }
1856
1857 tree
1858 resolve (function_resolver &r) const OVERRIDE
1859 {
1860 unsigned int i, nargs;
1861 type_suffix_index type;
1862 if (!r.check_gp_argument (2, i, nargs)
1863 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
1864 || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
1865 return error_mark_node;
1866
1867 return r.resolve_to (r.mode_suffix_id, type);
1868 }
1869 };
1870 SHAPE (inc_dec_pred)
1871
1872 /* <t0>_t svfoo[_n_t0]_t1(<t0>_t, svbool_t)
1873
1874 where _t1 is a _b<bits> suffix that describes the svbool_t argument. */
1875 struct inc_dec_pred_scalar_def : public overloaded_base<2>
1876 {
1877 void
1878 build (function_builder &b, const function_group_info &group) const OVERRIDE
1879 {
1880 b.add_overloaded_functions (group, MODE_n);
1881 build_all (b, "s0,s0,vp", group, MODE_n);
1882 }
1883
1884 tree
1885 resolve (function_resolver &r) const OVERRIDE
1886 {
1887 unsigned int i, nargs;
1888 type_suffix_index type;
1889 if (!r.check_gp_argument (2, i, nargs)
1890 || (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
1891 || !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
1892 return error_mark_node;
1893
1894 return r.resolve_to (r.mode_suffix_id, type, r.type_suffix_ids[1]);
1895 }
1896 };
1897 SHAPE (inc_dec_pred_scalar)
1898
1899 /* sv<t0>[xN]_t svfoo_t0(). */
1900 struct inherent_def : public nonoverloaded_base
1901 {
1902 void
1903 build (function_builder &b, const function_group_info &group) const OVERRIDE
1904 {
1905 build_all (b, "t0", group, MODE_none);
1906 }
1907 };
1908 SHAPE (inherent)
1909
1910 /* svbool_t svfoo[_b](). */
1911 struct inherent_b_def : public overloaded_base<0>
1912 {
1913 void
1914 build (function_builder &b, const function_group_info &group) const OVERRIDE
1915 {
1916 /* The "_b" suffix is optional; the full name has it, but the short
1917 name doesn't. */
1918 build_all (b, "v0", group, MODE_none, true);
1919 }
1920
1921 tree
1922 resolve (function_resolver &) const OVERRIDE
1923 {
1924 /* The short forms just make "_b" implicit, so no resolution is needed. */
1925 gcc_unreachable ();
1926 }
1927 };
1928 SHAPE (inherent_b)
1929
1930 /* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
1931 sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t). */
1932 struct load_def : public load_contiguous_base
1933 {
1934 void
1935 build (function_builder &b, const function_group_info &group) const OVERRIDE
1936 {
1937 b.add_overloaded_functions (group, MODE_none);
1938 b.add_overloaded_functions (group, MODE_vnum);
1939 build_all (b, "t0,al", group, MODE_none);
1940 build_all (b, "t0,al,ss64", group, MODE_vnum);
1941 }
1942 };
1943 SHAPE (load)
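
/* For example, assuming svld1 uses this shape, the _s32 instances would
   be (illustrative only; the governing predicate comes from the
   predication type rather than the shape):

     svint32_t svld1[_s32] (svbool_t pg, const int32_t *base);
     svint32_t svld1_vnum[_s32] (svbool_t pg, const int32_t *base,
                                 int64_t vnum);  */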
1944
1945 /* sv<t0>_t svfoo_t0(const <X>_t *)
1946 sv<t0>_t svfoo_vnum_t0(const <X>_t *, int64_t)
1947
1948 where <X> is determined by the function base name. */
1949 struct load_ext_def : public nonoverloaded_base
1950 {
1951 void
1952 build (function_builder &b, const function_group_info &group) const OVERRIDE
1953 {
1954 build_all (b, "t0,al", group, MODE_none);
1955 build_all (b, "t0,al,ss64", group, MODE_vnum);
1956 }
1957 };
1958 SHAPE (load_ext)
1959
1960 /* sv<t0>_t svfoo_[s32]index_t0(const <X>_t *, svint32_t)
1961 sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
1962 sv<t0>_t svfoo_[u32]index_t0(const <X>_t *, svuint32_t)
1963 sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
1964
1965 sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
1966 sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
1967
1968 where <X> is determined by the function base name. */
1969 struct load_ext_gather_index_def : public load_ext_gather_base
1970 {
1971 void
1972 build (function_builder &b, const function_group_info &group) const OVERRIDE
1973 {
1974 b.add_overloaded_functions (group, MODE_index);
1975 build_sv_index (b, "t0,al,d", group);
1976 build_vs_index (b, "t0,b,ss64", group);
1977 }
1978 };
1979 SHAPE (load_ext_gather_index)
1980
1981 /* sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
1982 sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)
1983
1984 sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
1985 sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
1986
1987 where <X> is determined by the function base name. This is a version
1988 of load_ext_gather_index that doesn't support 32-bit vector indices. */
1989 struct load_ext_gather_index_restricted_def : public load_ext_gather_base
1990 {
1991 void
1992 build (function_builder &b, const function_group_info &group) const OVERRIDE
1993 {
1994 b.add_overloaded_functions (group, MODE_index);
1995 build_sv_index64 (b, "t0,al,d", group);
1996 build_vs_index (b, "t0,b,ss64", group);
1997 }
1998 };
1999 SHAPE (load_ext_gather_index_restricted)
2000
2001 /* sv<t0>_t svfoo_[s32]offset_t0(const <X>_t *, svint32_t)
2002 sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
2003 sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
2004 sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
2005
2006 sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2007 sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2008
2009 sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2010 sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
2011
2012 where <X> is determined by the function base name. */
2013 struct load_ext_gather_offset_def : public load_ext_gather_base
2014 {
2015 void
2016 build (function_builder &b, const function_group_info &group) const OVERRIDE
2017 {
2018 b.add_overloaded_functions (group, MODE_offset);
2019 build_sv_offset (b, "t0,al,d", group);
2020 build_v_base (b, "t0,b", group, true);
2021 build_vs_offset (b, "t0,b,ss64", group);
2022 }
2023 };
2024 SHAPE (load_ext_gather_offset)
2025
2026 /* sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
2027 sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
2028 sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)
2029
2030 sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2031 sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2032
2033 sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2034 sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)
2035
2036 where <X> is determined by the function base name. This is
2037 load_ext_gather_offset without the s32 vector offset form. */
2038 struct load_ext_gather_offset_restricted_def : public load_ext_gather_base
2039 {
2040 void
2041 build (function_builder &b, const function_group_info &group) const OVERRIDE
2042 {
2043 b.add_overloaded_functions (group, MODE_offset);
2044 build_sv_uint_offset (b, "t0,al,d", group);
2045 build_v_base (b, "t0,b", group, true);
2046 build_vs_offset (b, "t0,b,ss64", group);
2047 }
2048 };
2049 SHAPE (load_ext_gather_offset_restricted)
2050
2051 /* sv<t0>_t svfoo_[s32]index[_t0](const <t0>_t *, svint32_t)
2052 sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
2053 sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
2054 sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)
2055
2056 sv<t0>_t svfoo_[s32]offset[_t0](const <t0>_t *, svint32_t)
2057 sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
2058 sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
2059 sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t). */
2060 struct load_gather_sv_def : public load_gather_sv_base
2061 {
2062 void
2063 build (function_builder &b, const function_group_info &group) const OVERRIDE
2064 {
2065 b.add_overloaded_functions (group, MODE_index);
2066 b.add_overloaded_functions (group, MODE_offset);
2067 build_sv_index (b, "t0,al,d", group);
2068 build_sv_offset (b, "t0,al,d", group);
2069 }
2070 };
2071 SHAPE (load_gather_sv)
2072
2073 /* sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
2074 sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)
2075
2076 sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
2077 sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
2078 sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t)
2079
2080 This is load_gather_sv without the 32-bit vector index forms and
2081 without the s32 vector offset form. */
2082 struct load_gather_sv_restricted_def : public load_gather_sv_base
2083 {
2084 void
2085 build (function_builder &b, const function_group_info &group) const OVERRIDE
2086 {
2087 b.add_overloaded_functions (group, MODE_index);
2088 b.add_overloaded_functions (group, MODE_offset);
2089 build_sv_index64 (b, "t0,al,d", group);
2090 build_sv_uint_offset (b, "t0,al,d", group);
2091 }
2092 };
2093 SHAPE (load_gather_sv_restricted)
2094
2095 /* sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
2096 sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
2097
2098 sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
2099 sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)
2100
2101 sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
2102 sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t). */
2103 struct load_gather_vs_def : public overloaded_base<1>
2104 {
2105 void
2106 build (function_builder &b, const function_group_info &group) const OVERRIDE
2107 {
2108 /* The base vector mode is optional; the full name has it but the
2109 short name doesn't. There is no ambiguity with SHAPE_load_gather_sv
2110 because the latter uses an implicit type suffix. */
2111 build_v_base (b, "t0,b", group, true);
2112 build_vs_index (b, "t0,b,ss64", group, true);
2113 build_vs_offset (b, "t0,b,ss64", group, true);
2114 }
2115
2116 tree
2117 resolve (function_resolver &) const OVERRIDE
2118 {
2119 /* The short name just makes the base vector mode implicit;
2120 no resolution is needed. */
2121 gcc_unreachable ();
2122 }
2123 };
2124 SHAPE (load_gather_vs)
2125
2126 /* sv<t0>_t svfoo[_t0](const <t0>_t *)
2127
2128 The only difference from "load" is that this shape has no vnum form. */
2129 struct load_replicate_def : public load_contiguous_base
2130 {
2131 void
2132 build (function_builder &b, const function_group_info &group) const OVERRIDE
2133 {
2134 b.add_overloaded_functions (group, MODE_none);
2135 build_all (b, "t0,al", group, MODE_none);
2136 }
2137 };
2138 SHAPE (load_replicate)
2139
2140 /* svbool_t svfoo(enum svpattern). */
2141 struct pattern_pred_def : public nonoverloaded_base
2142 {
2143 void
2144 build (function_builder &b, const function_group_info &group) const OVERRIDE
2145 {
2146 build_all (b, "vp,epattern", group, MODE_none);
2147 }
2148 };
2149 SHAPE (pattern_pred)
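
/* For example, svptrue_pat_b8 is expected to use this shape, giving the
   prototype (illustrative only):

     svbool_t svptrue_pat_b8 (enum svpattern pattern);  */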
2150
2151 /* void svfoo(const void *, svprfop)
2152 void svfoo_vnum(const void *, int64_t, svprfop). */
2153 struct prefetch_def : public nonoverloaded_base
2154 {
2155 void
2156 build (function_builder &b, const function_group_info &group) const OVERRIDE
2157 {
2158 build_all (b, "_,ap,eprfop", group, MODE_none);
2159 build_all (b, "_,ap,ss64,eprfop", group, MODE_vnum);
2160 }
2161 };
2162 SHAPE (prefetch)
2163
2164 /* void svfoo_[s32]index(const void *, svint32_t, svprfop)
2165 void svfoo_[s64]index(const void *, svint64_t, svprfop)
2166 void svfoo_[u32]index(const void *, svuint32_t, svprfop)
2167 void svfoo_[u64]index(const void *, svuint64_t, svprfop)
2168
2169 void svfoo[_u32base](svuint32_t, svprfop)
2170 void svfoo[_u64base](svuint64_t, svprfop)
2171
2172 void svfoo[_u32base]_index(svuint32_t, int64_t, svprfop)
2173 void svfoo[_u64base]_index(svuint64_t, int64_t, svprfop). */
2174 struct prefetch_gather_index_def : public prefetch_gather_base
2175 {
2176 void
2177 build (function_builder &b, const function_group_info &group) const OVERRIDE
2178 {
2179 b.add_overloaded_functions (group, MODE_none);
2180 b.add_overloaded_functions (group, MODE_index);
2181 build_sv_index (b, "_,ap,d,eprfop", group);
2182 build_v_base (b, "_,b,eprfop", group);
2183 build_vs_index (b, "_,b,ss64,eprfop", group);
2184 }
2185 };
2186 SHAPE (prefetch_gather_index)
2187
2188 /* void svfoo_[s32]offset(const void *, svint32_t, svprfop)
2189 void svfoo_[s64]offset(const void *, svint64_t, svprfop)
2190 void svfoo_[u32]offset(const void *, svuint32_t, svprfop)
2191 void svfoo_[u64]offset(const void *, svuint64_t, svprfop)
2192
2193 void svfoo[_u32base](svuint32_t, svprfop)
2194 void svfoo[_u64base](svuint64_t, svprfop)
2195
2196 void svfoo[_u32base]_offset(svuint32_t, int64_t, svprfop)
2197 void svfoo[_u64base]_offset(svuint64_t, int64_t, svprfop). */
2198 struct prefetch_gather_offset_def : public prefetch_gather_base
2199 {
2200 void
2201 build (function_builder &b, const function_group_info &group) const OVERRIDE
2202 {
2203 b.add_overloaded_functions (group, MODE_none);
2204 b.add_overloaded_functions (group, MODE_offset);
2205 build_sv_offset (b, "_,ap,d,eprfop", group);
2206 build_v_base (b, "_,b,eprfop", group);
2207 build_vs_offset (b, "_,b,ss64,eprfop", group);
2208 }
2209 };
2210 SHAPE (prefetch_gather_offset)
2211
2212 /* bool svfoo(svbool_t). */
2213 struct ptest_def : public nonoverloaded_base
2214 {
2215 void
2216 build (function_builder &b, const function_group_info &group) const OVERRIDE
2217 {
2218 build_all (b, "sp,vp", group, MODE_none);
2219 }
2220 };
2221 SHAPE (ptest)
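
/* For example, svptest_any is expected to use this shape, giving the
   prototype (illustrative only; the governing predicate comes from the
   predication type rather than the shape):

     bool svptest_any (svbool_t pg, svbool_t op);  */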
2222
2223 /* svbool_t svfoo(). */
2224 struct rdffr_def : public nonoverloaded_base
2225 {
2226 void
2227 build (function_builder &b, const function_group_info &group) const OVERRIDE
2228 {
2229 build_all (b, "vp", group, MODE_none);
2230 }
2231 };
2232 SHAPE (rdffr)
2233
2234 /* <t0>_t svfoo[_t0](sv<t0>_t). */
2235 struct reduction_def : public overloaded_base<0>
2236 {
2237 void
2238 build (function_builder &b, const function_group_info &group) const OVERRIDE
2239 {
2240 b.add_overloaded_functions (group, MODE_none);
2241 build_all (b, "s0,v0", group, MODE_none);
2242 }
2243
2244 tree
2245 resolve (function_resolver &r) const OVERRIDE
2246 {
2247 return r.resolve_uniform (1);
2248 }
2249 };
2250 SHAPE (reduction)
2251
2252 /* int64_t svfoo[_t0](sv<t0>_t) (for signed t0)
2253 uint64_t svfoo[_t0](sv<t0>_t) (for unsigned t0)
2254 <t0>_t svfoo[_t0](sv<t0>_t) (for floating-point t0)
2255
2256 i.e. a version of "reduction" in which the return type for integers
2257 always has 64 bits. */
2258 struct reduction_wide_def : public overloaded_base<0>
2259 {
2260 void
2261 build (function_builder &b, const function_group_info &group) const OVERRIDE
2262 {
2263 b.add_overloaded_functions (group, MODE_none);
2264 build_all (b, "sw0,v0", group, MODE_none);
2265 }
2266
2267 tree
2268 resolve (function_resolver &r) const OVERRIDE
2269 {
2270 return r.resolve_uniform (1);
2271 }
2272 };
2273 SHAPE (reduction_wide)
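
/* For example, assuming svaddv uses this shape, the _s8 instance would
   return a 64-bit result (illustrative only):

     int64_t svaddv[_s8] (svbool_t pg, svint8_t op);  */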
2274
2275 /* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)
2276
2277 where the second argument is an integer constant expression in the
2278 range [0, N - 1]. */
2279 struct set_def : public overloaded_base<0>
2280 {
2281 void
2282 build (function_builder &b, const function_group_info &group) const OVERRIDE
2283 {
2284 b.add_overloaded_functions (group, MODE_none);
2285 build_all (b, "t0,t0,su64,v0", group, MODE_none);
2286 }
2287
2288 tree
2289 resolve (function_resolver &r) const OVERRIDE
2290 {
2291 unsigned int i, nargs;
2292 type_suffix_index type;
2293 if (!r.check_gp_argument (3, i, nargs)
2294 || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
2295 || !r.require_integer_immediate (i + 1)
2296 || !r.require_derived_vector_type (i + 2, i, type))
2297 return error_mark_node;
2298
2299 return r.resolve_to (r.mode_suffix_id, type);
2300 }
2301
2302 bool
2303 check (function_checker &c) const OVERRIDE
2304 {
2305 unsigned int nvectors = c.vectors_per_tuple ();
2306 return c.require_immediate_range (1, 0, nvectors - 1);
2307 }
2308 };
2309 SHAPE (set)
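
/* For example, assuming svset2 uses this shape with N == 2, the _s32
   instance would be (illustrative only):

     svint32x2_t svset2[_s32] (svint32x2_t tuple, uint64_t imm_index,
                               svint32_t x);  */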
2310
2311 /* void svfoo(). */
2312 struct setffr_def : public nonoverloaded_base
2313 {
2314 void
2315 build (function_builder &b, const function_group_info &group) const OVERRIDE
2316 {
2317 build_all (b, "_", group, MODE_none);
2318 }
2319 };
2320 SHAPE (setffr)
2321
2322 /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2323
2324 where the final argument must be an integer constant expression in the
2325 range [0, sizeof (<t0>_t) * 8 - 1]. */
2326 struct shift_left_imm_def : public overloaded_base<0>
2327 {
2328 void
2329 build (function_builder &b, const function_group_info &group) const OVERRIDE
2330 {
2331 b.add_overloaded_functions (group, MODE_n);
2332 build_all (b, "v0,v0,su64", group, MODE_n);
2333 }
2334
2335 tree
2336 resolve (function_resolver &r) const OVERRIDE
2337 {
2338 return r.resolve_uniform (1, 1);
2339 }
2340
2341 bool
2342 check (function_checker &c) const OVERRIDE
2343 {
2344 unsigned int bits = c.type_suffix (0).element_bits;
2345 return c.require_immediate_range (1, 0, bits - 1);
2346 }
2347 };
2348 SHAPE (shift_left_imm)
2349
2350 /* sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, uint64_t)
2351
2352 where the final argument must be an integer constant expression in the
2353 range [0, sizeof (<t0>_t) * 4 - 1]. */
2354 struct shift_left_imm_long_def : public binary_imm_long_base
2355 {
2356 bool
2357 check (function_checker &c) const OVERRIDE
2358 {
2359 unsigned int bits = c.type_suffix (0).element_bits / 2;
2360 return c.require_immediate_range (1, 0, bits - 1);
2361 }
2362 };
2363 SHAPE (shift_left_imm_long)
2364
2365 /* sv<t0:uint>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2366
2367 where the final argument must be an integer constant expression in the
2368 range [0, sizeof (<t0>_t) * 8 - 1]. */
2369 struct shift_left_imm_to_uint_def : public shift_left_imm_def
2370 {
2371 void
2372 build (function_builder &b, const function_group_info &group) const OVERRIDE
2373 {
2374 b.add_overloaded_functions (group, MODE_n);
2375 build_all (b, "vu0,v0,su64", group, MODE_n);
2376 }
2377 };
2378 SHAPE (shift_left_imm_to_uint)
2379
2380 /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2381
2382 where the final argument must be an integer constant expression in the
2383 range [1, sizeof (<t0>_t) * 8]. */
2384 struct shift_right_imm_def : public overloaded_base<0>
2385 {
2386 void
2387 build (function_builder &b, const function_group_info &group) const OVERRIDE
2388 {
2389 b.add_overloaded_functions (group, MODE_n);
2390 build_all (b, "v0,v0,su64", group, MODE_n);
2391 }
2392
2393 tree
2394 resolve (function_resolver &r) const OVERRIDE
2395 {
2396 return r.resolve_uniform (1, 1);
2397 }
2398
2399 bool
2400 check (function_checker &c) const OVERRIDE
2401 {
2402 unsigned int bits = c.type_suffix (0).element_bits;
2403 return c.require_immediate_range (1, 1, bits);
2404 }
2405 };
2406 SHAPE (shift_right_imm)
2407
2408 /* sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2409
2410 where the final argument must be an integer constant expression in the
2411 range [1, sizeof (<t0>_t) * 4]. */
2412 typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base<>, 1>
2413 shift_right_imm_narrowb_def;
2414 SHAPE (shift_right_imm_narrowb)
2415
2416 /* sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, uint64_t)
2417
2418 where the final argument must be an integer constant expression in the
2419 range [1, sizeof (<t0>_t) * 4]. */
2420 typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base<>, 2>
2421 shift_right_imm_narrowt_def;
2422 SHAPE (shift_right_imm_narrowt)
2423
2424 /* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)
2425
2426 where the final argument must be an integer constant expression in the
2427 range [1, sizeof (<t0>_t) * 4]. */
2428 typedef binary_imm_narrowb_base<TYPE_unsigned>
2429 binary_imm_narrowb_base_unsigned;
2430 typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base_unsigned, 1>
2431 shift_right_imm_narrowb_to_uint_def;
2432 SHAPE (shift_right_imm_narrowb_to_uint)
2433
2434 /* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0:uint:half>_t, sv<t0>_t, uint64_t)
2435
2436 where the final argument must be an integer constant expression in the
2437 range [1, sizeof (<t0>_t) * 4]. */
2438 typedef binary_imm_narrowt_base<TYPE_unsigned>
2439 binary_imm_narrowt_base_unsigned;
2440 typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base_unsigned, 2>
2441 shift_right_imm_narrowt_to_uint_def;
2442 SHAPE (shift_right_imm_narrowt_to_uint)
2443
2444 /* void svfoo[_t0](<X>_t *, sv<t0>[xN]_t)
2445 void svfoo_vnum[_t0](<X>_t *, int64_t, sv<t0>[xN]_t)
2446
2447 where <X> might be tied to <t0> (for non-truncating stores) or might
2448 depend on the function base name (for truncating stores). */
2449 struct store_def : public overloaded_base<0>
2450 {
2451 void
2452 build (function_builder &b, const function_group_info &group) const OVERRIDE
2453 {
2454 b.add_overloaded_functions (group, MODE_none);
2455 b.add_overloaded_functions (group, MODE_vnum);
2456 build_all (b, "_,as,t0", group, MODE_none);
2457 build_all (b, "_,as,ss64,t0", group, MODE_vnum);
2458 }
2459
2460 tree
2461 resolve (function_resolver &r) const OVERRIDE
2462 {
2463 bool vnum_p = r.mode_suffix_id == MODE_vnum;
2464 gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);
2465
2466 unsigned int i, nargs;
2467 type_suffix_index type;
2468 if (!r.check_gp_argument (vnum_p ? 3 : 2, i, nargs)
2469 || !r.require_pointer_type (i)
2470 || (vnum_p && !r.require_scalar_type (i + 1, "int64_t"))
2471 || ((type = r.infer_tuple_type (nargs - 1)) == NUM_TYPE_SUFFIXES))
2472 return error_mark_node;
2473
2474 return r.resolve_to (r.mode_suffix_id, type);
2475 }
2476 };
2477 SHAPE (store)
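
/* For example, assuming svst1 uses this shape, the _s32 instances would
   be (illustrative only; the governing predicate comes from the
   predication type rather than the shape):

     void svst1[_s32] (svbool_t pg, int32_t *base, svint32_t data);
     void svst1_vnum[_s32] (svbool_t pg, int32_t *base, int64_t vnum,
                            svint32_t data);  */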
2478
2479 /* void svfoo_[s32]index[_t0](<X>_t *, svint32_t, sv<t0>_t)
2480 void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
2481 void svfoo_[u32]index[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2482 void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2483
2484 void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
2485 void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)
2486
2487 where <X> might be tied to <t0> (for non-truncating stores) or might
2488 depend on the function base name (for truncating stores). */
2489 struct store_scatter_index_def : public store_scatter_base
2490 {
2491 void
2492 build (function_builder &b, const function_group_info &group) const OVERRIDE
2493 {
2494 b.add_overloaded_functions (group, MODE_index);
2495 build_sv_index (b, "_,as,d,t0", group);
2496 build_vs_index (b, "_,b,ss64,t0", group);
2497 }
2498 };
2499 SHAPE (store_scatter_index)
2500
2501 /* void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
2502 void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2503
2504 void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
2505 void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)
2506
2507 i.e. a version of store_scatter_index that doesn't support 32-bit
2508 vector indices. */
2509 struct store_scatter_index_restricted_def : public store_scatter_base
2510 {
2511 void
2512 build (function_builder &b, const function_group_info &group) const OVERRIDE
2513 {
2514 b.add_overloaded_functions (group, MODE_index);
2515 build_sv_index64 (b, "_,as,d,t0", group);
2516 build_vs_index (b, "_,b,ss64,t0", group);
2517 }
2518 };
2519 SHAPE (store_scatter_index_restricted)
2520
2521 /* void svfoo_[s32]offset[_t0](<X>_t *, svint32_t, sv<t0>_t)
2522 void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
2523 void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2524 void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2525
2526 void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
2527 void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)
2528
2529 void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
2530 void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)
2531
2532 where <X> might be tied to <t0> (for non-truncating stores) or might
2533 depend on the function base name (for truncating stores). */
2534 struct store_scatter_offset_def : public store_scatter_base
2535 {
2536 void
2537 build (function_builder &b, const function_group_info &group) const OVERRIDE
2538 {
2539 b.add_overloaded_functions (group, MODE_none);
2540 b.add_overloaded_functions (group, MODE_offset);
2541 build_sv_offset (b, "_,as,d,t0", group);
2542 build_v_base (b, "_,b,t0", group);
2543 build_vs_offset (b, "_,b,ss64,t0", group);
2544 }
2545 };
2546 SHAPE (store_scatter_offset)
2547
2548 /* void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
2549 void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
2550 void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)
2551
2552 void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
2553 void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)
2554
2555 void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
2556 void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)
2557
2558 i.e. a version of store_scatter_offset that doesn't support svint32_t
2559 offsets. */
2560 struct store_scatter_offset_restricted_def : public store_scatter_base
2561 {
2562 void
2563 build (function_builder &b, const function_group_info &group) const OVERRIDE
2564 {
2565 b.add_overloaded_functions (group, MODE_none);
2566 b.add_overloaded_functions (group, MODE_offset);
2567 build_sv_uint_offset (b, "_,as,d,t0", group);
2568 build_v_base (b, "_,b,t0", group);
2569 build_vs_offset (b, "_,b,ss64,t0", group);
2570 }
2571 };
2572 SHAPE (store_scatter_offset_restricted)
2573
2574 /* sv<t0>_t svfoo[_t0](sv<t0>xN_t, sv<t0:uint>_t). */
2575 struct tbl_tuple_def : public overloaded_base<0>
2576 {
2577 void
2578 build (function_builder &b, const function_group_info &group) const OVERRIDE
2579 {
2580 b.add_overloaded_functions (group, MODE_none);
2581 build_all (b, "v0,t0,vu0", group, MODE_none);
2582 }
2583
2584 tree
2585 resolve (function_resolver &r) const OVERRIDE
2586 {
2587 unsigned int i, nargs;
2588 type_suffix_index type;
2589 if (!r.check_gp_argument (2, i, nargs)
2590 || (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
2591 || !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
2592 return error_mark_node;
2593
2594 return r.resolve_to (r.mode_suffix_id, type);
2595 }
2596 };
2597 SHAPE (tbl_tuple)
2598
2599 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)
2600
2601 where the final argument is an integer constant expression in the
2602 range [0, 16 / sizeof (<t0>_t) - 1]. */
2603 struct ternary_lane_def : public overloaded_base<0>
2604 {
2605 void
2606 build (function_builder &b, const function_group_info &group) const OVERRIDE
2607 {
2608 b.add_overloaded_functions (group, MODE_none);
2609 build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
2610 }
2611
2612 tree
2613 resolve (function_resolver &r) const OVERRIDE
2614 {
2615 return r.resolve_uniform (3, 1);
2616 }
2617
2618 bool
2619 check (function_checker &c) const OVERRIDE
2620 {
2621 return c.require_immediate_lane_index (3);
2622 }
2623 };
2624 SHAPE (ternary_lane)
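
/* For example, assuming svmla_lane uses this shape, the _f32 instance
   would be (illustrative only):

     svfloat32_t svmla_lane[_f32] (svfloat32_t op1, svfloat32_t op2,
                                   svfloat32_t op3, uint64_t imm_index);

   with imm_index required to be a constant in [0, 3].  */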
2625
2626 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t, uint64_t)
2627
2628 where the penultimate argument is an integer constant expression in
2629 the range [0, 8 / sizeof (<t0>_t) - 1] and where the final argument
2630 is an integer constant expression in {0, 90, 180, 270}. */
2631 struct ternary_lane_rotate_def : public overloaded_base<0>
2632 {
2633 void
2634 build (function_builder &b, const function_group_info &group) const OVERRIDE
2635 {
2636 b.add_overloaded_functions (group, MODE_none);
2637 build_all (b, "v0,v0,v0,v0,su64,su64", group, MODE_none);
2638 }
2639
2640 tree
2641 resolve (function_resolver &r) const OVERRIDE
2642 {
2643 return r.resolve_uniform (3, 2);
2644 }
2645
2646 bool
2647 check (function_checker &c) const OVERRIDE
2648 {
2649 return (c.require_immediate_lane_index (3, 2)
2650 && c.require_immediate_one_of (4, 0, 90, 180, 270));
2651 }
2652 };
2653 SHAPE (ternary_lane_rotate)
2654
2655 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t, uint64_t)
2656
2657 where the final argument is an integer constant expression in the range
2658 [0, 32 / sizeof (<t0>_t) - 1]. */
2659 struct ternary_long_lane_def
2660 : public ternary_resize2_lane_base<function_resolver::HALF_SIZE>
2661 {
2662 void
2663 build (function_builder &b, const function_group_info &group) const OVERRIDE
2664 {
2665 b.add_overloaded_functions (group, MODE_none);
2666 build_all (b, "v0,v0,vh0,vh0,su64", group, MODE_none);
2667 }
2668
2669 bool
2670 check (function_checker &c) const OVERRIDE
2671 {
2672 return c.require_immediate_lane_index (3);
2673 }
2674 };
2675 SHAPE (ternary_long_lane)
2676
2677 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t)
2678 sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:half>_t, <t0:half>_t)
2679
2680 i.e. a version of the standard ternary shape ternary_opt_n in which
2681 the element type of the last two arguments is the half-sized
2682 equivalent of <t0>. */
2683 struct ternary_long_opt_n_def
2684 : public ternary_resize2_opt_n_base<function_resolver::HALF_SIZE>
2685 {
2686 void
2687 build (function_builder &b, const function_group_info &group) const OVERRIDE
2688 {
2689 b.add_overloaded_functions (group, MODE_none);
2690 build_all (b, "v0,v0,vh0,vh0", group, MODE_none);
2691 build_all (b, "v0,v0,vh0,sh0", group, MODE_n);
2692 }
2693 };
2694 SHAPE (ternary_long_opt_n)
2695
2696 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)
2697 sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, <t0>_t)
2698
2699 i.e. the standard shape for ternary operations that operate on
2700 uniform types. */
2701 struct ternary_opt_n_def : public overloaded_base<0>
2702 {
2703 void
2704 build (function_builder &b, const function_group_info &group) const OVERRIDE
2705 {
2706 b.add_overloaded_functions (group, MODE_none);
2707 build_all (b, "v0,v0,v0,v0", group, MODE_none);
2708 build_all (b, "v0,v0,v0,s0", group, MODE_n);
2709 }
2710
2711 tree
2712 resolve (function_resolver &r) const OVERRIDE
2713 {
2714 return r.resolve_uniform_opt_n (3);
2715 }
2716 };
2717 SHAPE (ternary_opt_n)
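
/* For example, assuming svmla uses this shape, the _f32 instances would
   be (illustrative only; the _m suffix and governing predicate come
   from the predication type rather than the shape):

     svfloat32_t svmla[_f32]_m (svbool_t pg, svfloat32_t op1,
                                svfloat32_t op2, svfloat32_t op3);
     svfloat32_t svmla[_n_f32]_m (svbool_t pg, svfloat32_t op1,
                                  svfloat32_t op2, float32_t op3);  */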
2718
2719 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t, uint64_t)
2720
2721 where the final argument is an integer constant expression in the range
2722 [0, 16 / sizeof (<t0>_t) - 1]. */
2723 struct ternary_qq_lane_def
2724 : public ternary_resize2_lane_base<function_resolver::QUARTER_SIZE>
2725 {
2726 void
2727 build (function_builder &b, const function_group_info &group) const OVERRIDE
2728 {
2729 b.add_overloaded_functions (group, MODE_none);
2730 build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
2731 }
2732
2733 bool
2734 check (function_checker &c) const OVERRIDE
2735 {
2736 return c.require_immediate_lane_index (3, 4);
2737 }
2738 };
2739 SHAPE (ternary_qq_lane)
2740
2741 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
2742 uint64_t, uint64_t)
2743
2744 where the penultimate argument is an integer constant expression in the
2745 range [0, 16 / sizeof (<t0>_t) - 1] and the final argument is an integer constant expression in {0, 90, 180, 270}. */
2746 struct ternary_qq_lane_rotate_def : public overloaded_base<0>
2747 {
2748 void
2749 build (function_builder &b, const function_group_info &group) const OVERRIDE
2750 {
2751 b.add_overloaded_functions (group, MODE_none);
2752 build_all (b, "v0,v0,vq0,vq0,su64,su64", group, MODE_none);
2753 }
2754
2755 tree
2756 resolve (function_resolver &r) const OVERRIDE
2757 {
2758 unsigned int i, nargs;
2759 type_suffix_index type;
2760 if (!r.check_gp_argument (5, i, nargs)
2761 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
2762 || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
2763 r.QUARTER_SIZE)
2764 || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
2765 r.QUARTER_SIZE)
2766 || !r.require_integer_immediate (i + 3)
2767 || !r.require_integer_immediate (i + 4))
2768 return error_mark_node;
2769
2770 return r.resolve_to (r.mode_suffix_id, type);
2771 }
2772
2773 bool
2774 check (function_checker &c) const OVERRIDE
2775 {
2776 return (c.require_immediate_lane_index (3, 4)
2777 && c.require_immediate_one_of (4, 0, 90, 180, 270));
2778 }
2779 };
2780 SHAPE (ternary_qq_lane_rotate)
2781
2782 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
2783 sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)
2784
2785 i.e. a version of the standard ternary shape ternary_opt_n in which
2786 the element type of the last two arguments is the quarter-sized
2787 equivalent of <t0>. */
2788 struct ternary_qq_opt_n_def
2789 : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE>
2790 {
2791 void
2792 build (function_builder &b, const function_group_info &group) const OVERRIDE
2793 {
2794 b.add_overloaded_functions (group, MODE_none);
2795 build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
2796 build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
2797 }
2798 };
2799 SHAPE (ternary_qq_opt_n)
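
/* For example, assuming svdot uses this shape, the _s32 instances would
   be (illustrative only):

     svint32_t svdot[_s32] (svint32_t op1, svint8_t op2, svint8_t op3);
     svint32_t svdot[_n_s32] (svint32_t op1, svint8_t op2, int8_t op3);  */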
2800
2801 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
2802 uint64_t)
2803
2804 where the final argument is an integer constant expression in
2805 {0, 90, 180, 270}. */
2806 struct ternary_qq_rotate_def : public overloaded_base<0>
2807 {
2808 void
2809 build (function_builder &b, const function_group_info &group) const OVERRIDE
2810 {
2811 b.add_overloaded_functions (group, MODE_none);
2812 build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
2813 }
2814
2815 tree
2816 resolve (function_resolver &r) const OVERRIDE
2817 {
2818 unsigned int i, nargs;
2819 type_suffix_index type;
2820 if (!r.check_gp_argument (4, i, nargs)
2821 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
2822 || !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
2823 r.QUARTER_SIZE)
2824 || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
2825 r.QUARTER_SIZE)
2826 || !r.require_integer_immediate (i + 3))
2827 return error_mark_node;
2828
2829 return r.resolve_to (r.mode_suffix_id, type);
2830 }
2831
2832 bool
2833 check (function_checker &c) const OVERRIDE
2834 {
2835 return c.require_immediate_one_of (3, 0, 90, 180, 270);
2836 }
2837 };
2838 SHAPE (ternary_qq_rotate)
2839
2840 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)
2841
2842 where the final argument is an integer constant expression in
2843 {0, 90, 180, 270}. */
2844 struct ternary_rotate_def : public overloaded_base<0>
2845 {
2846 void
2847 build (function_builder &b, const function_group_info &group) const OVERRIDE
2848 {
2849 b.add_overloaded_functions (group, MODE_none);
2850 build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
2851 }
2852
2853 tree
2854 resolve (function_resolver &r) const OVERRIDE
2855 {
2856 return r.resolve_uniform (3, 1);
2857 }
2858
2859 bool
2860 check (function_checker &c) const OVERRIDE
2861 {
2862 return c.require_immediate_one_of (3, 0, 90, 180, 270);
2863 }
2864 };
2865 SHAPE (ternary_rotate)
2866
2867 /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)
2868
2869 where the final argument must be an integer constant expression in the
2870 range [0, sizeof (<t0>_t) * 8 - 1]. */
2871 struct ternary_shift_left_imm_def : public ternary_shift_imm_base
2872 {
2873 bool
2874 check (function_checker &c) const OVERRIDE
2875 {
2876 unsigned int bits = c.type_suffix (0).element_bits;
2877 return c.require_immediate_range (2, 0, bits - 1);
2878 }
2879 };
2880 SHAPE (ternary_shift_left_imm)
2881
2882 /* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)
2883
2884 where the final argument must be an integer constant expression in the
2885 range [1, sizeof (<t0>_t) * 8]. */
2886 struct ternary_shift_right_imm_def : public ternary_shift_imm_base
2887 {
2888 bool
2889 check (function_checker &c) const OVERRIDE
2890 {
2891 unsigned int bits = c.type_suffix (0).element_bits;
2892 return c.require_immediate_range (2, 1, bits);
2893 }
2894 };
2895 SHAPE (ternary_shift_right_imm)
2896
2897 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0:uint>_t). */
2898 struct ternary_uint_def : public overloaded_base<0>
2899 {
2900 void
2901 build (function_builder &b, const function_group_info &group) const OVERRIDE
2902 {
2903 b.add_overloaded_functions (group, MODE_none);
2904 build_all (b, "v0,v0,v0,vu0", group, MODE_none);
2905 }
2906
2907 tree
2908 resolve (function_resolver &r) const OVERRIDE
2909 {
2910 unsigned int i, nargs;
2911 type_suffix_index type;
2912 if (!r.check_gp_argument (3, i, nargs)
2913 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
2914 || !r.require_matching_vector_type (i + 1, type)
2915 || !r.require_derived_vector_type (i + 2, i, type, TYPE_unsigned))
2916 return error_mark_node;
2917
2918 return r.resolve_to (r.mode_suffix_id, type);
2919 }
2920 };
2921 SHAPE (ternary_uint)
2922
2923 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
2924
2925 where the final argument is an integer constant expression in the
2926 range [0, 7]. */
2927 struct tmad_def : public overloaded_base<0>
2928 {
2929 void
2930 build (function_builder &b, const function_group_info &group) const OVERRIDE
2931 {
2932 b.add_overloaded_functions (group, MODE_none);
2933 build_all (b, "v0,v0,v0,su64", group, MODE_none);
2934 }
2935
2936 tree
2937 resolve (function_resolver &r) const OVERRIDE
2938 {
2939 return r.resolve_uniform (2, 1);
2940 }
2941
2942 bool
2943 check (function_checker &c) const OVERRIDE
2944 {
2945 return c.require_immediate_range (2, 0, 7);
2946 }
2947 };
2948 SHAPE (tmad)
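
/* For example, assuming svtmad uses this shape, the _f32 instance would
   be (illustrative only):

     svfloat32_t svtmad[_f32] (svfloat32_t op1, svfloat32_t op2,
                               uint64_t imm3);

   with imm3 required to be a constant in [0, 7].  */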
2949
2950 /* sv<t0>_t svfoo[_t0](sv<t0>_t)
2951
2952 i.e. the standard shape for unary operations that operate on
2953 uniform types. */
2954 struct unary_def : public overloaded_base<0>
2955 {
2956 void
2957 build (function_builder &b, const function_group_info &group) const OVERRIDE
2958 {
2959 b.add_overloaded_functions (group, MODE_none);
2960 build_all (b, "v0,v0", group, MODE_none);
2961 }
2962
2963 tree
2964 resolve (function_resolver &r) const OVERRIDE
2965 {
2966 return r.resolve_unary ();
2967 }
2968 };
2969 SHAPE (unary)
2970
2971 /* sv<t0>_t svfoo_t0[_t1](sv<t1>_t)
2972
2973 where the target type <t0> must be specified explicitly but the source
2974 type <t1> can be inferred. */
2975 struct unary_convert_def : public overloaded_base<1>
2976 {
2977 void
2978 build (function_builder &b, const function_group_info &group) const OVERRIDE
2979 {
2980 b.add_overloaded_functions (group, MODE_none);
2981 build_all (b, "v0,v1", group, MODE_none);
2982 }
2983
2984 tree
2985 resolve (function_resolver &r) const OVERRIDE
2986 {
2987 return r.resolve_unary (r.type_suffix (0).tclass,
2988 r.type_suffix (0).element_bits);
2989 }
2990 };
2991 SHAPE (unary_convert)
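
/* For example, assuming svcvt uses this shape, converting from _s32 to
   _f32 would give (illustrative only; the _x suffix and governing
   predicate come from the predication type rather than the shape):

     svfloat32_t svcvt_f32[_s32]_x (svbool_t pg, svint32_t op);  */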
2992
2993 /* sv<t0>_t svfoo_t0[_t1](sv<t0>_t, sv<t1>_t)
2994
2995 This is a version of unary_convert in which the even-indexed
2996 elements are passed in as a first parameter, before any governing
2997 predicate. */
2998 struct unary_convert_narrowt_def : public overloaded_base<1>
2999 {
3000 void
3001 build (function_builder &b, const function_group_info &group) const OVERRIDE
3002 {
3003 b.add_overloaded_functions (group, MODE_none);
3004 build_all (b, "v0,v1", group, MODE_none);
3005 }
3006
3007 tree
3008 resolve (function_resolver &r) const OVERRIDE
3009 {
3010 return r.resolve_unary (r.type_suffix (0).tclass,
3011 r.type_suffix (0).element_bits, true);
3012 }
3013 };
3014 SHAPE (unary_convert_narrowt)
3015
3016 /* sv<t0>_t svfoo[_t0](sv<t0:half>_t). */
3017 struct unary_long_def : public overloaded_base<0>
3018 {
3019 void
3020 build (function_builder &b, const function_group_info &group) const OVERRIDE
3021 {
3022 b.add_overloaded_functions (group, MODE_none);
3023 build_all (b, "v0,vh0", group, MODE_none);
3024 }
3025
3026 tree
3027 resolve (function_resolver &r) const OVERRIDE
3028 {
3029 unsigned int i, nargs;
3030 type_suffix_index type, result_type;
3031 if (!r.check_gp_argument (1, i, nargs)
3032 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
3033 || (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
3034 return error_mark_node;
3035
3036 if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
3037 return res;
3038
3039 return r.report_no_such_form (type);
3040 }
3041 };
3042 SHAPE (unary_long)
3043
3044 /* sv<t0>_t svfoo[_n]_t0(<t0>_t). */
3045 struct unary_n_def : public overloaded_base<1>
3046 {
3047 void
3048 build (function_builder &b, const function_group_info &group) const OVERRIDE
3049 {
3050 /* The "_n" suffix is optional; the full name has it, but the short
3051 name doesn't. */
3052 build_all (b, "v0,s0", group, MODE_n, true);
3053 }
3054
3055 tree
3056 resolve (function_resolver &) const OVERRIDE
3057 {
3058 /* The short forms just make "_n" implicit, so no resolution is needed. */
3059 gcc_unreachable ();
3060 }
3061 };
3062 SHAPE (unary_n)
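
/* For example, assuming svdup uses this shape, the 32-bit signed
   instance would be (illustrative only):

     svint32_t svdup[_n]_s32 (int32_t op);  */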
3063
3064 /* sv<t0:half>_t svfoo[_t0](sv<t0>_t). */
3065 typedef unary_narrowb_base<> unary_narrowb_def;
3066 SHAPE (unary_narrowb)
3067
3068 /* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t). */
3069 typedef unary_narrowt_base<> unary_narrowt_def;
3070 SHAPE (unary_narrowt)
3071
3072 /* sv<t0:uint:half>_t svfoo[_t0](sv<t0>_t). */
3073 typedef unary_narrowb_base<TYPE_unsigned> unary_narrowb_to_uint_def;
3074 SHAPE (unary_narrowb_to_uint)
3075
3076 /* sv<t0:uint:half>_t svfoo[_t0](sv<t0:uint:half>_t, sv<t0>_t). */
3077 typedef unary_narrowt_base<TYPE_unsigned> unary_narrowt_to_uint_def;
3078 SHAPE (unary_narrowt_to_uint)
3079
3080 /* svbool_t svfoo(svbool_t). */
3081 struct unary_pred_def : public nonoverloaded_base
3082 {
3083 void
3084 build (function_builder &b, const function_group_info &group) const OVERRIDE
3085 {
3086 build_all (b, "v0,v0", group, MODE_none);
3087 }
3088 };
3089 SHAPE (unary_pred)
3090
3091 /* sv<t0:int>_t svfoo[_t0](sv<t0>_t)
3092
3093 i.e. a version of "unary" in which the returned vector contains
3094 signed integers. */
3095 struct unary_to_int_def : public overloaded_base<0>
3096 {
3097 void
3098 build (function_builder &b, const function_group_info &group) const OVERRIDE
3099 {
3100 b.add_overloaded_functions (group, MODE_none);
3101 build_all (b, "vs0,v0", group, MODE_none);
3102 }
3103
3104 tree
3105 resolve (function_resolver &r) const OVERRIDE
3106 {
3107 return r.resolve_unary (TYPE_signed);
3108 }
3109 };
3110 SHAPE (unary_to_int)
3111
3112 /* sv<t0:uint>_t svfoo[_t0](sv<t0>_t)
3113
3114 i.e. a version of "unary" in which the returned vector contains
3115 unsigned integers. */
3116 struct unary_to_uint_def : public overloaded_base<0>
3117 {
3118 void
3119 build (function_builder &b, const function_group_info &group) const OVERRIDE
3120 {
3121 b.add_overloaded_functions (group, MODE_none);
3122 build_all (b, "vu0,v0", group, MODE_none);
3123 }
3124
3125 tree
3126 resolve (function_resolver &r) const OVERRIDE
3127 {
3128 return r.resolve_unary (TYPE_unsigned);
3129 }
3130 };
3131 SHAPE (unary_to_uint)
3132
3133 /* sv<t0>_t svfoo[_t0](sv<t0:uint>_t)
3134
3135 where <t0> always belongs to a certain type class, and where <t0:uint>
3136 therefore uniquely determines <t0>. */
3137 struct unary_uint_def : public overloaded_base<0>
3138 {
3139 void
3140 build (function_builder &b, const function_group_info &group) const OVERRIDE
3141 {
3142 b.add_overloaded_functions (group, MODE_none);
3143 build_all (b, "v0,vu0", group, MODE_none);
3144 }
3145
3146 tree
3147 resolve (function_resolver &r) const OVERRIDE
3148 {
3149 unsigned int i, nargs;
3150 type_suffix_index type;
3151 if (!r.check_gp_argument (1, i, nargs)
3152 || (type = r.infer_unsigned_vector_type (i)) == NUM_TYPE_SUFFIXES)
3153 return error_mark_node;
3154
3155 /* Search for a valid suffix with the same number of bits as TYPE. */
3156 unsigned int element_bits = type_suffixes[type].element_bits;
3157 if (type_suffixes[type].unsigned_p)
3158 for (unsigned int j = 0; j < NUM_TYPE_SUFFIXES; ++j)
3159 if (type_suffixes[j].element_bits == element_bits)
3160 if (tree res = r.lookup_form (r.mode_suffix_id,
3161 type_suffix_index (j)))
3162 return res;
3163
3164 return r.report_no_such_form (type);
3165 }
3166 };
3167 SHAPE (unary_uint)
3168
3169 /* sv<t0>_t svfoo[_t0](sv<t0:half>_t)
3170
3171 i.e. a version of "unary" in which the source elements are half the
3172 size of the destination elements, but have the same type class. */
3173 struct unary_widen_def : public overloaded_base<0>
3174 {
3175 void
3176 build (function_builder &b, const function_group_info &group) const OVERRIDE
3177 {
3178 b.add_overloaded_functions (group, MODE_none);
3179 build_all (b, "v0,vh0", group, MODE_none);
3180 }
3181
3182 tree
3183 resolve (function_resolver &r) const OVERRIDE
3184 {
3185 unsigned int i, nargs;
3186 type_suffix_index type;
3187 if (!r.check_gp_argument (1, i, nargs)
3188 || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
3189 return error_mark_node;
3190
3191 /* There is only a single form for predicates. */
3192 if (type == TYPE_SUFFIX_b)
3193 return r.resolve_to (r.mode_suffix_id, type);
3194
3195 if (type_suffixes[type].integer_p
3196 && type_suffixes[type].element_bits < 64)
3197 {
3198 type_suffix_index wide_suffix
3199 = find_type_suffix (type_suffixes[type].tclass,
3200 type_suffixes[type].element_bits * 2);
3201 if (tree res = r.lookup_form (r.mode_suffix_id, wide_suffix))
3202 return res;
3203 }
3204
3205 return r.report_no_such_form (type);
3206 }
3207 };
3208 SHAPE (unary_widen)
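
/* For example, assuming svunpklo uses this shape, the _s32 instance
   would widen the low half of an svint16_t (illustrative only):

     svint32_t svunpklo[_s32] (svint16_t op);  */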
3209
3210 }