/* ACLE support for AArch64 SVE (__ARM_FEATURE_SVE2 intrinsics)
   Copyright (C) 2020-2021 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "memmodel.h"
#include "insn-codes.h"
#include "optabs.h"
#include "recog.h"
#include "expr.h"
#include "basic-block.h"
#include "function.h"
#include "fold-const.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "explow.h"
#include "emit-rtl.h"
#include "tree-vector-builder.h"
#include "rtx-vector-builder.h"
#include "vec-perm-indices.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"
#include "aarch64-sve-builtins-base.h"
#include "aarch64-sve-builtins-sve2.h"
#include "aarch64-sve-builtins-functions.h"

using namespace aarch64_sve;

namespace {

/* Return the UNSPEC_CDOT* unspec for rotation amount ROT.  */
static int
unspec_cdot (int rot)
{
  switch (rot)
    {
    case 0: return UNSPEC_CDOT;
    case 90: return UNSPEC_CDOT90;
    case 180: return UNSPEC_CDOT180;
    case 270: return UNSPEC_CDOT270;
    default: gcc_unreachable ();
    }
}

/* Return the UNSPEC_SQRDCMLAH* unspec for rotation amount ROT.  */
static int
unspec_sqrdcmlah (int rot)
{
  switch (rot)
    {
    case 0: return UNSPEC_SQRDCMLAH;
    case 90: return UNSPEC_SQRDCMLAH90;
    case 180: return UNSPEC_SQRDCMLAH180;
    case 270: return UNSPEC_SQRDCMLAH270;
    default: gcc_unreachable ();
    }
}

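/* Implements svaba; the type suffix selects between the signed and
   unsigned forms of the underlying ABA pattern.  */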
class svaba_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    rtx_code max_code = e.type_suffix (0).unsigned_p ? UMAX : SMAX;
    machine_mode mode = e.vector_mode (0);
    return e.use_exact_insn (code_for_aarch64_sve2_aba (max_code, mode));
  }
};

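/* Implements svcdot; the rotation argument selects between the
   UNSPEC_CDOT* unspecs.  */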
class svcdot_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_sve (unspec_cdot (rot),
                                                   e.vector_mode (0)));
  }
};

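/* Implements svcdot_lane; as for svcdot, the rotation argument selects
   between the UNSPEC_CDOT* unspecs.  */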
class svcdot_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_lane (unspec_cdot (rot),
                                                    e.vector_mode (0)));
  }
};

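/* Implements the non-extending forms of svldnt1_gather.  */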
class svldnt1_gather_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const OVERRIDE
  {
    return CP_READ_MEMORY;
  }

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    e.prepare_gather_address_operands (1, false);
    machine_mode mem_mode = e.memory_vector_mode ();
    return e.use_exact_insn (code_for_aarch64_gather_ldnt (mem_mode));
  }
};

/* Implements extending forms of svldnt1_gather.  */
class svldnt1_gather_extend_impl : public extending_load
{
public:
  CONSTEXPR svldnt1_gather_extend_impl (type_suffix_index memory_type)
    : extending_load (memory_type) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    e.prepare_gather_address_operands (1, false);
    /* Add a constant predicate for the extension rtx.  */
    e.args.quick_push (CONSTM1_RTX (VNx16BImode));
    insn_code icode = code_for_aarch64_gather_ldnt (extend_rtx_code (),
                                                    e.vector_mode (0),
                                                    e.memory_vector_mode ());
    return e.use_exact_insn (icode);
  }
};

/* Implements both svmatch and svnmatch; the unspec parameter decides
   between them.  */
class svmatch_svnmatch_impl : public function_base
{
public:
  CONSTEXPR svmatch_svnmatch_impl (int unspec) : m_unspec (unspec) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* These are UNSPEC_PRED_Z operations and so need a hint operand.  */
    e.add_ptrue_hint (0, e.gp_mode (0));
    return e.use_exact_insn (code_for_aarch64_pred (m_unspec,
                                                    e.vector_mode (0)));
  }

  int m_unspec;
};

/* Implements both svmovlb and svmovlt; the unspec parameters decide
   between them.  */
class svmovl_lb_impl : public unspec_based_function_base
{
public:
  CONSTEXPR svmovl_lb_impl (int unspec_for_sint, int unspec_for_uint,
                            int unspec_for_fp)
    : unspec_based_function_base (unspec_for_sint, unspec_for_uint,
                                  unspec_for_fp)
  {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    e.args.quick_push (const0_rtx);
    return e.map_to_unspecs (m_unspec_for_sint, m_unspec_for_uint,
                             m_unspec_for_fp);
  }
};

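/* Implements svqcadd, for which only rotations of 90 and 270
   are valid.  */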
class svqcadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    if (rot == 90)
      return e.map_to_unspecs (UNSPEC_SQCADD90, -1, -1);
    if (rot == 270)
      return e.map_to_unspecs (UNSPEC_SQCADD270, -1, -1);
    gcc_unreachable ();
  }
};

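/* Implements svqrdcmlah; the rotation argument selects between the
   UNSPEC_SQRDCMLAH* unspecs.  */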
class svqrdcmlah_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_sve (unspec_sqrdcmlah (rot),
                                                   e.vector_mode (0)));
  }
};

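/* Implements svqrdcmlah_lane; as for svqrdcmlah, the rotation argument
   selects between the UNSPEC_SQRDCMLAH* unspecs.  */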
class svqrdcmlah_lane_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* Convert the rotation amount into a specific unspec.  */
    int rot = INTVAL (e.args.pop ());
    return e.use_exact_insn (code_for_aarch64_lane (unspec_sqrdcmlah (rot),
                                                    e.vector_mode (0)));
  }
};

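/* Implements svqrshl.  Calls with constant shift amounts fold to svqshl
   (the rounding is a no-op for left shifts) or svrshl (the saturation
   is a no-op for right shifts).  */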
class svqrshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svqrshl_impl ()
    : unspec_based_function (UNSPEC_SQRSHL, UNSPEC_UQRSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const OVERRIDE
  {
    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        if (wi::to_widest (amount) >= 0)
          {
            /* The rounding has no effect, and [SU]QSHL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svqshl", functions::svqshl,
                                        shapes::binary_int_opt_n, MODE_n,
                                        f.type_suffix_ids, f.pred);
            return f.redirect_call (instance);
          }
        else
          {
            /* The saturation has no effect, and [SU]RSHL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svrshl", functions::svrshl,
                                        shapes::binary_int_opt_n, MODE_n,
                                        f.type_suffix_ids, f.pred);
            return f.redirect_call (instance);
          }
      }
    return NULL;
  }
};

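/* Implements svqshl.  Constant right shifts fold to svasr or svlsr,
   since the saturation has no effect for them.  */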
class svqshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svqshl_impl ()
    : unspec_based_function (UNSPEC_SQSHL, UNSPEC_UQSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const OVERRIDE
  {
    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        int element_bits = f.type_suffix (0).element_bits;
        if (wi::to_widest (amount) >= -element_bits
            && wi::to_widest (amount) < 0)
          {
            /* The saturation has no effect for right shifts, so we can
               use the immediate form of ASR or LSR.  */
            amount = wide_int_to_tree (TREE_TYPE (amount),
                                       -wi::to_wide (amount));
            function_instance instance ("svasr", functions::svasr,
                                        shapes::binary_uint_opt_n, MODE_n,
                                        f.type_suffix_ids, f.pred);
            if (f.type_suffix (0).unsigned_p)
              {
                instance.base_name = "svlsr";
                instance.base = functions::svlsr;
              }
            gcall *call = as_a <gcall *> (f.redirect_call (instance));
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
      }
    return NULL;
  }
};

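/* Implements svrshl.  Constant left shifts fold to svlsl (the rounding
   is a no-op) and in-range constant right shifts fold to svrshr.  */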
class svrshl_impl : public unspec_based_function
{
public:
  CONSTEXPR svrshl_impl ()
    : unspec_based_function (UNSPEC_SRSHL, UNSPEC_URSHL, -1) {}

  gimple *
  fold (gimple_folder &f) const OVERRIDE
  {
    if (tree amount = uniform_integer_cst_p (gimple_call_arg (f.call, 2)))
      {
        if (wi::to_widest (amount) >= 0)
          {
            /* The rounding has no effect, and LSL has immediate forms
               that we can use for sensible shift amounts.  */
            function_instance instance ("svlsl", functions::svlsl,
                                        shapes::binary_uint_opt_n, MODE_n,
                                        f.type_suffix_ids, f.pred);
            gcall *call = as_a <gcall *> (f.redirect_call (instance));
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
        int element_bits = f.type_suffix (0).element_bits;
        if (wi::to_widest (amount) >= -element_bits)
          {
            /* The shift amount is in range of [SU]RSHR.  */
            amount = wide_int_to_tree (TREE_TYPE (amount),
                                       -wi::to_wide (amount));
            function_instance instance ("svrshr", functions::svrshr,
                                        shapes::shift_right_imm, MODE_n,
                                        f.type_suffix_ids, f.pred);
            gcall *call = as_a <gcall *> (f.redirect_call (instance));
            gimple_call_set_arg (call, 2, amount);
            return call;
          }
      }
    return NULL;
  }
};

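/* Implements svsqadd.  For _x predication with a suitable immediate
   operand, the operation maps to the US_PLUS rtx code rather than the
   USQADD unspec.  */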
class svsqadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    machine_mode mode = e.vector_mode (0);
    if (e.pred == PRED_x
        && aarch64_sve_sqadd_sqsub_immediate_p (mode, e.args[2], false))
      return e.map_to_rtx_codes (UNKNOWN, US_PLUS, -1);
    return e.map_to_unspecs (-1, UNSPEC_USQADD, -1);
  }
};

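/* Implements svsra (shift right and accumulate); the type suffix
   selects between arithmetic and logical right shifts.  */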
class svsra_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    rtx_code shift_code = e.type_suffix (0).unsigned_p ? LSHIFTRT : ASHIFTRT;
    machine_mode mode = e.vector_mode (0);
    return e.use_exact_insn (code_for_aarch64_sve_add (shift_code, mode));
  }
};

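/* Implements the non-truncating forms of svstnt1_scatter.  */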
class svstnt1_scatter_impl : public full_width_access
{
public:
  unsigned int
  call_properties (const function_instance &) const OVERRIDE
  {
    return CP_WRITE_MEMORY;
  }

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    e.prepare_gather_address_operands (1, false);
    machine_mode mem_mode = e.memory_vector_mode ();
    return e.use_exact_insn (code_for_aarch64_scatter_stnt (mem_mode));
  }
};

/* Implements truncating forms of svstnt1_scatter.  */
class svstnt1_scatter_truncate_impl : public truncating_store
{
public:
  CONSTEXPR svstnt1_scatter_truncate_impl (scalar_int_mode to_mode)
    : truncating_store (to_mode) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    e.prepare_gather_address_operands (1, false);
    insn_code icode = code_for_aarch64_scatter_stnt (e.vector_mode (0),
                                                     e.memory_vector_mode ());
    return e.use_exact_insn (icode);
  }
};

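/* Implements svtbl2, a table lookup in which the table is a tuple
   of two vectors.  */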
class svtbl2_impl : public quiet<multi_vector_function>
{
public:
  CONSTEXPR svtbl2_impl () : quiet<multi_vector_function> (2) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    return e.use_exact_insn (code_for_aarch64_sve2_tbl2 (e.vector_mode (0)));
  }
};

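/* Implements svuqadd.  For _x predication with a suitable immediate
   operand, a dedicated constant pattern is used instead of the
   SUQADD unspec.  */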
class svuqadd_impl : public function_base
{
public:
  rtx
  expand (function_expander &e) const OVERRIDE
  {
    machine_mode mode = e.vector_mode (0);
    if (e.pred == PRED_x
        && aarch64_sve_arith_immediate_p (mode, e.args[2], false))
      return e.use_unpred_insn (code_for_aarch64_sve_suqadd_const (mode));
    return e.map_to_unspecs (UNSPEC_SUQADD, -1, -1);
  }
};

/* Implements both svwhilerw and svwhilewr; the unspec parameter decides
   between them.  */
class svwhilerw_svwhilewr_impl : public full_width_access
{
public:
  CONSTEXPR svwhilerw_svwhilewr_impl (int unspec) : m_unspec (unspec) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    for (unsigned int i = 0; i < 2; ++i)
      e.args[i] = e.convert_to_pmode (e.args[i]);
    return e.use_exact_insn (code_for_while (m_unspec, Pmode, e.gp_mode (0)));
  }

  int m_unspec;
};

} /* end anonymous namespace */

namespace aarch64_sve {

FUNCTION (svaba, svaba_impl,)
FUNCTION (svabalb, unspec_based_add_function, (UNSPEC_SABDLB,
                                               UNSPEC_UABDLB, -1))
FUNCTION (svabalt, unspec_based_add_function, (UNSPEC_SABDLT,
                                               UNSPEC_UABDLT, -1))
FUNCTION (svadclb, unspec_based_function, (-1, UNSPEC_ADCLB, -1))
FUNCTION (svadclt, unspec_based_function, (-1, UNSPEC_ADCLT, -1))
FUNCTION (svaddhnb, unspec_based_function, (UNSPEC_ADDHNB, UNSPEC_ADDHNB, -1))
FUNCTION (svaddhnt, unspec_based_function, (UNSPEC_ADDHNT, UNSPEC_ADDHNT, -1))
FUNCTION (svabdlb, unspec_based_function, (UNSPEC_SABDLB, UNSPEC_UABDLB, -1))
FUNCTION (svabdlt, unspec_based_function, (UNSPEC_SABDLT, UNSPEC_UABDLT, -1))
FUNCTION (svadalp, unspec_based_function, (UNSPEC_SADALP, UNSPEC_UADALP, -1))
FUNCTION (svaddlb, unspec_based_function, (UNSPEC_SADDLB, UNSPEC_UADDLB, -1))
FUNCTION (svaddlbt, unspec_based_function, (UNSPEC_SADDLBT, -1, -1))
FUNCTION (svaddlt, unspec_based_function, (UNSPEC_SADDLT, UNSPEC_UADDLT, -1))
FUNCTION (svaddwb, unspec_based_function, (UNSPEC_SADDWB, UNSPEC_UADDWB, -1))
FUNCTION (svaddwt, unspec_based_function, (UNSPEC_SADDWT, UNSPEC_UADDWT, -1))
FUNCTION (svaddp, unspec_based_pred_function, (UNSPEC_ADDP, UNSPEC_ADDP,
                                               UNSPEC_FADDP))
FUNCTION (svaesd, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesd))
FUNCTION (svaese, fixed_insn_function, (CODE_FOR_aarch64_sve2_aese))
FUNCTION (svaesimc, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesimc))
FUNCTION (svaesmc, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesmc))
FUNCTION (svbcax, CODE_FOR_MODE0 (aarch64_sve2_bcax),)
FUNCTION (svbdep, unspec_based_function, (UNSPEC_BDEP, UNSPEC_BDEP, -1))
FUNCTION (svbext, unspec_based_function, (UNSPEC_BEXT, UNSPEC_BEXT, -1))
FUNCTION (svbgrp, unspec_based_function, (UNSPEC_BGRP, UNSPEC_BGRP, -1))
FUNCTION (svbsl, CODE_FOR_MODE0 (aarch64_sve2_bsl),)
FUNCTION (svbsl1n, CODE_FOR_MODE0 (aarch64_sve2_bsl1n),)
FUNCTION (svbsl2n, CODE_FOR_MODE0 (aarch64_sve2_bsl2n),)
FUNCTION (svcdot, svcdot_impl,)
FUNCTION (svcdot_lane, svcdot_lane_impl,)
FUNCTION (svcvtlt, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTLT))
FUNCTION (svcvtx, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTX))
FUNCTION (svcvtxnt, CODE_FOR_MODE1 (aarch64_sve2_cvtxnt),)
FUNCTION (sveor3, CODE_FOR_MODE0 (aarch64_sve2_eor3),)
FUNCTION (sveorbt, unspec_based_function, (UNSPEC_EORBT, UNSPEC_EORBT, -1))
FUNCTION (sveortb, unspec_based_function, (UNSPEC_EORTB, UNSPEC_EORTB, -1))
FUNCTION (svhadd, unspec_based_function, (UNSPEC_SHADD, UNSPEC_UHADD, -1))
FUNCTION (svhsub, unspec_based_function, (UNSPEC_SHSUB, UNSPEC_UHSUB, -1))
FUNCTION (svhistcnt, CODE_FOR_MODE0 (aarch64_sve2_histcnt),)
FUNCTION (svhistseg, CODE_FOR_MODE0 (aarch64_sve2_histseg),)
FUNCTION (svhsubr, unspec_based_function_rotated, (UNSPEC_SHSUB,
                                                   UNSPEC_UHSUB, -1))
FUNCTION (svldnt1_gather, svldnt1_gather_impl,)
FUNCTION (svldnt1sb_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s8))
FUNCTION (svldnt1sh_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s16))
FUNCTION (svldnt1sw_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s32))
FUNCTION (svldnt1ub_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u8))
FUNCTION (svldnt1uh_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u16))
FUNCTION (svldnt1uw_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u32))
FUNCTION (svlogb, unspec_based_function, (-1, -1, UNSPEC_COND_FLOGB))
FUNCTION (svmatch, svmatch_svnmatch_impl, (UNSPEC_MATCH))
FUNCTION (svmaxp, unspec_based_pred_function, (UNSPEC_SMAXP, UNSPEC_UMAXP,
                                               UNSPEC_FMAXP))
FUNCTION (svmaxnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMAXNMP))
FUNCTION (svminp, unspec_based_pred_function, (UNSPEC_SMINP, UNSPEC_UMINP,
                                               UNSPEC_FMINP))
FUNCTION (svminnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMINNMP))
FUNCTION (svmlalb, unspec_based_mla_function, (UNSPEC_SMULLB,
                                               UNSPEC_UMULLB, UNSPEC_FMLALB))
FUNCTION (svmlalb_lane, unspec_based_mla_lane_function, (UNSPEC_SMULLB,
                                                         UNSPEC_UMULLB,
                                                         UNSPEC_FMLALB))
FUNCTION (svmlalt, unspec_based_mla_function, (UNSPEC_SMULLT,
                                               UNSPEC_UMULLT, UNSPEC_FMLALT))
FUNCTION (svmlalt_lane, unspec_based_mla_lane_function, (UNSPEC_SMULLT,
                                                         UNSPEC_UMULLT,
                                                         UNSPEC_FMLALT))
FUNCTION (svmlslb, unspec_based_mls_function, (UNSPEC_SMULLB,
                                               UNSPEC_UMULLB, UNSPEC_FMLSLB))
FUNCTION (svmlslb_lane, unspec_based_mls_lane_function, (UNSPEC_SMULLB,
                                                         UNSPEC_UMULLB,
                                                         UNSPEC_FMLSLB))
FUNCTION (svmlslt, unspec_based_mls_function, (UNSPEC_SMULLT,
                                               UNSPEC_UMULLT, UNSPEC_FMLSLT))
FUNCTION (svmlslt_lane, unspec_based_mls_lane_function, (UNSPEC_SMULLT,
                                                         UNSPEC_UMULLT,
                                                         UNSPEC_FMLSLT))
FUNCTION (svmovlb, svmovl_lb_impl, (UNSPEC_SSHLLB, UNSPEC_USHLLB, -1))
FUNCTION (svmovlt, svmovl_lb_impl, (UNSPEC_SSHLLT, UNSPEC_USHLLT, -1))
FUNCTION (svmullb, unspec_based_function, (UNSPEC_SMULLB, UNSPEC_UMULLB, -1))
FUNCTION (svmullb_lane, unspec_based_lane_function, (UNSPEC_SMULLB,
                                                     UNSPEC_UMULLB, -1))
FUNCTION (svmullt, unspec_based_function, (UNSPEC_SMULLT, UNSPEC_UMULLT, -1))
FUNCTION (svmullt_lane, unspec_based_lane_function, (UNSPEC_SMULLT,
                                                     UNSPEC_UMULLT, -1))
FUNCTION (svnbsl, CODE_FOR_MODE0 (aarch64_sve2_nbsl),)
FUNCTION (svnmatch, svmatch_svnmatch_impl, (UNSPEC_NMATCH))
FUNCTION (svpmul, CODE_FOR_MODE0 (aarch64_sve2_pmul),)
FUNCTION (svpmullb, unspec_based_function, (-1, UNSPEC_PMULLB, -1))
FUNCTION (svpmullb_pair, unspec_based_function, (-1, UNSPEC_PMULLB_PAIR, -1))
FUNCTION (svpmullt, unspec_based_function, (-1, UNSPEC_PMULLT, -1))
FUNCTION (svpmullt_pair, unspec_based_function, (-1, UNSPEC_PMULLT_PAIR, -1))
FUNCTION (svqabs, rtx_code_function, (SS_ABS, UNKNOWN, UNKNOWN))
FUNCTION (svqcadd, svqcadd_impl,)
FUNCTION (svqdmlalb, unspec_based_qadd_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmlalb_lane, unspec_based_qadd_lane_function, (UNSPEC_SQDMULLB,
                                                            -1, -1))
FUNCTION (svqdmlalbt, unspec_based_qadd_function, (UNSPEC_SQDMULLBT, -1, -1))
FUNCTION (svqdmlalt, unspec_based_qadd_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmlalt_lane, unspec_based_qadd_lane_function, (UNSPEC_SQDMULLT,
                                                            -1, -1))
FUNCTION (svqdmlslb, unspec_based_qsub_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmlslb_lane, unspec_based_qsub_lane_function, (UNSPEC_SQDMULLB,
                                                            -1, -1))
FUNCTION (svqdmlslbt, unspec_based_qsub_function, (UNSPEC_SQDMULLBT, -1, -1))
FUNCTION (svqdmlslt, unspec_based_qsub_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmlslt_lane, unspec_based_qsub_lane_function, (UNSPEC_SQDMULLT,
                                                            -1, -1))
FUNCTION (svqdmulh, unspec_based_function, (UNSPEC_SQDMULH, -1, -1))
FUNCTION (svqdmulh_lane, unspec_based_lane_function, (UNSPEC_SQDMULH, -1, -1))
FUNCTION (svqdmullb, unspec_based_function, (UNSPEC_SQDMULLB, -1, -1))
FUNCTION (svqdmullb_lane, unspec_based_lane_function, (UNSPEC_SQDMULLB,
                                                       -1, -1))
FUNCTION (svqdmullt, unspec_based_function, (UNSPEC_SQDMULLT, -1, -1))
FUNCTION (svqdmullt_lane, unspec_based_lane_function, (UNSPEC_SQDMULLT,
                                                       -1, -1))
FUNCTION (svqneg, rtx_code_function, (SS_NEG, UNKNOWN, UNKNOWN))
FUNCTION (svqrdcmlah, svqrdcmlah_impl,)
FUNCTION (svqrdcmlah_lane, svqrdcmlah_lane_impl,)
FUNCTION (svqrdmulh, unspec_based_function, (UNSPEC_SQRDMULH, -1, -1))
FUNCTION (svqrdmulh_lane, unspec_based_lane_function, (UNSPEC_SQRDMULH,
                                                       -1, -1))
FUNCTION (svqrdmlah, unspec_based_function, (UNSPEC_SQRDMLAH, -1, -1))
FUNCTION (svqrdmlah_lane, unspec_based_lane_function, (UNSPEC_SQRDMLAH,
                                                       -1, -1))
FUNCTION (svqrdmlsh, unspec_based_function, (UNSPEC_SQRDMLSH, -1, -1))
FUNCTION (svqrdmlsh_lane, unspec_based_lane_function, (UNSPEC_SQRDMLSH,
                                                       -1, -1))
FUNCTION (svqrshl, svqrshl_impl,)
FUNCTION (svqrshrnb, unspec_based_function, (UNSPEC_SQRSHRNB,
                                             UNSPEC_UQRSHRNB, -1))
FUNCTION (svqrshrnt, unspec_based_function, (UNSPEC_SQRSHRNT,
                                             UNSPEC_UQRSHRNT, -1))
FUNCTION (svqrshrunb, unspec_based_function, (UNSPEC_SQRSHRUNB, -1, -1))
FUNCTION (svqrshrunt, unspec_based_function, (UNSPEC_SQRSHRUNT, -1, -1))
FUNCTION (svqshl, svqshl_impl,)
FUNCTION (svqshlu, unspec_based_function, (UNSPEC_SQSHLU, -1, -1))
FUNCTION (svqshrnb, unspec_based_function, (UNSPEC_SQSHRNB,
                                            UNSPEC_UQSHRNB, -1))
FUNCTION (svqshrnt, unspec_based_function, (UNSPEC_SQSHRNT,
                                            UNSPEC_UQSHRNT, -1))
FUNCTION (svqshrunb, unspec_based_function, (UNSPEC_SQSHRUNB, -1, -1))
FUNCTION (svqshrunt, unspec_based_function, (UNSPEC_SQSHRUNT, -1, -1))
FUNCTION (svqsubr, rtx_code_function_rotated, (SS_MINUS, US_MINUS, -1))
FUNCTION (svqxtnb, unspec_based_function, (UNSPEC_SQXTNB, UNSPEC_UQXTNB, -1))
FUNCTION (svqxtnt, unspec_based_function, (UNSPEC_SQXTNT, UNSPEC_UQXTNT, -1))
FUNCTION (svqxtunb, unspec_based_function, (UNSPEC_SQXTUNB, -1, -1))
FUNCTION (svqxtunt, unspec_based_function, (UNSPEC_SQXTUNT, -1, -1))
FUNCTION (svraddhnb, unspec_based_function, (UNSPEC_RADDHNB,
                                             UNSPEC_RADDHNB, -1))
FUNCTION (svraddhnt, unspec_based_function, (UNSPEC_RADDHNT,
                                             UNSPEC_RADDHNT, -1))
FUNCTION (svrax1, fixed_insn_function, (CODE_FOR_aarch64_sve2_rax1))
FUNCTION (svrhadd, unspec_based_function, (UNSPEC_SRHADD, UNSPEC_URHADD, -1))
FUNCTION (svrshl, svrshl_impl,)
FUNCTION (svrshr, unspec_based_function, (UNSPEC_SRSHR, UNSPEC_URSHR, -1))
FUNCTION (svrshrnb, unspec_based_function, (UNSPEC_RSHRNB, UNSPEC_RSHRNB, -1))
FUNCTION (svrshrnt, unspec_based_function, (UNSPEC_RSHRNT, UNSPEC_RSHRNT, -1))
FUNCTION (svrsra, unspec_based_add_function, (UNSPEC_SRSHR, UNSPEC_URSHR, -1))
FUNCTION (svrsubhnb, unspec_based_function, (UNSPEC_RSUBHNB,
                                             UNSPEC_RSUBHNB, -1))
FUNCTION (svrsubhnt, unspec_based_function, (UNSPEC_RSUBHNT,
                                             UNSPEC_RSUBHNT, -1))
FUNCTION (svsbclb, unspec_based_function, (-1, UNSPEC_SBCLB, -1))
FUNCTION (svsbclt, unspec_based_function, (-1, UNSPEC_SBCLT, -1))
FUNCTION (svshllb, unspec_based_function, (UNSPEC_SSHLLB, UNSPEC_USHLLB, -1))
FUNCTION (svshllt, unspec_based_function, (UNSPEC_SSHLLT, UNSPEC_USHLLT, -1))
FUNCTION (svshrnb, unspec_based_function, (UNSPEC_SHRNB, UNSPEC_SHRNB, -1))
FUNCTION (svshrnt, unspec_based_function, (UNSPEC_SHRNT, UNSPEC_SHRNT, -1))
FUNCTION (svsli, unspec_based_function, (UNSPEC_SLI, UNSPEC_SLI, -1))
FUNCTION (svsm4e, fixed_insn_function, (CODE_FOR_aarch64_sve2_sm4e))
FUNCTION (svsm4ekey, fixed_insn_function, (CODE_FOR_aarch64_sve2_sm4ekey))
FUNCTION (svsqadd, svsqadd_impl,)
FUNCTION (svsra, svsra_impl,)
FUNCTION (svsri, unspec_based_function, (UNSPEC_SRI, UNSPEC_SRI, -1))
FUNCTION (svstnt1_scatter, svstnt1_scatter_impl,)
FUNCTION (svstnt1b_scatter, svstnt1_scatter_truncate_impl, (QImode))
FUNCTION (svstnt1h_scatter, svstnt1_scatter_truncate_impl, (HImode))
FUNCTION (svstnt1w_scatter, svstnt1_scatter_truncate_impl, (SImode))
FUNCTION (svsubhnb, unspec_based_function, (UNSPEC_SUBHNB, UNSPEC_SUBHNB, -1))
FUNCTION (svsubhnt, unspec_based_function, (UNSPEC_SUBHNT, UNSPEC_SUBHNT, -1))
FUNCTION (svsublb, unspec_based_function, (UNSPEC_SSUBLB, UNSPEC_USUBLB, -1))
FUNCTION (svsublbt, unspec_based_function, (UNSPEC_SSUBLBT, -1, -1))
FUNCTION (svsublt, unspec_based_function, (UNSPEC_SSUBLT, UNSPEC_USUBLT, -1))
FUNCTION (svsubltb, unspec_based_function, (UNSPEC_SSUBLTB, -1, -1))
FUNCTION (svsubwb, unspec_based_function, (UNSPEC_SSUBWB, UNSPEC_USUBWB, -1))
FUNCTION (svsubwt, unspec_based_function, (UNSPEC_SSUBWT, UNSPEC_USUBWT, -1))
FUNCTION (svtbl2, svtbl2_impl,)
FUNCTION (svtbx, CODE_FOR_MODE0 (aarch64_sve2_tbx),)
FUNCTION (svuqadd, svuqadd_impl,)
FUNCTION (svwhilege, while_comparison, (UNSPEC_WHILEGE, UNSPEC_WHILEHS))
FUNCTION (svwhilegt, while_comparison, (UNSPEC_WHILEGT, UNSPEC_WHILEHI))
FUNCTION (svwhilerw, svwhilerw_svwhilewr_impl, (UNSPEC_WHILERW))
FUNCTION (svwhilewr, svwhilerw_svwhilewr_impl, (UNSPEC_WHILEWR))
FUNCTION (svxar, CODE_FOR_MODE0 (aarch64_sve2_xar),)

} /* end namespace aarch64_sve */