/* Definition of RISC-V target for GNU compiler.
   Copyright (C) 2011-2024 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_RISCV_PROTOS_H
#define GCC_RISCV_PROTOS_H

#include "memmodel.h"

/* Symbol types we understand.  The order of this list must match that of
   the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST.  */
enum riscv_symbol_type {
  SYMBOL_ABSOLUTE,
  SYMBOL_FORCE_TO_MEM,
  SYMBOL_PCREL,
  SYMBOL_GOT_DISP,
  SYMBOL_TLS,
  SYMBOL_TLS_LE,
  SYMBOL_TLS_IE,
  SYMBOL_TLS_GD,
  SYMBOL_TLSDESC,
};
#define NUM_SYMBOL_TYPES (SYMBOL_TLSDESC + 1)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_REG_REG
       A base register indexed by an (optionally scaled) register.

   ADDRESS_REG_UREG
       A base register indexed by an (optionally scaled) zero-extended
       register.

   ADDRESS_REG_WB
       A base register indexed by an immediate offset with writeback.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 12-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type {
  ADDRESS_REG,
  ADDRESS_REG_REG,
  ADDRESS_REG_UREG,
  ADDRESS_REG_WB,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Information about an address described by riscv_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_REG_REG and ADDRESS_REG_UREG
       REG is the base register and OFFSET is the index register.

   ADDRESS_REG_WB
       REG is the base register, OFFSET is the constant offset, and
       SHIFT is the shift amount for the offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info {
  enum riscv_address_type type;
  rtx reg;
  rtx offset;
  enum riscv_symbol_type symbol_type;
  int shift;
};

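/* A minimal sketch (illustrative only, not code from riscv.cc) of how a
   classifier might fill in this structure for a base + constant-offset
   address; the helper name and the use of the final bool argument of
   riscv_valid_base_register_p as the "strict" flag are assumptions:

     static bool
     example_classify_base_offset (struct riscv_address_info *info,
                                   rtx x, machine_mode mode)
     {
       if (GET_CODE (x) == PLUS
           && riscv_valid_base_register_p (XEXP (x, 0), mode, false)
           && CONST_INT_P (XEXP (x, 1)))
         {
           info->type = ADDRESS_REG;
           info->reg = XEXP (x, 0);
           info->offset = XEXP (x, 1);
           return true;
         }
       return false;
     }
*/
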
/* Routines implemented in riscv.cc.  */
extern const char *riscv_asm_output_opcode (FILE *asm_out_file, const char *p);
extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
extern int riscv_float_const_rtx_index_for_fli (rtx);
extern int riscv_regno_mode_ok_for_base_p (int, machine_mode, bool);
extern bool riscv_valid_base_register_p (rtx, machine_mode, bool);
extern enum reg_class riscv_index_reg_class ();
extern int riscv_regno_ok_for_index_p (int);
extern int riscv_address_insns (rtx, machine_mode, bool);
extern int riscv_const_insns (rtx);
extern int riscv_split_const_insns (rtx);
extern int riscv_load_store_insns (rtx, rtx_insn *);
extern rtx riscv_emit_move (rtx, rtx);
extern bool riscv_split_symbol (rtx, rtx, machine_mode, rtx *);
extern bool riscv_split_symbol_type (enum riscv_symbol_type);
extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT, machine_mode);
extern bool riscv_legitimize_move (machine_mode, rtx, rtx);
extern rtx riscv_subword (rtx, bool);
extern bool riscv_split_64bit_move_p (rtx, rtx);
extern void riscv_split_doubleword_move (rtx, rtx);
extern const char *riscv_output_move (rtx, rtx);
extern const char *riscv_output_return ();
extern void riscv_declare_function_name (FILE *, const char *, tree);
extern void riscv_declare_function_size (FILE *, const char *, tree);
extern void riscv_asm_output_alias (FILE *, const tree, const tree);
extern void riscv_asm_output_external (FILE *, const tree, const char *);
extern bool
riscv_zcmp_valid_stack_adj_bytes_p (HOST_WIDE_INT, int);
extern void riscv_legitimize_poly_move (machine_mode, rtx, rtx, rtx);
extern void riscv_expand_usadd (rtx, rtx, rtx);

#ifdef RTX_CODE
extern void riscv_expand_int_scc (rtx, enum rtx_code, rtx, rtx, bool *invert_ptr = 0);
extern void riscv_expand_float_scc (rtx, enum rtx_code, rtx, rtx,
                                    bool *invert_ptr = nullptr);
extern void riscv_expand_conditional_branch (rtx, enum rtx_code, rtx, rtx);
extern rtx riscv_emit_unary (enum rtx_code code, rtx dest, rtx x);
extern rtx riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y);
#endif
extern bool riscv_expand_conditional_move (rtx, rtx, rtx, rtx);
extern rtx riscv_legitimize_call_address (rtx);
extern void riscv_set_return_address (rtx, rtx);
extern rtx riscv_return_addr (int, rtx);
extern poly_int64 riscv_initial_elimination_offset (int, int);
extern void riscv_expand_prologue (void);
extern void riscv_expand_epilogue (int);
extern bool riscv_epilogue_uses (unsigned int);
extern bool riscv_can_use_return_insn (void);
extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *);
extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *);
extern bool riscv_gpr_save_operation_p (rtx);
extern void riscv_reinit (void);
extern poly_uint64 riscv_regmode_natural_size (machine_mode);
extern bool riscv_v_ext_vector_mode_p (machine_mode);
extern bool riscv_v_ext_tuple_mode_p (machine_mode);
extern bool riscv_v_ext_vls_mode_p (machine_mode);
extern int riscv_get_v_regno_alignment (machine_mode);
extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *);
extern enum memmodel riscv_union_memmodels (enum memmodel, enum memmodel);
extern bool riscv_reg_frame_related (rtx);

/* Routines implemented in riscv-c.cc.  */
void riscv_cpu_cpp_builtins (cpp_reader *);
void riscv_register_pragmas (void);

/* Routines implemented in riscv-builtins.cc.  */
extern void riscv_atomic_assign_expand_fenv (tree *, tree *, tree *);
extern bool riscv_gimple_fold_builtin (gimple_stmt_iterator *);
extern rtx riscv_expand_builtin (tree, rtx, rtx, machine_mode, int);
extern tree riscv_builtin_decl (unsigned int, bool);
extern void riscv_init_builtins (void);

/* Routines implemented in riscv-common.cc.  */
extern std::string riscv_arch_str (bool version_p = true);
extern void riscv_parse_arch_string (const char *, struct gcc_options *, location_t);

extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);

rtl_opt_pass * make_pass_shorten_memrefs (gcc::context *ctxt);
rtl_opt_pass * make_pass_avlprop (gcc::context *ctxt);
rtl_opt_pass * make_pass_vsetvl (gcc::context *ctxt);

/* Routines implemented in riscv-string.cc.  */
extern bool riscv_expand_block_compare (rtx, rtx, rtx, rtx);
extern bool riscv_expand_block_move (rtx, rtx, rtx);
extern bool riscv_expand_block_clear (rtx, rtx);

/* Information about one CPU we know about.  */
struct riscv_cpu_info {
  /* This CPU's canonical name.  */
  const char *name;

  /* Default arch for this CPU, could be NULL if no default arch.  */
  const char *arch;

  /* Which automaton to use for tuning.  */
  const char *tune;
};

extern const riscv_cpu_info *riscv_find_cpu (const char *);

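/* For illustration only (these entries are examples, not the actual table
   built from riscv-cores.def):

     static const riscv_cpu_info example_cpu_table[] = {
       { "example-core",     "rv64imafdc", "rocket" },  // name, default arch, tune
       { "example-embedded", NULL,         "rocket" },  // no default arch
     };

   riscv_find_cpu ("example-core") would then return the matching entry;
   see riscv.cc for how unknown CPU names are handled.  */
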
/* Common vector costs in any kind of vectorization (e.g. VLA and VLS).  */
struct common_vector_cost
{
  /* Cost of any integer vector operation, excluding the ones handled
     specially below.  */
  const int int_stmt_cost;

  /* Cost of any fp vector operation, excluding the ones handled
     specially below.  */
  const int fp_stmt_cost;

  /* Gather/scatter vectorization cost.  */
  const int gather_load_cost;
  const int scatter_store_cost;

  /* Segment load/store permute cost.  */
  const int segment_permute_2;
  const int segment_permute_3;
  const int segment_permute_4;
  const int segment_permute_5;
  const int segment_permute_6;
  const int segment_permute_7;
  const int segment_permute_8;

  /* Cost of a vector-to-scalar operation.  */
  const int vec_to_scalar_cost;

  /* Cost of a scalar-to-vector operation.  */
  const int scalar_to_vec_cost;

  /* Cost of a permute operation.  */
  const int permute_cost;

  /* Cost of an aligned vector load.  */
  const int align_load_cost;

  /* Cost of an aligned vector store.  */
  const int align_store_cost;

  /* Cost of an unaligned vector load.  */
  const int unalign_load_cost;

  /* Cost of an unaligned vector store.  */
  const int unalign_store_cost;
};

/* Scalable vectorization (VLA) specific cost.  */
struct scalable_vector_cost : common_vector_cost
{
  CONSTEXPR scalable_vector_cost (const common_vector_cost &base)
    : common_vector_cost (base)
  {}

  /* TODO: We will need other kinds of vector cost for VLA,
     e.g. fold_left reduction cost, lanes load/store cost, etc.  */
};

/* Additional costs for register copies.  Cost is for one register.  */
struct regmove_vector_cost
{
  const int GR2VR;
  const int FR2VR;
  const int VR2GR;
  const int VR2FR;
};

/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  /* Cost of any integer scalar operation, excluding load and store.  */
  const int scalar_int_stmt_cost;

  /* Cost of any fp scalar operation, excluding load and store.  */
  const int scalar_fp_stmt_cost;

  /* Cost of a scalar load.  */
  const int scalar_load_cost;

  /* Cost of a scalar store.  */
  const int scalar_store_cost;

  /* Cost of a taken branch.  */
  const int cond_taken_branch_cost;

  /* Cost of a not-taken branch.  */
  const int cond_not_taken_branch_cost;

  /* Cost of VLS mode operations.  */
  const common_vector_cost *vls;

  /* Cost of VLA mode operations.  */
  const scalable_vector_cost *vla;

  /* Cost of vector register move operations.  */
  const regmove_vector_cost *regmove;
};

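/* For illustration only (the names and cost values below are made-up
   placeholders, not GCC's actual tuning tables): a cost model in riscv.cc
   would instantiate these structures roughly like so:

     static const common_vector_cost example_vls_vector_cost = {
       1, // int_stmt_cost
       1, // fp_stmt_cost
       2, // gather_load_cost
       2, // scatter_store_cost
       1, 1, 1, 1, 1, 1, 1, // segment_permute_2 ... segment_permute_8
       1, // vec_to_scalar_cost
       1, // scalar_to_vec_cost
       1, // permute_cost
       1, // align_load_cost
       1, // align_store_cost
       2, // unalign_load_cost
       2, // unalign_store_cost
     };

     static const scalable_vector_cost example_vla_vector_cost
       = { example_vls_vector_cost };

     static const regmove_vector_cost example_regmove_vector_cost
       = { 2, 2, 2, 2 };  // GR2VR, FR2VR, VR2GR, VR2FR

     static const cpu_vector_cost example_vector_cost = {
       1, // scalar_int_stmt_cost
       1, // scalar_fp_stmt_cost
       1, // scalar_load_cost
       1, // scalar_store_cost
       3, // cond_taken_branch_cost
       0, // cond_not_taken_branch_cost
       &example_vls_vector_cost,     // vls
       &example_vla_vector_cost,     // vla
       &example_regmove_vector_cost, // regmove
     };

   get_vector_costs () (declared near the end of this file) is then expected
   to return the table selected by the current tuning.  */
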
/* Routines implemented in riscv-selftests.cc.  */
#if CHECKING_P
namespace selftest {
void riscv_run_selftests (void);
} // namespace selftest
#endif

namespace riscv_vector {
#define RVV_VLMAX regno_reg_rtx[X0_REGNUM]
#define RVV_VUNDEF(MODE) \
  gen_rtx_UNSPEC (MODE, gen_rtvec (1, RVV_VLMAX), UNSPEC_VUNDEF)

/* These flags describe how to pass the operands to an RVV insn pattern.
   e.g.:
     If an insn has these flags:
       HAS_DEST_P | HAS_MASK_P | USE_VUNDEF_MERGE_P
       | TU_POLICY_P | BINARY_OP_P | FRM_DYN_P
     that means:
       operands[0] is the dest operand
       operands[1] is the mask operand
       operands[2] is the merge operand
       operands[3] and operands[4] are the two operands to do the operation
       operands[5] is the vl operand
       operands[6] is the tail policy operand
       operands[7] is the mask policy operand
       operands[8] is the rounding mode operand

   Then you can call `emit_vlmax_insn (icode, flags, ops)` to emit an insn,
   where ops[0] is the dest operand (operands[0]), ops[1] is the mask
   operand (operands[1]), and ops[2] and ops[3] are the two
   operands (operands[3], operands[4]) to do the operation.  The other
   operands will be created by emit_vlmax_insn according to the flags
   information.  A usage sketch follows enum insn_type below.  */
enum insn_flags : unsigned int
{
  /* Flags for dest, mask, merge operands.  */
  /* Means INSN has a dest operand.  False for STORE insns.  */
  HAS_DEST_P = 1 << 0,
  /* Means INSN has a mask operand.  */
  HAS_MASK_P = 1 << 1,
  /* Means using ALL_TRUES for the mask operand.  */
  USE_ALL_TRUES_MASK_P = 1 << 2,
  /* Means using ONE_TRUE for the mask operand.  */
  USE_ONE_TRUE_MASK_P = 1 << 3,
  /* Means INSN has a merge operand.  */
  HAS_MERGE_P = 1 << 4,
  /* Means using VUNDEF for the merge operand.  */
  USE_VUNDEF_MERGE_P = 1 << 5,

  /* Flags for the tail policy and mask policy operands.  */
  /* Means the tail policy is TAIL_UNDISTURBED.  */
  TU_POLICY_P = 1 << 6,
  /* Means the tail policy is the default (returned by
     get_prefer_tail_policy).  */
  TDEFAULT_POLICY_P = 1 << 7,
  /* Means the mask policy is MASK_UNDISTURBED.  */
  MU_POLICY_P = 1 << 8,
  /* Means the mask policy is the default (returned by
     get_prefer_mask_policy).  */
  MDEFAULT_POLICY_P = 1 << 9,

  /* Flags for the number of operands the operation takes.  */
  /* Means INSN needs zero operands to do the operation, e.g. vid.v.  */
  NULLARY_OP_P = 1 << 10,
  /* Means INSN needs one operand to do the operation.  */
  UNARY_OP_P = 1 << 11,
  /* Means INSN needs two operands to do the operation.  */
  BINARY_OP_P = 1 << 12,
  /* Means INSN needs three operands to do the operation.  */
  TERNARY_OP_P = 1 << 13,

  /* Flag to take the vtype mode from operand 1 instead of the default
     (the dest operand).  */
  VTYPE_MODE_FROM_OP1_P = 1 << 14,

  /* Flags for the floating-point rounding mode.  */
  /* Means INSN has an FRM operand and the value is FRM_DYN.  */
  FRM_DYN_P = 1 << 15,

  /* Means INSN has an FRM operand and the value is FRM_RUP.  */
  FRM_RUP_P = 1 << 16,

  /* Means INSN has an FRM operand and the value is FRM_RDN.  */
  FRM_RDN_P = 1 << 17,

  /* Means INSN has an FRM operand and the value is FRM_RMM.  */
  FRM_RMM_P = 1 << 18,

  /* Means INSN has an FRM operand and the value is FRM_RNE.  */
  FRM_RNE_P = 1 << 19,

  /* Means INSN has a VXRM operand and the value is VXRM_RNU.  */
  VXRM_RNU_P = 1 << 20,

  /* Means INSN has a VXRM operand and the value is VXRM_RDN.  */
  VXRM_RDN_P = 1 << 21,
};

enum insn_type : unsigned int
{
  /* Some flag combinations used below.  */
  /* For non-mask insn with tama.  */
  __NORMAL_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask policy operand.  */
  __NORMAL_OP_TA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask operand and mask policy operand.  */
  __NORMAL_OP_TA2
    = HAS_DEST_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ma, without tail policy operand.  */
  __NORMAL_OP_MA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                   | USE_VUNDEF_MERGE_P | MDEFAULT_POLICY_P,
  /* For mask insn with tama.  */
  __MASK_OP_TAMA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
                   | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with tamu.  */
  __MASK_OP_TAMU
    = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | TDEFAULT_POLICY_P | MU_POLICY_P,
  /* For mask insn with tuma.  */
  __MASK_OP_TUMA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                   | TU_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with mu.  */
  __MASK_OP_MU = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | MU_POLICY_P,
  /* For mask insn with ta, without mask policy operand.  */
  __MASK_OP_TA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
                 | TDEFAULT_POLICY_P,

  /* Nullary operator, e.g. vid.v.  */
  NULLARY_OP = __NORMAL_OP | NULLARY_OP_P,

  /* Unary operator.  */
  UNARY_OP = __NORMAL_OP | UNARY_OP_P,
  UNARY_OP_TAMA = __MASK_OP_TAMA | UNARY_OP_P,
  UNARY_OP_TAMU = __MASK_OP_TAMU | UNARY_OP_P,
  UNARY_OP_FRM_DYN = UNARY_OP | FRM_DYN_P,
  UNARY_OP_FRM_RMM = UNARY_OP | FRM_RMM_P,
  UNARY_OP_FRM_RUP = UNARY_OP | FRM_RUP_P,
  UNARY_OP_FRM_RDN = UNARY_OP | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_DYN = UNARY_OP_TAMA | FRM_DYN_P,
  UNARY_OP_TAMA_FRM_RUP = UNARY_OP_TAMA | FRM_RUP_P,
  UNARY_OP_TAMA_FRM_RDN = UNARY_OP_TAMA | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_RMM = UNARY_OP_TAMA | FRM_RMM_P,
  UNARY_OP_TAMA_FRM_RNE = UNARY_OP_TAMA | FRM_RNE_P,
  UNARY_OP_TAMU_FRM_DYN = UNARY_OP_TAMU | FRM_DYN_P,
  UNARY_OP_TAMU_FRM_RUP = UNARY_OP_TAMU | FRM_RUP_P,
  UNARY_OP_TAMU_FRM_RDN = UNARY_OP_TAMU | FRM_RDN_P,
  UNARY_OP_TAMU_FRM_RMM = UNARY_OP_TAMU | FRM_RMM_P,
  UNARY_OP_TAMU_FRM_RNE = UNARY_OP_TAMU | FRM_RNE_P,

  /* Binary operator.  */
  BINARY_OP = __NORMAL_OP | BINARY_OP_P,
  BINARY_OP_TAMA = __MASK_OP_TAMA | BINARY_OP_P,
  BINARY_OP_TAMU = __MASK_OP_TAMU | BINARY_OP_P,
  BINARY_OP_TUMA = __MASK_OP_TUMA | BINARY_OP_P,
  BINARY_OP_FRM_DYN = BINARY_OP | FRM_DYN_P,
  BINARY_OP_VXRM_RNU = BINARY_OP | VXRM_RNU_P,
  BINARY_OP_VXRM_RDN = BINARY_OP | VXRM_RDN_P,

  /* Ternary operator.  Always has a real merge operand.  */
  TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
               | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  TERNARY_OP_FRM_DYN = TERNARY_OP | FRM_DYN_P,

  /* For vwmacc, no merge operand.  */
  WIDEN_TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
                     | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  WIDEN_TERNARY_OP_FRM_DYN = WIDEN_TERNARY_OP | FRM_DYN_P,

  /* For vmerge, no mask operand, no mask policy operand.  */
  MERGE_OP = __NORMAL_OP_TA2 | TERNARY_OP_P,

  /* For vmerge with TU policy.  */
  MERGE_OP_TU = HAS_DEST_P | HAS_MERGE_P | TERNARY_OP_P | TU_POLICY_P,

  /* For vm<compare>, no tail policy operand.  */
  COMPARE_OP = __NORMAL_OP_MA | TERNARY_OP_P,
  COMPARE_OP_MU = __MASK_OP_MU | TERNARY_OP_P,

  /* For scatter insn: no dest operand, no merge operand, no tail and mask
     policy operands.  */
  SCATTER_OP_M = HAS_MASK_P | TERNARY_OP_P,

  /* For vcpop.m, no merge operand, no tail and mask policy operands.  */
  CPOP_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | UNARY_OP_P
            | VTYPE_MODE_FROM_OP1_P,

  /* For mask instructions, no tail and mask policy operands.  */
  UNARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                  | USE_VUNDEF_MERGE_P | UNARY_OP_P,
  BINARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
                   | USE_VUNDEF_MERGE_P | BINARY_OP_P,

  /* For vcompress.vm.  */
  COMPRESS_OP = __NORMAL_OP_TA2 | BINARY_OP_P,
  /* Has a merge operand but uses ta.  */
  COMPRESS_OP_MERGE
    = HAS_DEST_P | HAS_MERGE_P | TDEFAULT_POLICY_P | BINARY_OP_P,

  /* For vslideup: has a merge operand but uses ta.  */
  SLIDEUP_OP_MERGE = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
                     | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
                     | BINARY_OP_P,

  /* For vreduce, no mask policy operand.  */
  REDUCE_OP = __NORMAL_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M = __MASK_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_FRM_DYN = REDUCE_OP | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M_FRM_DYN
    = __MASK_OP_TA | BINARY_OP_P | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,

  /* For vmv.s.x/vfmv.s.f.  */
  SCALAR_MOVE_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P | HAS_MERGE_P
                   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
                   | UNARY_OP_P,

  SCALAR_MOVE_MERGED_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P
                          | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
                          | UNARY_OP_P,
};

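/* A usage sketch (illustrative only; the insn code helper and the operand
   ordering are taken from typical riscv-v.cc callers and are assumptions
   here): emitting a VLMAX-length predicated vector add looks roughly like

     rtx ops[] = {dest, src1, src2};
     emit_vlmax_insn (code_for_pred (PLUS, mode), BINARY_OP, ops);

   BINARY_OP tells the emitter that OPS carries only the dest and the two
   source operands; the all-trues mask, the vundef merge operand, the vl
   operand and the default tail/mask policy operands are synthesized from
   the flags.  */
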
enum vlmul_type
{
  LMUL_1 = 0,
  LMUL_2 = 1,
  LMUL_4 = 2,
  LMUL_8 = 3,
  LMUL_RESERVED = 4,
  LMUL_F8 = 5,
  LMUL_F4 = 6,
  LMUL_F2 = 7,
  NUM_LMUL = 8
};

/* The RISC-V vsetvli pass uses "known vlmax" operations for optimization.
   Whether or not an instruction actually is a vlmax operation is not
   recognizable from the length operand alone; the avl_type operand
   is used instead.  In general, there are two cases:

    - Emit a vlmax operation by calling emit_vlmax_insn[_lra].  Here we emit
      a vsetvli with vlmax configuration and set the avl_type to VLMAX for
      VLA modes or VLS for VLS modes.
    - Emit an operation that uses the existing (last-set) length and
      set the avl_type to NONVLMAX.

   Sometimes we also need to set the VLMAX or VLS avl_type for an operation
   that already uses a given length register.  This can happen during or after
   register allocation when we are not allowed to create a new register.
   For that case we also allow setting the avl_type to VLMAX or VLS.  */
enum avl_type
{
  NONVLMAX = 0,
  VLMAX = 1,
  VLS = 2,
};
/* Routines implemented in riscv-vector-builtins.cc.  */
void init_builtins (void);
void reinit_builtins (void);
const char *mangle_builtin_type (const_tree);
tree lookup_vector_type_attribute (const_tree);
bool builtin_type_p (const_tree);
#ifdef GCC_TARGET_H
bool verify_type_context (location_t, type_context_kind, const_tree, bool);
bool expand_vec_perm_const (machine_mode, machine_mode, rtx, rtx, rtx,
                            const vec_perm_indices &);
#endif
void handle_pragma_vector (void);
tree builtin_decl (unsigned, bool);
gimple *gimple_fold_builtin (unsigned int, gimple_stmt_iterator *, gcall *);
rtx expand_builtin (unsigned int, tree, rtx);
bool check_builtin_call (location_t, vec<location_t>, unsigned int,
                         tree, unsigned int, tree *);
tree resolve_overloaded_builtin (location_t, unsigned int, tree, vec<tree, va_gc> *);
bool const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
bool legitimize_move (rtx, rtx *);
void emit_vlmax_vsetvl (machine_mode, rtx);
void emit_hard_vlmax_vsetvl (machine_mode, rtx);
void emit_vlmax_insn (unsigned, unsigned, rtx *);
void emit_nonvlmax_insn (unsigned, unsigned, rtx *, rtx);
void emit_vlmax_insn_lra (unsigned, unsigned, rtx *, rtx);
enum vlmul_type get_vlmul (machine_mode);
rtx get_vlmax_rtx (machine_mode);
unsigned int get_ratio (machine_mode);
unsigned int get_nf (machine_mode);
machine_mode get_subpart_mode (machine_mode);
int get_ta (rtx);
int get_ma (rtx);
int get_avl_type (rtx);
unsigned int calculate_ratio (unsigned int, enum vlmul_type);
enum tail_policy
{
  TAIL_UNDISTURBED = 0,
  TAIL_AGNOSTIC = 1,
  TAIL_ANY = 2,
};

enum mask_policy
{
  MASK_UNDISTURBED = 0,
  MASK_AGNOSTIC = 1,
  MASK_ANY = 2,
};

/* Return true if VALUE is agnostic or any policy.  */
#define IS_AGNOSTIC(VALUE) (bool) (VALUE & 0x1 || (VALUE >> 1 & 0x1))

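/* For example (values follow the enums above): IS_AGNOSTIC (TAIL_UNDISTURBED)
   is false, while IS_AGNOSTIC (TAIL_AGNOSTIC) and IS_AGNOSTIC (TAIL_ANY) are
   both true, so the "any" policies count as agnostic:

     if (IS_AGNOSTIC (get_prefer_tail_policy ()))
       ;  // A tail agnostic (ta) vsetvli configuration may be used.
*/
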
enum tail_policy get_prefer_tail_policy ();
enum mask_policy get_prefer_mask_policy ();
rtx get_avl_type_rtx (enum avl_type);
opt_machine_mode get_vector_mode (scalar_mode, poly_uint64);
opt_machine_mode get_tuple_mode (machine_mode, unsigned int);
bool simm5_p (rtx);
bool neg_simm5_p (rtx);
#ifdef RTX_CODE
bool has_vi_variant_p (rtx_code, rtx);
void expand_vec_cmp (rtx, rtx_code, rtx, rtx, rtx = nullptr, rtx = nullptr);
bool expand_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
void expand_cond_len_unop (unsigned, rtx *);
void expand_cond_len_binop (unsigned, rtx *);
void expand_reduction (unsigned, unsigned, rtx *, rtx);
void expand_vec_ceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_floor (rtx, rtx, machine_mode, machine_mode);
void expand_vec_nearbyint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_rint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_round (rtx, rtx, machine_mode, machine_mode);
void expand_vec_trunc (rtx, rtx, machine_mode, machine_mode);
void expand_vec_roundeven (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lrint (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lround (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lfloor (rtx, rtx, machine_mode, machine_mode);
void expand_vec_usadd (rtx, rtx, rtx, machine_mode);
#endif
bool sew64_scalar_helper (rtx *, rtx *, rtx, machine_mode,
                          bool, void (*)(rtx *, rtx), enum avl_type);
rtx gen_scalar_move_mask (machine_mode);
rtx gen_no_side_effects_vsetvl_rtx (machine_mode, rtx, rtx);

/* RVV vector register sizes.
   TODO: Currently, we only add RVV_32/RVV_64/RVV_65536; we may need to
   support other values in the future.  */
enum vlen_enum
{
  RVV_32 = 32,
  RVV_64 = 64,
  RVV_65536 = 65536
};
bool slide1_sew64_helper (int, machine_mode, machine_mode,
                          machine_mode, rtx *);
rtx gen_avl_for_scalar_move (rtx);
void expand_tuple_move (rtx *);
bool expand_block_move (rtx, rtx, rtx);
machine_mode preferred_simd_mode (scalar_mode);
machine_mode get_mask_mode (machine_mode);
void expand_vec_series (rtx, rtx, rtx, rtx = 0);
void expand_vec_init (rtx, rtx);
void expand_vec_perm (rtx, rtx, rtx, rtx);
void expand_select_vl (rtx *);
void expand_load_store (rtx *, bool);
void expand_gather_scatter (rtx *, bool);
void expand_cond_len_ternop (unsigned, rtx *);
void prepare_ternary_operands (rtx *);
void expand_lanes_load_store (rtx *, bool);
void expand_fold_extract_last (rtx *);
void expand_cond_unop (unsigned, rtx *);
void expand_cond_binop (unsigned, rtx *);
void expand_cond_ternop (unsigned, rtx *);
void expand_popcount (rtx *);
void expand_rawmemchr (machine_mode, rtx, rtx, rtx, bool = false);
bool expand_strcmp (rtx, rtx, rtx, rtx, unsigned HOST_WIDE_INT, bool);
void emit_vec_extract (rtx, rtx, rtx);

/* Rounding mode bitfield for fixed point VXRM.  */
enum fixed_point_rounding_mode
{
  VXRM_RNU,
  VXRM_RNE,
  VXRM_RDN,
  VXRM_ROD
};

/* Rounding mode bitfield for floating point FRM.  The enum values come
   from the link below.
   https://github.com/riscv/riscv-isa-manual/blob/main/src/f-st-ext.adoc#floating-point-control-and-status-register
 */
enum floating_point_rounding_mode
{
  FRM_RNE = 0, /* Aka 0b000.  */
  FRM_RTZ = 1, /* Aka 0b001.  */
  FRM_RDN = 2, /* Aka 0b010.  */
  FRM_RUP = 3, /* Aka 0b011.  */
  FRM_RMM = 4, /* Aka 0b100.  */
  FRM_DYN = 7, /* Aka 0b111.  */
  FRM_STATIC_MIN = FRM_RNE,
  FRM_STATIC_MAX = FRM_RMM,
  FRM_DYN_EXIT = 8,
  FRM_DYN_CALL = 9,
  FRM_NONE = 10,
};

enum floating_point_rounding_mode get_frm_mode (rtx);
opt_machine_mode vectorize_related_mode (machine_mode, scalar_mode,
                                         poly_uint64);
unsigned int autovectorize_vector_modes (vec<machine_mode> *, bool);
bool cmp_lmul_le_one (machine_mode);
bool cmp_lmul_gt_one (machine_mode);
bool vls_mode_valid_p (machine_mode);
bool vlmax_avl_type_p (rtx_insn *);
bool has_vl_op (rtx_insn *);
bool tail_agnostic_p (rtx_insn *);
void validate_change_or_fail (rtx, rtx *, rtx, bool);
bool nonvlmax_avl_type_p (rtx_insn *);
bool vlmax_avl_p (rtx);
uint8_t get_sew (rtx_insn *);
enum vlmul_type get_vlmul (rtx_insn *);
int count_regno_occurrences (rtx_insn *, unsigned int);
bool imm_avl_p (machine_mode);
bool can_be_broadcasted_p (rtx);
bool gather_scatter_valid_offset_p (machine_mode);
HOST_WIDE_INT estimated_poly_value (poly_int64, unsigned int);
bool whole_reg_to_reg_move_p (rtx *, machine_mode, int);
bool splat_to_scalar_move_p (rtx *);
rtx get_fp_rounding_coefficient (machine_mode);
}

/* We classify builtin types into two classes:
   1. General builtin class, which is defined in riscv_builtins.
   2. Vector builtin class, which is a separate builtin class whose
      intrinsics are registered through "pragma".  */
enum riscv_builtin_class
{
  RISCV_BUILTIN_GENERAL,
  RISCV_BUILTIN_VECTOR
};

const unsigned int RISCV_BUILTIN_SHIFT = 1;

/* Mask that selects the riscv_builtin_class part of a function code.  */
const unsigned int RISCV_BUILTIN_CLASS = (1 << RISCV_BUILTIN_SHIFT) - 1;

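/* For illustration (a sketch of the encoding convention, not actual GCC
   code; FCODE is a placeholder subcode): a function code packs the
   class-specific subcode above the class bit, so it can be built and split
   as

     unsigned int code = (fcode << RISCV_BUILTIN_SHIFT) | RISCV_BUILTIN_VECTOR;
     enum riscv_builtin_class klass
       = (enum riscv_builtin_class) (code & RISCV_BUILTIN_CLASS);
     unsigned int subcode = code >> RISCV_BUILTIN_SHIFT;

   which lets riscv_builtin_decl and riscv_expand_builtin dispatch between
   the general and vector builtin implementations.  */
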
/* Routines implemented in riscv-string.cc.  */
extern bool riscv_expand_strcmp (rtx, rtx, rtx, rtx, rtx);
extern bool riscv_expand_strlen (rtx, rtx, rtx, rtx);

/* Routines implemented in thead.cc.  */
extern bool extract_base_offset_in_addr (rtx, rtx *, rtx *);
extern bool th_mempair_operands_p (rtx[4], bool, machine_mode);
extern void th_mempair_order_operands (rtx[4], bool, machine_mode);
extern void th_mempair_prepare_save_restore_operands (rtx[4], bool,
                                                      machine_mode,
                                                      int, HOST_WIDE_INT,
                                                      int, HOST_WIDE_INT);
extern void th_mempair_save_restore_regs (rtx[4], bool, machine_mode);
extern unsigned int th_int_get_mask (unsigned int);
extern unsigned int th_int_get_save_adjustment (void);
extern rtx th_int_adjust_cfi_prologue (unsigned int);
extern const char *th_asm_output_opcode (FILE *asm_out_file, const char *p);
#ifdef RTX_CODE
extern const char*
th_mempair_output_move (rtx[4], bool, machine_mode, RTX_CODE);
extern bool th_memidx_legitimate_modify_p (rtx);
extern bool th_memidx_legitimate_modify_p (rtx, bool);
extern bool th_memidx_legitimate_index_p (rtx);
extern bool th_memidx_legitimate_index_p (rtx, bool);
extern bool th_classify_address (struct riscv_address_info *,
                                 rtx, machine_mode, bool);
extern const char *th_output_move (rtx, rtx);
extern bool th_print_operand_address (FILE *, machine_mode, rtx);
#endif

extern bool riscv_use_divmod_expander (void);
void riscv_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
extern bool
riscv_option_valid_attribute_p (tree, tree, tree, int);
extern void
riscv_override_options_internal (struct gcc_options *);
extern void riscv_option_override (void);

struct riscv_tune_param;
/* Information about one micro-arch we know about.  */
struct riscv_tune_info {
  /* This micro-arch's canonical name.  */
  const char *name;

  /* Which automaton to use for tuning.  */
  enum riscv_microarchitecture_type microarchitecture;

  /* Tuning parameters for this micro-arch.  */
  const struct riscv_tune_param *tune_param;
};

const struct riscv_tune_info *
riscv_parse_tune (const char *, bool);
const cpu_vector_cost *get_vector_costs ();

enum
{
  RISCV_MAJOR_VERSION_BASE = 1000000,
  RISCV_MINOR_VERSION_BASE = 1000,
  RISCV_REVISION_VERSION_BASE = 1,
};

#endif /* ! GCC_RISCV_PROTOS_H */