]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/riscv/riscv-protos.h
Update copyright years.
[thirdparty/gcc.git] / gcc / config / riscv / riscv-protos.h
1 /* Definition of RISC-V target for GNU compiler.
2 Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 Contributed by Andrew Waterman (andrew@sifive.com).
4 Based on MIPS target for GNU compiler.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #ifndef GCC_RISCV_PROTOS_H
23 #define GCC_RISCV_PROTOS_H
24
25 #include "memmodel.h"
26
/* Symbol types we understand.  The order of this list must match that of
   the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST.  */
enum riscv_symbol_type {
  SYMBOL_ABSOLUTE,	/* Symbol's value used directly (absolute access).  */
  SYMBOL_FORCE_TO_MEM,	/* Constant forced to memory (constant pool).  */
  SYMBOL_PCREL,		/* PC-relative reference.  */
  SYMBOL_GOT_DISP,	/* Address loaded from the GOT.  */
  SYMBOL_TLS,		/* Generic TLS symbol (model chosen elsewhere).  */
  SYMBOL_TLS_LE,	/* TLS local-exec model.  */
  SYMBOL_TLS_IE,	/* TLS initial-exec model.  */
  SYMBOL_TLS_GD		/* TLS global-dynamic model.  */
};
/* Number of symbol types above; keep in sync when adding entries.  */
#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
40
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UREG
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A constant integer address.  (NOTE(review): previously described as
       "signed 16-bit" — wording inherited from the MIPS port; RISC-V
       arithmetic immediates are 12-bit.  Confirm the accepted range
       against riscv_classify_address in riscv.cc.)

   ADDRESS_SYMBOLIC:
       A constant symbolic address.  */
enum riscv_address_type {
  ADDRESS_REG,		/* base reg + constant offset.  */
  ADDRESS_REG_REG,	/* base reg + (scaled) index reg.  */
  ADDRESS_REG_UREG,	/* base reg + (scaled) zero-extended index reg.  */
  ADDRESS_REG_WB,	/* base reg + offset, with writeback.  */
  ADDRESS_LO_SUM,	/* LO_SUM of base reg and symbolic address.  */
  ADDRESS_CONST_INT,	/* constant integer address.  */
  ADDRESS_SYMBOLIC	/* constant symbolic address.  */
};
74
/* Information about an address described by riscv_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_REG_REG and ADDRESS_REG_UREG
       REG is the base register and OFFSET is the index register.

   ADDRESS_REG_WB
       REG is the base register, OFFSET is the constant offset, and
       SHIFT is the shift amount for the offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info {
  enum riscv_address_type type;		/* Address classification.  */
  rtx reg;				/* Base register (meaning per TYPE, see above).  */
  rtx offset;				/* Offset, index register, or LO_SUM operand.  */
  enum riscv_symbol_type symbol_type;	/* Valid for LO_SUM/SYMBOLIC.  */
  int shift;				/* Shift amount; valid for ADDRESS_REG_WB.  */
};
103
/* Routines implemented in riscv.cc.  */

/* Address, symbol, and constant analysis.  */
extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
extern int riscv_float_const_rtx_index_for_fli (rtx);
extern int riscv_regno_mode_ok_for_base_p (int, machine_mode, bool);
extern bool riscv_valid_base_register_p (rtx, machine_mode, bool);
extern enum reg_class riscv_index_reg_class ();
extern int riscv_regno_ok_for_index_p (int);
extern int riscv_address_insns (rtx, machine_mode, bool);
extern int riscv_const_insns (rtx);
extern int riscv_split_const_insns (rtx);
extern int riscv_load_store_insns (rtx, rtx_insn *);

/* Move expansion and output.  */
extern rtx riscv_emit_move (rtx, rtx);
extern bool riscv_split_symbol (rtx, rtx, machine_mode, rtx *);
extern bool riscv_split_symbol_type (enum riscv_symbol_type);
extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT, machine_mode);
extern bool riscv_legitimize_move (machine_mode, rtx, rtx);
extern rtx riscv_subword (rtx, bool);
extern bool riscv_split_64bit_move_p (rtx, rtx);
extern void riscv_split_doubleword_move (rtx, rtx);
extern const char *riscv_output_move (rtx, rtx);
extern const char *riscv_output_return ();

/* Assembly output helpers.  */
extern void riscv_declare_function_name (FILE *, const char *, tree);
extern void riscv_declare_function_size (FILE *, const char *, tree);
extern void riscv_asm_output_alias (FILE *, const tree, const tree);
extern void riscv_asm_output_external (FILE *, const tree, const char *);
extern bool
riscv_zcmp_valid_stack_adj_bytes_p (HOST_WIDE_INT, int);
extern void riscv_legitimize_poly_move (machine_mode, rtx, rtx, rtx);

#ifdef RTX_CODE
/* Comparison/conditional expansion (only visible when RTX_CODE is defined).  */
extern void riscv_expand_int_scc (rtx, enum rtx_code, rtx, rtx, bool *invert_ptr = 0);
extern void riscv_expand_float_scc (rtx, enum rtx_code, rtx, rtx,
				    bool *invert_ptr = nullptr);
extern void riscv_expand_conditional_branch (rtx, enum rtx_code, rtx, rtx);
extern rtx riscv_emit_unary (enum rtx_code code, rtx dest, rtx x);
extern rtx riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y);
#endif
extern bool riscv_expand_conditional_move (rtx, rtx, rtx, rtx);

/* Calls, returns, prologue/epilogue, and frame layout.  */
extern rtx riscv_legitimize_call_address (rtx);
extern void riscv_set_return_address (rtx, rtx);
extern rtx riscv_return_addr (int, rtx);
extern poly_int64 riscv_initial_elimination_offset (int, int);
extern void riscv_expand_prologue (void);
extern void riscv_expand_epilogue (int);
extern bool riscv_epilogue_uses (unsigned int);
extern bool riscv_can_use_return_insn (void);
extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *);
extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *);
extern bool riscv_gpr_save_operation_p (rtx);
extern void riscv_reinit (void);

/* RVV mode predicates and miscellaneous helpers.  */
extern poly_uint64 riscv_regmode_natural_size (machine_mode);
extern bool riscv_v_ext_vector_mode_p (machine_mode);
extern bool riscv_v_ext_tuple_mode_p (machine_mode);
extern bool riscv_v_ext_vls_mode_p (machine_mode);
extern int riscv_get_v_regno_alignment (machine_mode);
extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *);
extern enum memmodel riscv_union_memmodels (enum memmodel, enum memmodel);
166
/* Routines implemented in riscv-c.cc.  */
void riscv_cpu_cpp_builtins (cpp_reader *);
void riscv_register_pragmas (void);

/* Routines implemented in riscv-builtins.cc.  */
extern void riscv_atomic_assign_expand_fenv (tree *, tree *, tree *);
extern bool riscv_gimple_fold_builtin (gimple_stmt_iterator *);
extern rtx riscv_expand_builtin (tree, rtx, rtx, machine_mode, int);
extern tree riscv_builtin_decl (unsigned int, bool);
extern void riscv_init_builtins (void);

/* Routines implemented in riscv-common.cc.  */
extern std::string riscv_arch_str (bool version_p = true);
extern void riscv_parse_arch_string (const char *, struct gcc_options *, location_t);

extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);

/* RTL pass constructors; implemented in their respective riscv-*.cc files.  */
rtl_opt_pass * make_pass_shorten_memrefs (gcc::context *ctxt);
rtl_opt_pass * make_pass_avlprop (gcc::context *ctxt);
rtl_opt_pass * make_pass_vsetvl (gcc::context *ctxt);

/* Routines implemented in riscv-string.cc.  */
extern bool riscv_expand_block_move (rtx, rtx, rtx);
190
/* Information about one CPU we know about.  */
struct riscv_cpu_info {
  /* This CPU's canonical name.  */
  const char *name;

  /* Default arch for this CPU, could be NULL if no default arch.  */
  const char *arch;

  /* Which automaton to use for tuning.  */
  const char *tune;
};

/* Look up the CPU entry for a canonical name.  NOTE(review): behavior for
   an unknown name (NULL vs. fallback entry) is not visible here — confirm
   in riscv-common.cc.  */
extern const riscv_cpu_info *riscv_find_cpu (const char *);
204
/* Common vector costs in any kind of vectorization (e.g VLA and VLS).
   NOTE(review): the unit these costs are expressed in (relative to the
   scalar statement costs in cpu_vector_cost) is defined by the consumer
   in the vectorizer cost hooks — confirm there before retuning.  */
struct common_vector_cost
{
  /* Cost of any integer vector operation, excluding the ones handled
     specially below.  */
  const int int_stmt_cost;

  /* Cost of any fp vector operation, excluding the ones handled
     specially below.  */
  const int fp_stmt_cost;

  /* Gather/scatter vectorization cost.  */
  const int gather_load_cost;
  const int scatter_store_cost;

  /* Cost of a vector-to-scalar operation.  */
  const int vec_to_scalar_cost;

  /* Cost of a scalar-to-vector operation.  */
  const int scalar_to_vec_cost;

  /* Cost of a permute operation.  */
  const int permute_cost;

  /* Cost of an aligned vector load.  */
  const int align_load_cost;

  /* Cost of an aligned vector store.  */
  const int align_store_cost;

  /* Cost of an unaligned vector load.  */
  const int unalign_load_cost;

  /* Cost of an unaligned vector store.  */
  const int unalign_store_cost;
};
241
/* Scalable vectorization (VLA) specific cost.  Currently identical to the
   common costs; the constructor simply copies the common base.  */
struct scalable_vector_cost : common_vector_cost
{
  CONSTEXPR scalable_vector_cost (const common_vector_cost &base)
    : common_vector_cost (base)
  {}

  /* TODO: We will need more other kinds of vector cost for VLA.
     E.g. fold_left reduction cost, lanes load/store cost, ..., etc.  */
};
252
/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  /* Cost of any integer scalar operation, excluding load and store.  */
  const int scalar_int_stmt_cost;

  /* Cost of any fp scalar operation, excluding load and store.  */
  const int scalar_fp_stmt_cost;

  /* Cost of a scalar load.  */
  const int scalar_load_cost;

  /* Cost of a scalar store.  */
  const int scalar_store_cost;

  /* Cost of a taken branch.  */
  const int cond_taken_branch_cost;

  /* Cost of a not-taken branch.  */
  const int cond_not_taken_branch_cost;

  /* Costs of operations in VLS modes.  */
  const common_vector_cost *vls;

  /* Costs of operations in VLA modes.  */
  const scalable_vector_cost *vla;
};
280
/* Routines implemented in riscv-selftests.cc.  */
#if CHECKING_P
namespace selftest {
void riscv_run_selftests (void);
} // namespace selftest
#endif

namespace riscv_vector {
/* AVL operand requesting VLMAX: the x0 register as AVL means "set vl to
   VLMAX" per the vsetvli convention in the vector spec.  */
#define RVV_VLMAX gen_rtx_REG (Pmode, X0_REGNUM)
/* An UNSPEC_VUNDEF rtx of the given mode, used as an undefined merge
   operand (the spec permits any value in merged-out elements).  */
#define RVV_VUNDEF(MODE) \
  gen_rtx_UNSPEC (MODE, gen_rtvec (1, gen_rtx_REG (SImode, X0_REGNUM)), \
		  UNSPEC_VUNDEF)
293
/* These flags describe how to pass the operands to a rvv insn pattern.
   e.g.:
     If an insn has these flags:
	HAS_DEST_P | HAS_MASK_P | USE_VUNDEF_MERGE_P
	| TU_POLICY_P | BINARY_OP_P | FRM_DYN_P
     that means:
	operands[0] is the dest operand
	operands[1] is the mask operand
	operands[2] is the merge operand
	operands[3] and operands[4] are the two operands to do the operation
	operands[5] is the vl operand
	operands[6] is the tail policy operand
	operands[7] is the mask policy operand
	operands[8] is the rounding mode operand

   Then you can call `emit_vlmax_insn (flags, icode, ops)` to emit an insn,
   and ops[0] is the dest operand (operands[0]), ops[1] is the mask
   operand (operands[1]), ops[2] and ops[3] are the two
   operands (operands[3], operands[4]) to do the operation.  Other operands
   will be created by emit_vlmax_insn according to the flags information.
*/
enum insn_flags : unsigned int
{
  /* Flags for dest, mask, merge operands.  */
  /* Means INSN has dest operand.  False for STORE insn.  */
  HAS_DEST_P = 1 << 0,
  /* Means INSN has mask operand.  */
  HAS_MASK_P = 1 << 1,
  /* Means using ALL_TRUES for mask operand.  */
  USE_ALL_TRUES_MASK_P = 1 << 2,
  /* Means using ONE_TRUE for mask operand.  */
  USE_ONE_TRUE_MASK_P = 1 << 3,
  /* Means INSN has merge operand.  */
  HAS_MERGE_P = 1 << 4,
  /* Means using VUNDEF for merge operand.  */
  USE_VUNDEF_MERGE_P = 1 << 5,

  /* Flags for tail policy and mask policy operands.  */
  /* Means the tail policy is TAIL_UNDISTURBED.  */
  TU_POLICY_P = 1 << 6,
  /* Means the tail policy is default (returned by get_prefer_tail_policy).  */
  TDEFAULT_POLICY_P = 1 << 7,
  /* Means the mask policy is MASK_UNDISTURBED.  */
  MU_POLICY_P = 1 << 8,
  /* Means the mask policy is default (returned by get_prefer_mask_policy).  */
  MDEFAULT_POLICY_P = 1 << 9,

  /* Flags for the number of operands the operation consumes.  */
  /* Means INSN needs zero operands to do the operation.  e.g. vid.v.  */
  NULLARY_OP_P = 1 << 10,
  /* Means INSN needs one operand to do the operation.  */
  UNARY_OP_P = 1 << 11,
  /* Means INSN needs two operands to do the operation.  */
  BINARY_OP_P = 1 << 12,
  /* Means INSN needs three operands to do the operation.  */
  TERNARY_OP_P = 1 << 13,

  /* Flag to take the vtype mode from operand 1 instead of the default
     (the dest operand).  */
  VTYPE_MODE_FROM_OP1_P = 1 << 14,

  /* Flags for the floating-point rounding mode.  */
  /* Means INSN has FRM operand and the value is FRM_DYN.  */
  FRM_DYN_P = 1 << 15,

  /* Means INSN has FRM operand and the value is FRM_RUP.  */
  FRM_RUP_P = 1 << 16,

  /* Means INSN has FRM operand and the value is FRM_RDN.  */
  FRM_RDN_P = 1 << 17,

  /* Means INSN has FRM operand and the value is FRM_RMM.  */
  FRM_RMM_P = 1 << 18,

  /* Means INSN has FRM operand and the value is FRM_RNE.  */
  FRM_RNE_P = 1 << 19,
};
370
enum insn_type : unsigned int
{
  /* Some flag-combination macros.
     NOTE(review): identifiers beginning with "__" are reserved for the
     implementation in C++; these names are kept as-is for interface
     compatibility with existing users.  */
  /* For non-mask insn with tama.  */
  __NORMAL_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		| USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask policy operand.  */
  __NORMAL_OP_TA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask operand and mask policy operand.  */
  __NORMAL_OP_TA2
    = HAS_DEST_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ma, without tail policy operand.  */
  __NORMAL_OP_MA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | MDEFAULT_POLICY_P,
  /* For mask insn with tama.  */
  __MASK_OP_TAMA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
		   | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with tamu.  */
  __MASK_OP_TAMU
    = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | TDEFAULT_POLICY_P | MU_POLICY_P,
  /* For mask insn with tuma.  */
  __MASK_OP_TUMA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | TU_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with mu.  */
  __MASK_OP_MU = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | MU_POLICY_P,
  /* For mask insn with ta, without mask policy operand.  */
  __MASK_OP_TA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
		 | TDEFAULT_POLICY_P,

  /* Nullary operator.  e.g. vid.v.  */
  NULLARY_OP = __NORMAL_OP | NULLARY_OP_P,

  /* Unary operator.  */
  UNARY_OP = __NORMAL_OP | UNARY_OP_P,
  UNARY_OP_TAMA = __MASK_OP_TAMA | UNARY_OP_P,
  UNARY_OP_TAMU = __MASK_OP_TAMU | UNARY_OP_P,
  UNARY_OP_FRM_DYN = UNARY_OP | FRM_DYN_P,
  UNARY_OP_FRM_RMM = UNARY_OP | FRM_RMM_P,
  UNARY_OP_FRM_RUP = UNARY_OP | FRM_RUP_P,
  UNARY_OP_FRM_RDN = UNARY_OP | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_DYN = UNARY_OP_TAMA | FRM_DYN_P,
  UNARY_OP_TAMA_FRM_RUP = UNARY_OP_TAMA | FRM_RUP_P,
  UNARY_OP_TAMA_FRM_RDN = UNARY_OP_TAMA | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_RMM = UNARY_OP_TAMA | FRM_RMM_P,
  UNARY_OP_TAMA_FRM_RNE = UNARY_OP_TAMA | FRM_RNE_P,
  UNARY_OP_TAMU_FRM_DYN = UNARY_OP_TAMU | FRM_DYN_P,
  UNARY_OP_TAMU_FRM_RUP = UNARY_OP_TAMU | FRM_RUP_P,
  UNARY_OP_TAMU_FRM_RDN = UNARY_OP_TAMU | FRM_RDN_P,
  UNARY_OP_TAMU_FRM_RMM = UNARY_OP_TAMU | FRM_RMM_P,
  UNARY_OP_TAMU_FRM_RNE = UNARY_OP_TAMU | FRM_RNE_P,

  /* Binary operator.  */
  BINARY_OP = __NORMAL_OP | BINARY_OP_P,
  BINARY_OP_TAMA = __MASK_OP_TAMA | BINARY_OP_P,
  BINARY_OP_TAMU = __MASK_OP_TAMU | BINARY_OP_P,
  BINARY_OP_TUMA = __MASK_OP_TUMA | BINARY_OP_P,
  BINARY_OP_FRM_DYN = BINARY_OP | FRM_DYN_P,

  /* Ternary operator.  Always have real merge operand.  */
  TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
	       | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  TERNARY_OP_FRM_DYN = TERNARY_OP | FRM_DYN_P,

  /* For vwmacc, no merge operand.  */
  WIDEN_TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
		     | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  WIDEN_TERNARY_OP_FRM_DYN = WIDEN_TERNARY_OP | FRM_DYN_P,

  /* For vmerge, no mask operand, no mask policy operand.  */
  MERGE_OP = __NORMAL_OP_TA2 | TERNARY_OP_P,

  /* For vmerge with TU policy.  */
  MERGE_OP_TU = HAS_DEST_P | HAS_MERGE_P | TERNARY_OP_P | TU_POLICY_P,

  /* For vm<compare>, no tail policy operand.  */
  COMPARE_OP = __NORMAL_OP_MA | TERNARY_OP_P,
  COMPARE_OP_MU = __MASK_OP_MU | TERNARY_OP_P,

  /* For scatter insn: no dest operand, no merge operand, no tail and mask
     policy operands.  */
  SCATTER_OP_M = HAS_MASK_P | TERNARY_OP_P,

  /* For vcpop.m, no merge operand, no tail and mask policy operands.  */
  CPOP_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | UNARY_OP_P
	    | VTYPE_MODE_FROM_OP1_P,

  /* For mask instructions, no tail and mask policy operands.  */
  UNARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		  | USE_VUNDEF_MERGE_P | UNARY_OP_P,
  BINARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | BINARY_OP_P,

  /* For vcompress.vm.  */
  COMPRESS_OP = __NORMAL_OP_TA2 | BINARY_OP_P,
  /* Has merge operand but uses ta.  */
  COMPRESS_OP_MERGE
    = HAS_DEST_P | HAS_MERGE_P | TDEFAULT_POLICY_P | BINARY_OP_P,

  /* For vslideup.up: has merge operand but uses ta.  */
  SLIDEUP_OP_MERGE = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
		     | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
		     | BINARY_OP_P,

  /* For vreduce, no mask policy operand.  */
  REDUCE_OP = __NORMAL_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M = __MASK_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_FRM_DYN = REDUCE_OP | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M_FRM_DYN
    = __MASK_OP_TA | BINARY_OP_P | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,

  /* For vmv.s.x/vfmv.s.f.  */
  SCALAR_MOVE_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
		   | UNARY_OP_P,

  SCALAR_MOVE_MERGED_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P
			  | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
			  | UNARY_OP_P,
};
491
/* Vector register group multiplier (LMUL).
   NOTE(review): the values appear to follow the vtype.vlmul encoding of
   the vector spec (4 reserved, 5-7 fractional) — confirm against the
   spec before relying on the numeric values.  */
enum vlmul_type
{
  LMUL_1 = 0,
  LMUL_2 = 1,
  LMUL_4 = 2,
  LMUL_8 = 3,
  LMUL_RESERVED = 4,
  LMUL_F8 = 5,	/* LMUL = 1/8.  */
  LMUL_F4 = 6,	/* LMUL = 1/4.  */
  LMUL_F2 = 7,	/* LMUL = 1/2.  */
  NUM_LMUL = 8
};
504
/* The RISC-V vsetvli pass uses "known vlmax" operations for optimization.
   Whether or not an instruction actually is a vlmax operation is not
   recognizable from the length operand alone but the avl_type operand
   is used instead.  In general, there are two cases:

    - Emit a vlmax operation by calling emit_vlmax_insn[_lra].  Here we emit
      a vsetvli with vlmax configuration and set the avl_type to VLMAX for
      VLA modes or VLS for VLS modes.
    - Emit an operation that uses the existing (last-set) length and
      set the avl_type to NONVLMAX.

   Sometimes we also need to set the VLMAX or VLS avl_type to an operation that
   already uses a given length register.  This can happen during or after
   register allocation when we are not allowed to create a new register.
   For that case we also allow to set the avl_type to VLMAX or VLS.
*/
enum avl_type
{
  NONVLMAX = 0,	/* Use the existing (last-set) vector length.  */
  VLMAX = 1,	/* Full-vector-length operation in a VLA mode.  */
  VLS = 2,	/* Fixed-length operation in a VLS mode.  */
};
/* Routines implemented in riscv-vector-builtins.cc.  */
void init_builtins (void);
const char *mangle_builtin_type (const_tree);
tree lookup_vector_type_attribute (const_tree);
bool builtin_type_p (const_tree);
#ifdef GCC_TARGET_H
bool verify_type_context (location_t, type_context_kind, const_tree, bool);
bool expand_vec_perm_const (machine_mode, machine_mode, rtx, rtx, rtx,
			    const vec_perm_indices &);
#endif
void handle_pragma_vector (void);
tree builtin_decl (unsigned, bool);
gimple *gimple_fold_builtin (unsigned int, gimple_stmt_iterator *, gcall *);
rtx expand_builtin (unsigned int, tree, rtx);
bool check_builtin_call (location_t, vec<location_t>, unsigned int,
			 tree, unsigned int, tree *);
tree resolve_overloaded_builtin (unsigned int, vec<tree, va_gc> *);
bool const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
bool legitimize_move (rtx, rtx *);

/* Insn emission helpers; see the insn_flags/insn_type comments above for
   the operand-passing convention.  */
void emit_vlmax_vsetvl (machine_mode, rtx);
void emit_hard_vlmax_vsetvl (machine_mode, rtx);
void emit_vlmax_insn (unsigned, unsigned, rtx *);
void emit_nonvlmax_insn (unsigned, unsigned, rtx *, rtx);
void emit_vlmax_insn_lra (unsigned, unsigned, rtx *, rtx);

/* Mode and operand queries.  */
enum vlmul_type get_vlmul (machine_mode);
rtx get_vlmax_rtx (machine_mode);
unsigned int get_ratio (machine_mode);
unsigned int get_nf (machine_mode);
machine_mode get_subpart_mode (machine_mode);
int get_ta (rtx);
int get_ma (rtx);
int get_avl_type (rtx);
unsigned int calculate_ratio (unsigned int, enum vlmul_type);
/* Tail policy for elements past vl.  NOTE(review): TAIL_ANY appears to
   mean "either policy acceptable" (cf. IS_AGNOSTIC below) — confirm in
   the vsetvl pass.  */
enum tail_policy
{
  TAIL_UNDISTURBED = 0,
  TAIL_AGNOSTIC = 1,
  TAIL_ANY = 2,
};
566
/* Mask policy for masked-off elements.  NOTE(review): MASK_ANY appears to
   mean "either policy acceptable" (cf. IS_AGNOSTIC below) — confirm in
   the vsetvl pass.  */
enum mask_policy
{
  MASK_UNDISTURBED = 0,
  MASK_AGNOSTIC = 1,
  MASK_ANY = 2,
};
573
/* Return true if VALUE is agnostic or any policy, i.e. bit 0 (AGNOSTIC)
   or bit 1 (ANY) is set.  Every use of VALUE and the whole expansion are
   parenthesized so that expression arguments (e.g. `a | b`) bind
   correctly — the previous definition misparsed low-precedence
   arguments because `&` and `>>` bind tighter than `|`.  */
#define IS_AGNOSTIC(VALUE) ((bool) ((((VALUE) & 0x1) || (((VALUE) >> 1) & 0x1))))
576
enum tail_policy get_prefer_tail_policy ();
enum mask_policy get_prefer_mask_policy ();
rtx get_avl_type_rtx (enum avl_type);
opt_machine_mode get_vector_mode (scalar_mode, poly_uint64);
opt_machine_mode get_tuple_mode (machine_mode, unsigned int);
/* Predicates for 5-bit signed immediates (vi-variant operands).  */
bool simm5_p (rtx);
bool neg_simm5_p (rtx);
#ifdef RTX_CODE
bool has_vi_variant_p (rtx_code, rtx);
void expand_vec_cmp (rtx, rtx_code, rtx, rtx);
bool expand_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
void expand_cond_len_unop (unsigned, rtx *);
void expand_cond_len_binop (unsigned, rtx *);
void expand_reduction (unsigned, unsigned, rtx *, rtx);
/* Vector floating-point rounding/conversion expanders.  */
void expand_vec_ceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_floor (rtx, rtx, machine_mode, machine_mode);
void expand_vec_nearbyint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_rint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_round (rtx, rtx, machine_mode, machine_mode);
void expand_vec_trunc (rtx, rtx, machine_mode, machine_mode);
void expand_vec_roundeven (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lrint (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lround (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lfloor (rtx, rtx, machine_mode, machine_mode);
#endif
bool sew64_scalar_helper (rtx *, rtx *, rtx, machine_mode,
			  bool, void (*)(rtx *, rtx), enum avl_type);
rtx gen_scalar_move_mask (machine_mode);
rtx gen_no_side_effects_vsetvl_rtx (machine_mode, rtx, rtx);
607
/* RVV vector register sizes (VLEN, in bits).
   TODO: Currently, we only add RVV_32/RVV_64/RVV_65536; we may need to
   support other values in the future.  */
enum vlen_enum
{
  RVV_32 = 32,
  RVV_64 = 64,
  RVV_65536 = 65536
};
/* Miscellaneous RVV expansion helpers.  */
bool slide1_sew64_helper (int, machine_mode, machine_mode,
			  machine_mode, rtx *);
rtx gen_avl_for_scalar_move (rtx);
void expand_tuple_move (rtx *);
bool expand_block_move (rtx, rtx, rtx);
machine_mode preferred_simd_mode (scalar_mode);
machine_mode get_mask_mode (machine_mode);
void expand_vec_series (rtx, rtx, rtx, rtx = 0);
void expand_vec_init (rtx, rtx);
void expand_vec_perm (rtx, rtx, rtx, rtx);
void expand_select_vl (rtx *);
void expand_load_store (rtx *, bool);
void expand_gather_scatter (rtx *, bool);
void expand_cond_len_ternop (unsigned, rtx *);
void prepare_ternary_operands (rtx *);
void expand_lanes_load_store (rtx *, bool);
void expand_fold_extract_last (rtx *);
void expand_cond_unop (unsigned, rtx *);
void expand_cond_binop (unsigned, rtx *);
void expand_cond_ternop (unsigned, rtx *);
void expand_popcount (rtx *);
void expand_rawmemchr (machine_mode, rtx, rtx, rtx, bool = false);
bool expand_strcmp (rtx, rtx, rtx, rtx, unsigned HOST_WIDE_INT, bool);
void emit_vec_extract (rtx, rtx, rtx);
641
/* Rounding mode bitfield for fixed point VXRM.  Values follow the vxrm
   encoding in the RVV spec (0-3).  */
enum fixed_point_rounding_mode
{
  VXRM_RNU,	/* Round-to-nearest-up (add +0.5 LSB).  */
  VXRM_RNE,	/* Round-to-nearest-even.  */
  VXRM_RDN,	/* Round-down (truncate).  */
  VXRM_ROD	/* Round-to-odd (OR into LSB; "jam").  */
};
650
/* Rounding mode bitfield for floating point FRM.  The value of enum comes
   from the below link.
   https://github.com/riscv/riscv-isa-manual/blob/main/src/f-st-ext.adoc#floating-point-control-and-status-register
*/
enum floating_point_rounding_mode
{
  FRM_RNE = 0, /* Aka 0b000.  Round to nearest, ties to even.  */
  FRM_RTZ = 1, /* Aka 0b001.  Round towards zero.  */
  FRM_RDN = 2, /* Aka 0b010.  Round down (towards -infinity).  */
  FRM_RUP = 3, /* Aka 0b011.  Round up (towards +infinity).  */
  FRM_RMM = 4, /* Aka 0b100.  Round to nearest, ties to max magnitude.  */
  FRM_DYN = 7, /* Aka 0b111.  Use the dynamic mode in the frm register.  */
  FRM_STATIC_MIN = FRM_RNE, /* Smallest static (encoded) rounding mode.  */
  FRM_STATIC_MAX = FRM_RMM, /* Largest static (encoded) rounding mode.  */
  /* The following are not architectural frm encodings.  NOTE(review):
     presumably internal states of the FRM mode-switching machinery —
     confirm their exact semantics in riscv.cc.  */
  FRM_DYN_EXIT = 8,
  FRM_DYN_CALL = 9,
  FRM_NONE = 10,
};
669
enum floating_point_rounding_mode get_frm_mode (rtx);
/* Vectorizer hooks and insn property queries.  */
opt_machine_mode vectorize_related_mode (machine_mode, scalar_mode,
					 poly_uint64);
unsigned int autovectorize_vector_modes (vec<machine_mode> *, bool);
bool cmp_lmul_le_one (machine_mode);
bool cmp_lmul_gt_one (machine_mode);
bool vls_mode_valid_p (machine_mode);
bool vlmax_avl_type_p (rtx_insn *);
bool has_vl_op (rtx_insn *);
bool tail_agnostic_p (rtx_insn *);
void validate_change_or_fail (rtx, rtx *, rtx, bool);
bool nonvlmax_avl_type_p (rtx_insn *);
bool vlmax_avl_p (rtx);
uint8_t get_sew (rtx_insn *);
enum vlmul_type get_vlmul (rtx_insn *);
int count_regno_occurrences (rtx_insn *, unsigned int);
bool imm_avl_p (machine_mode);
bool can_be_broadcasted_p (rtx);
bool gather_scatter_valid_offset_p (machine_mode);
HOST_WIDE_INT estimated_poly_value (poly_int64, unsigned int);
} // namespace riscv_vector
691
/* We classify builtins into two classes:
   1. General builtin class, defined in riscv_builtins.
   2. Vector builtin class, a special builtin class whose intrinsics
      are registered through "pragma" handling.  */
enum riscv_builtin_class
{
  RISCV_BUILTIN_GENERAL,
  RISCV_BUILTIN_VECTOR
};

/* Number of low-order bits of a function code that encode the class.  */
const unsigned int RISCV_BUILTIN_SHIFT = 1;

/* Mask that selects the riscv_builtin_class part of a function code.  */
const unsigned int RISCV_BUILTIN_CLASS = (1 << RISCV_BUILTIN_SHIFT) - 1;
706
/* Routines implemented in riscv-string.cc.  */
extern bool riscv_expand_strcmp (rtx, rtx, rtx, rtx, rtx);
extern bool riscv_expand_strlen (rtx, rtx, rtx, rtx);

/* Routines implemented in thead.cc (T-Head vendor extensions).  */
extern bool extract_base_offset_in_addr (rtx, rtx *, rtx *);
extern bool th_mempair_operands_p (rtx[4], bool, machine_mode);
extern void th_mempair_order_operands (rtx[4], bool, machine_mode);
extern void th_mempair_prepare_save_restore_operands (rtx[4], bool,
						      machine_mode,
						      int, HOST_WIDE_INT,
						      int, HOST_WIDE_INT);
extern void th_mempair_save_restore_regs (rtx[4], bool, machine_mode);
#ifdef RTX_CODE
extern const char*
th_mempair_output_move (rtx[4], bool, machine_mode, RTX_CODE);
extern bool th_memidx_legitimate_modify_p (rtx);
extern bool th_memidx_legitimate_modify_p (rtx, bool);
extern bool th_memidx_legitimate_index_p (rtx);
extern bool th_memidx_legitimate_index_p (rtx, bool);
extern bool th_classify_address (struct riscv_address_info *,
				rtx, machine_mode, bool);
extern const char *th_output_move (rtx, rtx);
extern bool th_print_operand_address (FILE *, machine_mode, rtx);
#endif

/* Option and attribute handling.  */
extern bool riscv_use_divmod_expander (void);
void riscv_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
extern bool
riscv_option_valid_attribute_p (tree, tree, tree, int);
extern void
riscv_override_options_internal (struct gcc_options *);
739
struct riscv_tune_param;
/* Information about one micro-arch we know about.  */
struct riscv_tune_info {
  /* This micro-arch canonical name.  */
  const char *name;

  /* The micro-architecture family this entry belongs to.  (The previous
     "Which automaton to use for tuning" comment was copy-pasted from
     riscv_cpu_info; this field is a riscv_microarchitecture_type, not a
     tune string.)  */
  enum riscv_microarchitecture_type microarchitecture;

  /* Tuning parameters for this micro-arch.  */
  const struct riscv_tune_param *tune_param;
};

/* Look up a tune entry by name.  NOTE(review): the semantics of the bool
   argument (error vs. silent fallback on unknown name?) are not visible
   here — confirm in riscv.cc.  */
const struct riscv_tune_info *
riscv_parse_tune (const char *, bool);
755
756 #endif /* ! GCC_RISCV_PROTOS_H */