/* Subroutines used for code generation for RISC-V.
   Copyright (C) 2011-2021 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#define INCLUDE_STRING
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "alias.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
#include "expr.h"
#include "optabs.h"
#include "bitmap.h"
#include "df.h"
#include "diagnostic.h"
#include "builtins.h"
#include "predict.h"
#include "tree-pass.h"

/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 12-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Information about a function's frame layout.  */
struct GTY(()) riscv_frame_info {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* How much the GPR save/restore routines adjust sp (or 0 if unused).  */
  unsigned save_libcall_adjustment;

  /* Offsets of fixed-point and floating-point save areas from frame
     bottom.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;

  /* Offset of virtual frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT frame_pointer_offset;

  /* Offset of hard frame pointer from stack pointer/frame bottom.  */
  HOST_WIDE_INT hard_frame_pointer_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;
};

enum riscv_privilege_levels {
  UNKNOWN_MODE, USER_MODE, SUPERVISOR_MODE, MACHINE_MODE
};

struct GTY(()) machine_function {
  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* True if current function is a naked function.  */
  bool naked_p;

  /* True if current function is an interrupt function.  */
  bool interrupt_handler_p;
  /* For an interrupt handler, indicates the privilege level.  */
  enum riscv_privilege_levels interrupt_mode;

  /* True if attributes on current function have been checked.  */
  bool attributes_checked_p;

  /* The current frame information, calculated by riscv_compute_frame_info.  */
  struct riscv_frame_info frame;
};

/* Information about a single argument.  */
struct riscv_arg_info {
  /* True if the argument is at least partially passed on the stack.  */
  bool stack_p;

  /* The number of integer registers allocated to this argument.  */
  unsigned int num_gprs;

  /* The offset of the first register used, provided num_gprs is nonzero.
     If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS.  */
  unsigned int gpr_offset;

  /* The number of floating-point registers allocated to this argument.  */
  unsigned int num_fprs;

  /* The offset of the first register used, provided num_fprs is nonzero.  */
  unsigned int fpr_offset;
};

/* Information about an address described by riscv_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info {
  enum riscv_address_type type;
  rtx reg;
  rtx offset;
  enum riscv_symbol_type symbol_type;
};

/* One stage in a constant building sequence.  These sequences have
   the form:

       A = VALUE[0]
       A = A CODE[1] VALUE[1]
       A = A CODE[2] VALUE[2]
       ...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct riscv_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI.  */
#define RISCV_MAX_INTEGER_OPS 8
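
/* For example, loading 0x12345678 takes two of these operations:

       A = 0x12345000          (LUI)
       A = A + 0x678           (ADDI)

   while a worst-case 64-bit constant uses all eight.  */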

/* Costs of various operations on the different architectures.  */

struct riscv_tune_param
{
  unsigned short fp_add[2];
  unsigned short fp_mul[2];
  unsigned short fp_div[2];
  unsigned short int_mul[2];
  unsigned short int_div[2];
  unsigned short issue_rate;
  unsigned short branch_cost;
  unsigned short memory_cost;
  bool slow_unaligned_access;
};

/* Information about one micro-arch we know about.  */
struct riscv_tune_info {
  /* The canonical name of this micro-arch.  */
  const char *name;

  /* Which automaton to use for tuning.  */
  enum riscv_microarchitecture_type microarchitecture;

  /* Tuning parameters for this micro-arch.  */
  const struct riscv_tune_param *tune_param;
};

/* Global variables for machine-dependent things.  */

/* Whether unaligned accesses execute very slowly.  */
bool riscv_slow_unaligned_access_p;

/* Stack alignment to assume/maintain.  */
unsigned riscv_stack_boundary;

/* If non-zero, this is an offset to be added to SP to redefine the CFA
   when restoring the FP register from the stack.  Only valid when generating
   the epilogue.  */
static int epilogue_cfa_sp_offset;

/* Which tuning parameters to use.  */
static const struct riscv_tune_param *tune_param;

/* Which automaton to use for tuning.  */
enum riscv_microarchitecture_type riscv_microarchitecture;

/* Index R is the smallest register class that contains register R.  */
const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  GR_REGS,       GR_REGS,       GR_REGS,       GR_REGS,
  GR_REGS,       GR_REGS,       SIBCALL_REGS,  SIBCALL_REGS,
  JALR_REGS,     JALR_REGS,     SIBCALL_REGS,  SIBCALL_REGS,
  SIBCALL_REGS,  SIBCALL_REGS,  SIBCALL_REGS,  SIBCALL_REGS,
  SIBCALL_REGS,  SIBCALL_REGS,  JALR_REGS,     JALR_REGS,
  JALR_REGS,     JALR_REGS,     JALR_REGS,     JALR_REGS,
  JALR_REGS,     JALR_REGS,     JALR_REGS,     JALR_REGS,
  SIBCALL_REGS,  SIBCALL_REGS,  SIBCALL_REGS,  SIBCALL_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FRAME_REGS,    FRAME_REGS,
};

/* Costs to use when optimizing for rocket.  */
static const struct riscv_tune_param rocket_tune_info = {
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},      /* fp_add */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},      /* fp_mul */
  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},    /* fp_div */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},      /* int_mul */
  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},      /* int_div */
  1,                                           /* issue_rate */
  3,                                           /* branch_cost */
  5,                                           /* memory_cost */
  true,                                        /* slow_unaligned_access */
};

/* Costs to use when optimizing for Sifive 7 Series.  */
static const struct riscv_tune_param sifive_7_tune_info = {
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},      /* fp_add */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (5)},      /* fp_mul */
  {COSTS_N_INSNS (20), COSTS_N_INSNS (20)},    /* fp_div */
  {COSTS_N_INSNS (4), COSTS_N_INSNS (4)},      /* int_mul */
  {COSTS_N_INSNS (6), COSTS_N_INSNS (6)},      /* int_div */
  2,                                           /* issue_rate */
  4,                                           /* branch_cost */
  3,                                           /* memory_cost */
  true,                                        /* slow_unaligned_access */
};

/* Costs to use when optimizing for size.  */
static const struct riscv_tune_param optimize_size_tune_info = {
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},      /* fp_add */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},      /* fp_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},      /* fp_div */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},      /* int_mul */
  {COSTS_N_INSNS (1), COSTS_N_INSNS (1)},      /* int_div */
  1,                                           /* issue_rate */
  1,                                           /* branch_cost */
  2,                                           /* memory_cost */
  false,                                       /* slow_unaligned_access */
};

static tree riscv_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree riscv_handle_type_attribute (tree *, tree, tree, int, bool *);

/* Defining target-specific uses of __attribute__.  */
static const struct attribute_spec riscv_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
	       function_type_required, affects_type_identity, handler,
	       exclude } */

  /* The attribute indicating that the function has no prologue/epilogue.  */
  { "naked", 0, 0, true, false, false, false,
    riscv_handle_fndecl_attribute, NULL },
  /* This attribute generates prologue/epilogue for interrupt handlers.  */
  { "interrupt", 0, 1, false, true, true, false,
    riscv_handle_type_attribute, NULL },

  /* The last attribute spec is set to be NULL.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

/* Order for the CLOBBERs/USEs of gpr_save.  */
static const unsigned gpr_save_reg_order[] = {
  INVALID_REGNUM, T0_REGNUM, T1_REGNUM, RETURN_ADDR_REGNUM,
  S0_REGNUM, S1_REGNUM, S2_REGNUM, S3_REGNUM, S4_REGNUM,
  S5_REGNUM, S6_REGNUM, S7_REGNUM, S8_REGNUM, S9_REGNUM,
  S10_REGNUM, S11_REGNUM
};

/* A table describing all the processors GCC knows about.  */
static const struct riscv_tune_info riscv_tune_info_table[] = {
  { "rocket", generic, &rocket_tune_info },
  { "sifive-3-series", generic, &rocket_tune_info },
  { "sifive-5-series", generic, &rocket_tune_info },
  { "sifive-7-series", sifive_7, &sifive_7_tune_info },
  { "size", generic, &optimize_size_tune_info },
};

/* Return the riscv_tune_info entry for the given name string.  */

static const struct riscv_tune_info *
riscv_parse_tune (const char *tune_string)
{
  const riscv_cpu_info *cpu = riscv_find_cpu (tune_string);

  if (cpu)
    tune_string = cpu->tune;

  for (unsigned i = 0; i < ARRAY_SIZE (riscv_tune_info_table); i++)
    if (strcmp (riscv_tune_info_table[i].name, tune_string) == 0)
      return riscv_tune_info_table + i;

  error ("unknown cpu %qs for %<-mtune%>", tune_string);
  return riscv_tune_info_table;
}

/* Helper function for riscv_build_integer; arguments are as for
   riscv_build_integer.  */

static int
riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
		       HOST_WIDE_INT value, machine_mode mode)
{
  HOST_WIDE_INT low_part = CONST_LOW_PART (value);
  int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
  struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];

  if (SMALL_OPERAND (value) || LUI_OPERAND (value))
    {
      /* Simply ADDI or LUI.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }

  /* End with ADDI.  When constructing HImode constants, do not generate any
     intermediate value that is not itself a valid HImode constant.  The
     XORI case below will handle those remaining HImode constants.  */
  if (low_part != 0
      && (mode != HImode
	  || value - low_part <= ((1 << (GET_MODE_BITSIZE (HImode) - 1)) - 1)))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = PLUS;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* End with XORI.  */
  if (cost > 2 && (low_part < 0 || mode == HImode))
    {
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = XOR;
	  alt_codes[alt_cost-1].value = low_part;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  /* Eliminate trailing zeros and end with SLLI.  */
  if (cost > 2 && (value & 1) == 0)
    {
      int shift = ctz_hwi (value);
      unsigned HOST_WIDE_INT x = value;
      x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);

      /* Don't eliminate the lower 12 bits if LUI might apply.  */
      if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
	shift -= IMM_BITS, x <<= IMM_BITS;

      alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = ASHIFT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
  return cost;
}

/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static int
riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
		     machine_mode mode)
{
  int cost = riscv_build_integer_1 (codes, value, mode);

  /* Eliminate leading zeros and end with SRLI.  */
  if (value > 0 && cost > 2)
    {
      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
      int alt_cost, shift = clz_hwi (value);
      HOST_WIDE_INT shifted_val;

      /* Try filling trailing bits with 1s.  */
      shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}

      /* Try filling trailing bits with 0s.  */
      shifted_val = value << shift;
      alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
      if (alt_cost < cost)
	{
	  alt_codes[alt_cost-1].code = LSHIFTRT;
	  alt_codes[alt_cost-1].value = shift;
	  memcpy (codes, alt_codes, sizeof (alt_codes));
	  cost = alt_cost;
	}
    }

  return cost;
}
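
/* For example, on RV64 the SRLI step above lets 0xffffffff be built in
   two operations:

       A = -1                  (ADDI)
       A = A >> 32             (SRLI)

   instead of e.g. a three-operation LI/SLLI/ADDI sequence.  */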

/* Return the cost of constructing VAL in the event that a scratch
   register is available.  */

static int
riscv_split_integer_cost (HOST_WIDE_INT val)
{
  int cost;
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];

  cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
  if (loval != hival)
    cost += riscv_build_integer (codes, hival, VOIDmode);

  return cost;
}

/* Return the cost of constructing the integer constant VAL.  */

static int
riscv_integer_cost (HOST_WIDE_INT val)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  return MIN (riscv_build_integer (codes, val, VOIDmode),
	      riscv_split_integer_cost (val));
}

/* Try to split a 64b integer into 32b parts, then reassemble.  */

static rtx
riscv_split_integer (HOST_WIDE_INT val, machine_mode mode)
{
  unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
  unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);

  riscv_move_integer (hi, hi, hival, mode, FALSE);
  riscv_move_integer (lo, lo, loval, mode, FALSE);

  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
  hi = force_reg (mode, hi);

  return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
}
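
/* The halves are sign-extended, so (HIVAL << 32) + LOVAL reconstructs VAL
   exactly: e.g. VAL = 0x123456789abcdef0 gives LOVAL = 0xffffffff9abcdef0
   and HIVAL = 0x12345679, and the borrow introduced by the negative low
   half cancels when the two are added back together.  */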

/* Return true if X is a thread-local symbol.  */

static bool
riscv_tls_symbol_p (const_rtx x)
{
  return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if symbol X binds locally.  */

static bool
riscv_symbol_binds_local_p (const_rtx x)
{
  if (SYMBOL_REF_P (x))
    return (SYMBOL_REF_DECL (x)
	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	    : SYMBOL_REF_LOCAL_P (x));
  else
    return false;
}

/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X.  */

static enum riscv_symbol_type
riscv_classify_symbol (const_rtx x)
{
  if (riscv_tls_symbol_p (x))
    return SYMBOL_TLS;

  if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
    return SYMBOL_GOT_DISP;

  return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
}

/* Classify the base of symbolic expression X.  */

enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx x)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    return UNSPEC_ADDRESS_TYPE (x);

  return riscv_classify_symbol (x);
}

/* Return true if X is a symbolic constant.  If it is, store the type of
   the symbol in *SYMBOL_TYPE.  */

bool
riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
{
  rtx offset;

  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    *symbol_type = riscv_classify_symbol (x);
  else
    return false;

  if (offset == const0_rtx)
    return true;

  /* Nonzero offsets are only valid for references that don't use the GOT.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_PCREL:
    case SYMBOL_TLS_LE:
      /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
      return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);

    default:
      return false;
    }
}

/* Returns the number of instructions necessary to reference a symbol.  */

static int riscv_symbol_insns (enum riscv_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_TLS: return 0; /* Depends on the TLS model.  */
    case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference.  */
    case SYMBOL_PCREL: return 2; /* AUIPC + the reference.  */
    case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference.  */
    case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference.  */
    default: gcc_unreachable ();
    }
}
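
/* Schematically, a word load of SYM into a0 looks like (RV64 shown for
   the GOT case):

     SYMBOL_ABSOLUTE   lui a0,%hi(sym); lw a0,%lo(sym)(a0)
     SYMBOL_PCREL      auipc a0,%pcrel_hi(sym); lw a0,%pcrel_lo(.L)(a0)
     SYMBOL_TLS_LE     lui a0,%tprel_hi(sym); add a0,a0,tp,%tprel_add(sym);
                       lw a0,%tprel_lo(sym)(a0)
     SYMBOL_GOT_DISP   auipc a0,%got_pcrel_hi(sym); ld a0,%pcrel_lo(.L)(a0);
                       lw a0,0(a0)  */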

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

static bool
riscv_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return riscv_const_insns (x) > 0;
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
riscv_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum riscv_symbol_type type;
  rtx base, offset;

  /* There is no assembler syntax for expressing an address-sized
     high part.  */
  if (GET_CODE (x) == HIGH)
    return true;

  split_const (x, &base, &offset);
  if (riscv_symbolic_constant_p (base, &type))
    {
      /* As an optimization, don't spill symbolic constants that are as
	 cheap to rematerialize as to access in the constant pool.  */
      if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
	return true;

      /* As an optimization, avoid needlessly generating dynamic
	 relocations.  */
      if (flag_pic)
	return true;
    }

  /* TLS symbols must be computed by riscv_legitimize_move.  */
  if (tls_referenced_p (x))
    return true;

  return false;
}

/* Return true if register REGNO is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

int
riscv_regno_mode_ok_for_base_p (int regno,
				machine_mode mode ATTRIBUTE_UNUSED,
				bool strict_p)
{
  if (!HARD_REGISTER_NUM_P (regno))
    {
      if (!strict_p)
	return true;
      regno = reg_renumber[regno];
    }

  /* These fake registers will be eliminated to either the stack or
     hard frame pointer, both of which are usually valid base registers.
     Reload deals with the cases where the eliminated form isn't valid.  */
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
    return true;

  return GP_REG_P (regno);
}

/* Return true if X is a valid base register for mode MODE.
   STRICT_P is true if REG_OK_STRICT is in effect.  */

static bool
riscv_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
{
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
}

/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
   can address a value of mode MODE.  */

static bool
riscv_valid_offset_p (rtx x, machine_mode mode)
{
  /* Check that X is a signed 12-bit number.  */
  if (!const_arith_operand (x, Pmode))
    return false;

  /* We may need to split multiword moves, so make sure that every word
     is accessible.  */
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
    return false;

  return true;
}
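
/* For instance, a DImode access on RV32 is split into two SImode words;
   an offset of 2044 is rejected because the second word would sit at
   2044 + 8 - 4 = 2048, one past the largest signed 12-bit offset of
   2047.  */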

/* Should a symbol of type SYMBOL_TYPE be split in two?  */

bool
riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
{
  if (symbol_type == SYMBOL_TLS_LE)
    return true;

  if (!TARGET_EXPLICIT_RELOCS)
    return false;

  return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
}

/* Return true if a LO_SUM can address a value of mode MODE when the
   LO_SUM symbol has type SYM_TYPE.  X is the LO_SUM's second operand;
   it is only inspected when MODE is BLKmode.  */

static bool
riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode,
		      rtx x)
{
  int align, size;

  /* Check that symbols of type SYM_TYPE can be used to access values
     of mode MODE.  */
  if (riscv_symbol_insns (sym_type) == 0)
    return false;

  /* Check that there is a known low-part relocation.  */
  if (!riscv_split_symbol_type (sym_type))
    return false;

  /* We can't tell size or alignment when we have BLKmode, so try extracting a
     decl from the symbol if possible.  */
  if (mode == BLKmode)
    {
      rtx offset;

      /* Extract the symbol from the LO_SUM operand, if any.  */
      split_const (x, &x, &offset);

      /* Might be a CODE_LABEL.  We can compute align but not size for that,
	 so don't bother trying to handle it.  */
      if (!SYMBOL_REF_P (x))
	return false;

      /* Use worst case assumptions if we don't have a SYMBOL_REF_DECL.  */
      align = (SYMBOL_REF_DECL (x)
	       ? DECL_ALIGN (SYMBOL_REF_DECL (x))
	       : 1);
      size = (SYMBOL_REF_DECL (x) && DECL_SIZE (SYMBOL_REF_DECL (x))
	      ? tree_to_uhwi (DECL_SIZE (SYMBOL_REF_DECL (x)))
	      : 2*BITS_PER_WORD);
    }
  else
    {
      align = GET_MODE_ALIGNMENT (mode);
      size = GET_MODE_BITSIZE (mode);
    }

  /* We may need to split multiword moves, so make sure that each word
     can be accessed without inducing a carry.  */
  if (size > BITS_PER_WORD
      && (!TARGET_STRICT_ALIGN || size > align))
    return false;

  return true;
}

/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
riscv_classify_address (struct riscv_address_info *info, rtx x,
			machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return riscv_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_offset_p (info->offset, mode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The RISC-V
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of _gp.  */
      info->symbol_type
	= riscv_classify_symbolic_expression (info->offset);
      return (riscv_valid_base_register_p (info->reg, mode, strict_p)
	      && riscv_valid_lo_sum_p (info->symbol_type, mode, info->offset));

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if x0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return SMALL_OPERAND (INTVAL (x));

    default:
      return false;
    }
}

/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */

static bool
riscv_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
{
  struct riscv_address_info addr;

  return riscv_classify_address (&addr, x, mode, strict_p);
}

/* Return true if hard reg REGNO can be used in compressed instructions.  */

static bool
riscv_compressed_reg_p (int regno)
{
  /* x8-x15/f8-f15 are compressible registers.  */
  return (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
	  || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)));
}

/* Return true if X is an unsigned 5-bit immediate scaled by 4.  */

static bool
riscv_compressed_lw_offset_p (rtx x)
{
  return (CONST_INT_P (x)
	  && (INTVAL (x) & 3) == 0
	  && IN_RANGE (INTVAL (x), 0, CSW_MAX_OFFSET));
}
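
/* That is, the offsets a compressed load/store word can encode: multiples
   of 4 from 0 up to CSW_MAX_OFFSET (124 for a 5-bit, scaled-by-4
   immediate).  */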

/* Return true if load/store from/to address X can be compressed.  */

static bool
riscv_compressed_lw_address_p (rtx x)
{
  struct riscv_address_info addr;
  bool result = riscv_classify_address (&addr, x, GET_MODE (x),
					reload_completed);

  /* Before reload, assuming all load/stores of valid addresses get compressed
     gives better code size than checking if the address is reg + small_offset
     early on.  */
  if (result && !reload_completed)
    return true;

  /* Return false if address is not compressed_reg + small_offset.  */
  if (!result
      || addr.type != ADDRESS_REG
      || (!riscv_compressed_reg_p (REGNO (addr.reg))
	  && addr.reg != stack_pointer_rtx)
      || !riscv_compressed_lw_offset_p (addr.offset))
    return false;

  return result;
}

/* Return the number of instructions needed to load or store a value
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.  */

int
riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
{
  struct riscv_address_info addr = {};
  int n = 1;

  if (!riscv_classify_address (&addr, x, mode, false))
    {
      /* This could be a pattern from the pic.md file, in which case we want
	 this address to always have a cost of 3 to make it as expensive as
	 the most expensive symbol.  This prevents constant propagation from
	 preferring symbols over register plus offset.  */
      return 3;
    }

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  */
  if (mode != BLKmode && might_split_p)
    n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (addr.type == ADDRESS_LO_SUM)
    n += riscv_symbol_insns (addr.symbol_type) - 1;

  return n;
}

/* Return the number of instructions needed to load constant X.
   Return 0 if X isn't a valid constant.  */

int
riscv_const_insns (rtx x)
{
  enum riscv_symbol_type symbol_type;
  rtx offset;

  switch (GET_CODE (x))
    {
    case HIGH:
      if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
	  || !riscv_split_symbol_type (symbol_type))
	return 0;

      /* This is simply an LUI.  */
      return 1;

    case CONST_INT:
      {
	int cost = riscv_integer_cost (INTVAL (x));
	/* Force complicated constants to memory.  */
	return cost < 4 ? cost : 0;
      }

    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* We can use x0 to load floating-point zero.  */
      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

    case CONST:
      /* See if we can refer to X directly.  */
      if (riscv_symbolic_constant_p (x, &symbol_type))
	return riscv_symbol_insns (symbol_type);

      /* Otherwise try splitting the constant into a base and offset.  */
      split_const (x, &x, &offset);
      if (offset != 0)
	{
	  int n = riscv_const_insns (x);
	  if (n != 0)
	    return n + riscv_integer_cost (INTVAL (offset));
	}
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return riscv_symbol_insns (riscv_classify_symbol (x));

    default:
      return 0;
    }
}

/* X is a doubleword constant that can be handled by splitting it into
   two words and loading each word separately.  Return the number of
   instructions required to do this.  */

int
riscv_split_const_insns (rtx x)
{
  unsigned int low, high;

  low = riscv_const_insns (riscv_subword (x, false));
  high = riscv_const_insns (riscv_subword (x, true));
  gcc_assert (low > 0 && high > 0);
  return low + high;
}

/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.  */

int
riscv_load_store_insns (rtx mem, rtx_insn *insn)
{
  machine_mode mode;
  bool might_split_p;
  rtx set;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = true;
  if (GET_MODE_BITSIZE (mode) <= 32)
    might_split_p = false;
  else if (GET_MODE_BITSIZE (mode) == 64)
    {
      set = single_set (insn);
      if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
	might_split_p = false;
    }

  return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
}

/* Emit a move from SRC to DEST.  Assume that the move expanders can
   handle all moves if !can_create_pseudo_p ().  The distinction is
   important because, unlike emit_move_insn, the move expanders know
   how to force Pmode objects into the constant pool even when the
   constant pool address is not itself legitimate.  */

rtx
riscv_emit_move (rtx dest, rtx src)
{
  return (can_create_pseudo_p ()
	  ? emit_move_insn (dest, src)
	  : emit_move_insn_1 (dest, src));
}

/* Emit an instruction of the form (set TARGET SRC).  */

static rtx
riscv_emit_set (rtx target, rtx src)
{
  emit_insn (gen_rtx_SET (target, src));
  return target;
}

/* Emit an instruction of the form (set DEST (CODE X Y)).  */

static rtx
riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
{
  return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
}

/* Compute (CODE X Y) and store the result in a new register
   of mode MODE.  Return that new register.  */

static rtx
riscv_force_binary (machine_mode mode, enum rtx_code code, rtx x, rtx y)
{
  return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
}

/* Copy VALUE to a register and return that register.  If new pseudos
   are allowed, copy it into a new register, otherwise use DEST.  */

static rtx
riscv_force_temporary (rtx dest, rtx value, bool in_splitter)
{
  /* We can't call gen_reg_rtx from a splitter, because this might realloc
     the regno_reg_rtx array, which would invalidate reg rtx pointers in the
     combine undo buffer.  */
  if (can_create_pseudo_p () && !in_splitter)
    return force_reg (Pmode, value);
  else
    {
      riscv_emit_move (dest, value);
      return dest;
    }
}

/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.  */

static rtx
riscv_unspec_address_offset (rtx base, rtx offset,
			     enum riscv_symbol_type symbol_type)
{
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
			 UNSPEC_ADDRESS_FIRST + symbol_type);
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}

/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

rtx
riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
{
  rtx base, offset;

  split_const (address, &base, &offset);
  return riscv_unspec_address_offset (base, offset, symbol_type);
}

/* If OP is an UNSPEC address, return the address to which it refers,
   otherwise return OP itself.  */

static rtx
riscv_strip_unspec_address (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
  return op;
}

/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for riscv_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
{
  addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
  return riscv_force_temporary (temp, addr, FALSE);
}

/* Load an entry from the GOT for a TLS GD access.  */

static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_gddi (dest, sym);
  else
    return gen_got_load_tls_gdsi (dest, sym);
}

/* Load an entry from the GOT for a TLS IE access.  */

static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
{
  if (Pmode == DImode)
    return gen_got_load_tls_iedi (dest, sym);
  else
    return gen_got_load_tls_iesi (dest, sym);
}

/* Add in the thread pointer for a TLS LE access.  */

static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
{
  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
  if (Pmode == DImode)
    return gen_tls_add_tp_ledi (dest, base, tp, sym);
  else
    return gen_tls_add_tp_lesi (dest, base, tp, sym);
}

/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for riscv_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimate SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
riscv_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out,
		    bool in_splitter)
{
  enum riscv_symbol_type symbol_type;

  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
      || !riscv_symbolic_constant_p (addr, &symbol_type)
      || riscv_symbol_insns (symbol_type) == 0
      || !riscv_split_symbol_type (symbol_type))
    return false;

  if (low_out)
    switch (symbol_type)
      {
      case SYMBOL_ABSOLUTE:
	{
	  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
	  high = riscv_force_temporary (temp, high, in_splitter);
	  *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
	}
	break;

      case SYMBOL_PCREL:
	{
	  static unsigned seqno;
	  char buf[32];
	  rtx label;

	  ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
	  gcc_assert ((size_t) bytes < sizeof (buf));

	  label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
	  SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;
	  /* ??? Ugly hack to make weak symbols work.  May need to change the
	     RTL for the auipc and/or low patterns to get a better fix for
	     this.  */
	  if (! nonzero_address_p (addr))
	    SYMBOL_REF_WEAK (label) = 1;

	  if (temp == NULL)
	    temp = gen_reg_rtx (Pmode);

	  if (Pmode == DImode)
	    emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
	  else
	    emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));

	  *low_out = gen_rtx_LO_SUM (Pmode, temp, label);

	  seqno++;
	}
	break;

      default:
	gcc_unreachable ();
      }

  return true;
}

/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   riscv_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
	 The addition inside the macro CONST_HIGH_PART may cause an
	 overflow, so we need to force a sign-extension check.  */
      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
      offset = CONST_LOW_PART (offset);
      high = riscv_force_temporary (temp, high, FALSE);
      reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg),
				   FALSE);
    }
  return plus_constant (Pmode, reg, offset);
}
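
/* For example, OFFSET = 0x12345 is not a SMALL_OPERAND, so it is split
   into HIGH = 0x12000 (loadable with one LUI) and a residual low part
   of 0x345 that fits in a 12-bit immediate.  */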

/* The __tls_get_addr symbol.  */
static GTY(()) rtx riscv_tls_symbol;

/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing.  RESULT is an RTX for the
   return value location.  */

static rtx_insn *
riscv_call_tls_get_addr (rtx sym, rtx result)
{
  rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;
  rtx_insn *insn;

  if (!riscv_tls_symbol)
    riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
  func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);

  start_sequence ();

  emit_insn (riscv_got_load_tls_gd (a0, sym));
  insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}

/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
riscv_legitimize_tls_address (rtx loc)
{
  rtx dest, tp, tmp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);

#if 0
  /* TLS copy relocs are now deprecated and should not be used.  */
  /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE.  */
  if (!flag_pic)
    model = TLS_MODEL_LOCAL_EXEC;
#endif

  switch (model)
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      /* Rely on section anchors for the optimization that LDM TLS
	 provides.  The anchor's address is loaded with GD TLS.  */
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_rtx_REG (Pmode, GP_RETURN);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);
      break;

    case TLS_MODEL_INITIAL_EXEC:
      /* la.tls.ie; tp-relative add */
      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
      tmp = gen_reg_rtx (Pmode);
      emit_insn (riscv_got_load_tls_ie (tmp, loc));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
      dest = gen_reg_rtx (Pmode);
      emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
      dest = gen_rtx_LO_SUM (Pmode, dest,
			     riscv_unspec_address (loc, SYMBOL_TLS_LE));
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
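
/* Roughly, the sequences emitted for LOC = x are:

     global/local dynamic   la.tls.gd a0,x; call __tls_get_addr
     initial exec           la.tls.ie t0,x; add a0,t0,tp
     local exec             lui t0,%tprel_hi(x); add t0,t0,tp,%tprel_add(x);
                            (reference via %tprel_lo(x)(t0))  */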

/* If X is not a valid address for mode MODE, force it into a register.  */

static rtx
riscv_force_address (rtx x, machine_mode mode)
{
  if (!riscv_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
  return x;
}

/* Modify base + offset so that offset fits within a compressed load/store
   insn and the excess is added to base.  */

static rtx
riscv_shorten_lw_offset (rtx base, HOST_WIDE_INT offset)
{
  rtx addr, high;
  /* Leave OFFSET as an unsigned 5-bit offset scaled by 4 and put the excess
     into HIGH.  */
  high = GEN_INT (offset & ~CSW_MAX_OFFSET);
  offset &= CSW_MAX_OFFSET;
  if (!SMALL_OPERAND (INTVAL (high)))
    high = force_reg (Pmode, high);
  base = force_reg (Pmode, gen_rtx_PLUS (Pmode, high, base));
  addr = plus_constant (Pmode, base, offset);
  return addr;
}
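
/* E.g. BASE + 0x90 becomes (BASE + 0x80) + 0x10, so the final access uses
   an offset small enough for a compressed load/store.  */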

/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  machine_mode mode)
{
  rtx addr;

  if (riscv_tls_symbol_p (x))
    return riscv_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (riscv_split_symbol (NULL, x, mode, &addr, FALSE))
    return riscv_force_address (addr, mode);

  /* Handle BASE + OFFSET.  */
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) != 0)
    {
      rtx base = XEXP (x, 0);
      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));

      if (!riscv_valid_base_register_p (base, mode, false))
	base = copy_to_mode_reg (Pmode, base);
      if (optimize_function_for_size_p (cfun)
	  && (strcmp (current_pass->name, "shorten_memrefs") == 0)
	  && mode == SImode)
	/* Convert BASE + LARGE_OFFSET into NEW_BASE + SMALL_OFFSET to allow
	   possible compressed load/store.  */
	addr = riscv_shorten_lw_offset (base, offset);
      else
	addr = riscv_add_offset (NULL, base, offset);
      return riscv_force_address (addr, mode);
    }

  return x;
}

/* Load VALUE into DEST.  TEMP is as for riscv_force_temporary.  ORIG_MODE
   is the original src mode before promotion.  */

void
riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value,
		    machine_mode orig_mode, bool in_splitter)
{
  struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
  machine_mode mode;
  int i, num_ops;
  rtx x;

  /* We can't call gen_reg_rtx from a splitter, because this might realloc
     the regno_reg_rtx array, which would invalidate reg rtx pointers in the
     combine undo buffer.  */
  bool can_create_pseudo = can_create_pseudo_p () && ! in_splitter;

  mode = GET_MODE (dest);
  /* We use the original mode for the riscv_build_integer call, because HImode
     values are given special treatment.  */
  num_ops = riscv_build_integer (codes, value, orig_mode);

  if (can_create_pseudo && num_ops > 2 /* not a simple constant */
      && num_ops >= riscv_split_integer_cost (value))
    x = riscv_split_integer (value, mode);
  else
    {
      /* Apply each binary operation to X.  */
      x = GEN_INT (codes[0].value);

      for (i = 1; i < num_ops; i++)
	{
	  if (!can_create_pseudo)
	    x = riscv_emit_set (temp, x);
	  else
	    x = force_reg (mode, x);

	  x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
	}
    }

  riscv_emit_set (dest, x);
}

/* Subroutine of riscv_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
riscv_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  */
  if (splittable_const_int_operand (src, mode))
    {
      riscv_move_integer (dest, dest, INTVAL (src), mode, FALSE);
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src, FALSE))
    {
      riscv_emit_set (dest, src);
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (riscv_tls_symbol_p (src))
    {
      riscv_emit_move (dest, riscv_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.  Also
     prefer to do this even if the constant _can_ be forced into memory, as it
     usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
    {
      base = riscv_force_temporary (dest, base, FALSE);
      riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0), FALSE);
  riscv_emit_move (dest, src);
}

/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  */

bool
riscv_legitimize_move (machine_mode mode, rtx dest, rtx src)
{
  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
    {
      rtx reg;

      if (GET_CODE (src) == CONST_INT)
	{
	  /* Apply the equivalent of PROMOTE_MODE here for constants to
	     improve cse.  */
	  machine_mode promoted_mode = mode;
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	    promoted_mode = word_mode;

	  if (splittable_const_int_operand (src, mode))
	    {
	      reg = gen_reg_rtx (promoted_mode);
	      riscv_move_integer (reg, reg, INTVAL (src), mode, FALSE);
	    }
	  else
	    reg = force_reg (promoted_mode, src);

	  if (promoted_mode != mode)
	    reg = gen_lowpart (mode, reg);
	}
      else
	reg = force_reg (mode, src);
      riscv_emit_move (dest, reg);
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      riscv_legitimize_const_move (mode, dest, src);
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }

  /* The RISC-V backend may generate a non-legitimate address because it
     provides patterns to optimize access to PIC local symbols, which can
     make GCC emit unrecognizable instructions during optimization.  */

  if (MEM_P (dest) && !riscv_legitimate_address_p (mode, XEXP (dest, 0),
						   reload_completed))
    {
      XEXP (dest, 0) = riscv_force_address (XEXP (dest, 0), mode);
    }

  if (MEM_P (src) && !riscv_legitimate_address_p (mode, XEXP (src, 0),
						  reload_completed))
    {
      XEXP (src, 0) = riscv_force_address (XEXP (src, 0), mode);
    }

  return false;
}

/* Return true if there is an instruction that implements CODE and accepts
   X as an immediate operand.  */

static int
riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case AND:
    case IOR:
    case XOR:
    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 12-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case GE:
    case GEU:
      /* We can emulate an immediate of 1 by using GT/GTU against x0.  */
      return x == 1;

    default:
      /* By default assume that x0 can be used for 0.  */
      return x == 0;
    }
}
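
/* For example, A <= 100 is handled as A < 101 (SLTI with immediate 101),
   which is why the LE case above checks SMALL_OPERAND (x + 1).  */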

/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation takes SINGLE_INSNS
   instructions and that the sequence of a double-word operation takes
   DOUBLE_INSNS instructions.  */

static int
riscv_binary_cost (rtx x, int single_insns, int double_insns)
{
  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    return COSTS_N_INSNS (double_insns);
  return COSTS_N_INSNS (single_insns);
}

/* Return the cost of sign- or zero-extending OP.  */

static int
riscv_extend_cost (rtx op, bool unsigned_p)
{
  if (MEM_P (op))
    return 0;

  if (unsigned_p && GET_MODE (op) == QImode)
    /* We can use ANDI.  */
    return COSTS_N_INSNS (1);

  if (!unsigned_p && GET_MODE (op) == SImode)
    /* We can use SEXT.W.  */
    return COSTS_N_INSNS (1);

  /* We need to use a shift left and a shift right.  */
  return COSTS_N_INSNS (2);
}
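
/* The two-instruction fallback is e.g. zero-extending a 16-bit value on
   RV64 with "slli a0,a0,48; srli a0,a0,48"; extending from memory is free
   because the load itself (LBU, LH, ...) performs the extension.  */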

/* Implement TARGET_RTX_COSTS.  */

#define SINGLE_SHIFT_COST 1

static bool
riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
		 int *total, bool speed)
{
  bool float_mode_p = FLOAT_MODE_P (mode);
  int cost;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
	{
	  *total = 0;
	  return true;
	}
      /* Fall through.  */

    case SYMBOL_REF:
    case LABEL_REF:
    case CONST_DOUBLE:
    case CONST:
      if ((cost = riscv_const_insns (x)) > 0)
	{
	  /* If the constant is likely to be stored in a GPR, SETs of
	     single-insn constants are as cheap as register sets; we
	     never want to CSE them.  */
	  if (cost == 1 && outer_code == SET)
	    *total = 0;
	  /* When we load a constant more than once, it usually is better
	     to duplicate the last operation in the sequence than to CSE
	     the constant itself.  */
	  else if (outer_code == SET || GET_MODE (x) == VOIDmode)
	    *total = COSTS_N_INSNS (1);
	}
      else /* The instruction will be fetched from the constant pool.  */
	*total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
      return true;

    case MEM:
      /* If the address is legitimate, return the number of
	 instructions it needs.  */
      if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
	{
	  *total = COSTS_N_INSNS (cost + tune_param->memory_cost);
	  return true;
	}
      /* Otherwise use the default handling.  */
      return false;

    case NOT:
      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
      return false;

    case AND:
    case IOR:
    case XOR:
      /* Double-word operations use two single-word operations.  */
      *total = riscv_binary_cost (x, 1, 2);
      return false;

    case ZERO_EXTRACT:
      /* This is an SImode shift.  */
      if (outer_code == SET
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && (INTVAL (XEXP (x, 2)) > 0)
	  && (INTVAL (XEXP (x, 1)) + INTVAL (XEXP (x, 2)) == 32))
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = riscv_binary_cost (x, SINGLE_SHIFT_COST,
				  CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
      return false;

    case ABS:
      *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
      return false;

    case LO_SUM:
      *total = set_src_cost (XEXP (x, 0), mode, speed);
      return true;

    case LT:
      /* This is an SImode shift.  */
      if (outer_code == SET && GET_MODE (x) == DImode
	  && GET_MODE (XEXP (x, 0)) == SImode)
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      /* Fall through.  */
    case LTU:
    case LE:
    case LEU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case EQ:
    case NE:
      /* Branch comparisons have VOIDmode, so use the first operand's
	 mode instead.  */
      mode = GET_MODE (XEXP (x, 0));
      if (float_mode_p)
	*total = tune_param->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 3);
      return false;

    case UNORDERED:
    case ORDERED:
      /* (FEQ(A, A) & FEQ(B, B)) compared against 0.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_param->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
      return false;

    case UNEQ:
      /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B).  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_param->fp_add[mode == DFmode] + COSTS_N_INSNS (3);
      return false;

    case LTGT:
      /* (FLT(A, A) || FGT(B, B)).  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_param->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
      return false;

    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      /* FLT or FLE, but guarded by an FFLAGS read and write.  */
      mode = GET_MODE (XEXP (x, 0));
      *total = tune_param->fp_add[mode == DFmode] + COSTS_N_INSNS (4);
      return false;

    case MINUS:
    case PLUS:
      if (float_mode_p)
	*total = tune_param->fp_add[mode == DFmode];
      else
	*total = riscv_binary_cost (x, 1, 4);
      return false;

    case NEG:
      {
	rtx op = XEXP (x, 0);
	if (GET_CODE (op) == FMA && !HONOR_SIGNED_ZEROS (mode))
	  {
	    *total = (tune_param->fp_mul[mode == DFmode]
		      + set_src_cost (XEXP (op, 0), mode, speed)
		      + set_src_cost (XEXP (op, 1), mode, speed)
		      + set_src_cost (XEXP (op, 2), mode, speed));
	    return true;
	  }
      }

      if (float_mode_p)
	*total = tune_param->fp_add[mode == DFmode];
      else
	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
      return false;

    case MULT:
      if (float_mode_p)
	*total = tune_param->fp_mul[mode == DFmode];
      else if (!TARGET_MUL)
	/* Estimate the cost of a library call.  */
	*total = COSTS_N_INSNS (speed ? 32 : 6);
      else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
	*total = 3 * tune_param->int_mul[0] + COSTS_N_INSNS (2);
      else if (!speed)
	*total = COSTS_N_INSNS (1);
      else
	*total = tune_param->int_mul[mode == DImode];
      return false;

    case DIV:
    case SQRT:
    case MOD:
      if (float_mode_p)
	{
	  *total = tune_param->fp_div[mode == DFmode];
	  return false;
	}
      /* Fall through.  */

    case UDIV:
    case UMOD:
      if (!TARGET_DIV)
	/* Estimate the cost of a library call.  */
	*total = COSTS_N_INSNS (speed ? 32 : 6);
      else if (speed)
	*total = tune_param->int_div[mode == DImode];
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case ZERO_EXTEND:
      /* This is an SImode shift.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT)
	{
	  *total = COSTS_N_INSNS (SINGLE_SHIFT_COST);
	  return true;
	}
      /* Fall through.  */
    case SIGN_EXTEND:
      *total = riscv_extend_cost (XEXP (x, 0), GET_CODE (x) == ZERO_EXTEND);
      return false;

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = tune_param->fp_add[mode == DFmode];
      return false;

    case FMA:
      *total = (tune_param->fp_mul[mode == DFmode]
		+ set_src_cost (XEXP (x, 0), mode, speed)
		+ set_src_cost (XEXP (x, 1), mode, speed)
		+ set_src_cost (XEXP (x, 2), mode, speed));
      return true;

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_AUIPC)
	{
	  /* Make AUIPC cheap to avoid spilling its result to the stack.  */
	  *total = 1;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
1913
1914 /* Implement TARGET_ADDRESS_COST. */
1915
1916 static int
1917 riscv_address_cost (rtx addr, machine_mode mode,
1918 addr_space_t as ATTRIBUTE_UNUSED,
1919 bool speed ATTRIBUTE_UNUSED)
1920 {
1921 /* When optimizing for size, make uncompressible 32-bit addresses more
1922 expensive so that compressible 32-bit addresses are preferred. */
1923 if (TARGET_RVC && !speed && riscv_mshorten_memrefs && mode == SImode
1924 && !riscv_compressed_lw_address_p (addr))
1925 return riscv_address_insns (addr, mode, false) + 1;
1926 return riscv_address_insns (addr, mode, false);
1927 }
1928
1929 /* Return one word of double-word value OP. HIGH_P is true to select the
1930 high part or false to select the low part. */
1931
1932 rtx
1933 riscv_subword (rtx op, bool high_p)
1934 {
1935 unsigned int byte = high_p ? UNITS_PER_WORD : 0;
1936 machine_mode mode = GET_MODE (op);
1937
1938 if (mode == VOIDmode)
1939 mode = TARGET_64BIT ? TImode : DImode;
1940
1941 if (MEM_P (op))
1942 return adjust_address (op, word_mode, byte);
1943
1944 if (REG_P (op))
1945 gcc_assert (!FP_REG_RTX_P (op));
1946
1947 return simplify_gen_subreg (word_mode, op, mode, byte);
1948 }
1949
1950 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
1951
1952 bool
1953 riscv_split_64bit_move_p (rtx dest, rtx src)
1954 {
1955 if (TARGET_64BIT)
1956 return false;
1957
1958 /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
1959 of zeroing an FPR with FCVT.D.W. */
1960 if (TARGET_DOUBLE_FLOAT
1961 && ((FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
1962 || (FP_REG_RTX_P (dest) && MEM_P (src))
1963 || (FP_REG_RTX_P (src) && MEM_P (dest))
1964 || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))))
1965 return false;
1966
1967 return true;
1968 }
1969
1970 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
1971 this function handles 64-bit moves for which riscv_split_64bit_move_p
1972 holds. For 64-bit targets, this function handles 128-bit moves. */
1973
1974 void
1975 riscv_split_doubleword_move (rtx dest, rtx src)
1976 {
1977 rtx low_dest;
1978
1979 /* The operation can be split into two normal moves. Decide in
1980 which order to do them. */
1981 low_dest = riscv_subword (dest, false);
1982 if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
1983 {
1984 riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
1985 riscv_emit_move (low_dest, riscv_subword (src, false));
1986 }
1987 else
1988 {
1989 riscv_emit_move (low_dest, riscv_subword (src, false));
1990 riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
1991 }
1992 }
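/* Illustrative example (assuming the usual little-endian pairing, low
   word in the lower-numbered register): on RV32, a DImode copy whose
   source lives in (a0,a1) and whose destination is (a1,a2) has
   low_dest == a1, which overlaps the source, so the high word is
   moved first:

     mv a2,a1   # high word
     mv a1,a0   # low word

   Moving the low word first would clobber a1 before it is read.  */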
1993 \f
1994 /* Return the appropriate instructions to move SRC into DEST. Assume
1995 that SRC is operand 1 and DEST is operand 0. */
1996
1997 const char *
1998 riscv_output_move (rtx dest, rtx src)
1999 {
2000 enum rtx_code dest_code, src_code;
2001 machine_mode mode;
2002 bool dbl_p;
2003
2004 dest_code = GET_CODE (dest);
2005 src_code = GET_CODE (src);
2006 mode = GET_MODE (dest);
2007 dbl_p = (GET_MODE_SIZE (mode) == 8);
2008
2009 if (dbl_p && riscv_split_64bit_move_p (dest, src))
2010 return "#";
2011
2012 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2013 {
2014 if (src_code == REG && FP_REG_P (REGNO (src)))
2015 return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.w\t%0,%1";
2016
2017 if (src_code == MEM)
2018 switch (GET_MODE_SIZE (mode))
2019 {
2020 case 1: return "lbu\t%0,%1";
2021 case 2: return "lhu\t%0,%1";
2022 case 4: return "lw\t%0,%1";
2023 case 8: return "ld\t%0,%1";
2024 }
2025
2026 if (src_code == CONST_INT)
2027 return "li\t%0,%1";
2028
2029 if (src_code == HIGH)
2030 return "lui\t%0,%h1";
2031
2032 if (symbolic_operand (src, VOIDmode))
2033 switch (riscv_classify_symbolic_expression (src))
2034 {
2035 case SYMBOL_GOT_DISP: return "la\t%0,%1";
2036 case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
2037 case SYMBOL_PCREL: return "lla\t%0,%1";
2038 default: gcc_unreachable ();
2039 }
2040 }
2041 if ((src_code == REG && GP_REG_P (REGNO (src)))
2042 || (src == CONST0_RTX (mode)))
2043 {
2044 if (dest_code == REG)
2045 {
2046 if (GP_REG_P (REGNO (dest)))
2047 return "mv\t%0,%z1";
2048
2049 if (FP_REG_P (REGNO (dest)))
2050 {
2051 if (!dbl_p)
2052 return "fmv.w.x\t%0,%z1";
2053 if (TARGET_64BIT)
2054 return "fmv.d.x\t%0,%z1";
2055 /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w. */
2056 gcc_assert (src == CONST0_RTX (mode));
2057 return "fcvt.d.w\t%0,x0";
2058 }
2059 }
2060 if (dest_code == MEM)
2061 switch (GET_MODE_SIZE (mode))
2062 {
2063 case 1: return "sb\t%z1,%0";
2064 case 2: return "sh\t%z1,%0";
2065 case 4: return "sw\t%z1,%0";
2066 case 8: return "sd\t%z1,%0";
2067 }
2068 }
2069 if (src_code == REG && FP_REG_P (REGNO (src)))
2070 {
2071 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2072 return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
2073
2074 if (dest_code == MEM)
2075 return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
2076 }
2077 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2078 {
2079 if (src_code == MEM)
2080 return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
2081 }
2082 gcc_unreachable ();
2083 }
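/* Usage sketch (illustrative): for a 32-bit load such as
   (set (reg:SI a0) (mem:SI (reg:SI a1))) the GPR/MEM arm above yields
   "lw\t%0,%1", which the output pass renders as

     lw a0,0(a1)

   A doubleword GPR move on RV32 returns "#" instead, signalling that
   the insn must be split via riscv_split_doubleword_move.  */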
2084
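/* Return the assembly template for a function return: the empty string
   for naked functions, whose bodies supply their own return sequence,
   and a plain "ret" otherwise.  */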
2085 const char *
2086 riscv_output_return ()
2087 {
2088 if (cfun->machine->naked_p)
2089 return "";
2090
2091 return "ret";
2092 }
2093
2094 \f
2095 /* Return true if CMP1 is a suitable second operand for integer ordering
2096 test CODE. See also the *sCC patterns in riscv.md. */
2097
2098 static bool
2099 riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
2100 {
2101 switch (code)
2102 {
2103 case GT:
2104 case GTU:
2105 return reg_or_0_operand (cmp1, VOIDmode);
2106
2107 case GE:
2108 case GEU:
2109 return cmp1 == const1_rtx;
2110
2111 case LT:
2112 case LTU:
2113 return arith_operand (cmp1, VOIDmode);
2114
2115 case LE:
2116 return sle_operand (cmp1, VOIDmode);
2117
2118 case LEU:
2119 return sleu_operand (cmp1, VOIDmode);
2120
2121 default:
2122 gcc_unreachable ();
2123 }
2124 }
2125
2126 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
2127 integer ordering test *CODE, or if an equivalent combination can
2128 be formed by adjusting *CODE and *CMP1. When returning true, update
2129 *CODE and *CMP1 with the chosen code and operand, otherwise leave
2130 them alone. */
2131
2132 static bool
2133 riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
2134 machine_mode mode)
2135 {
2136 HOST_WIDE_INT plus_one;
2137
2138 if (riscv_int_order_operand_ok_p (*code, *cmp1))
2139 return true;
2140
2141 if (CONST_INT_P (*cmp1))
2142 switch (*code)
2143 {
2144 case LE:
2145 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
2146 if (INTVAL (*cmp1) < plus_one)
2147 {
2148 *code = LT;
2149 *cmp1 = force_reg (mode, GEN_INT (plus_one));
2150 return true;
2151 }
2152 break;
2153
2154 case LEU:
2155 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
2156 if (plus_one != 0)
2157 {
2158 *code = LTU;
2159 *cmp1 = force_reg (mode, GEN_INT (plus_one));
2160 return true;
2161 }
2162 break;
2163
2164 default:
2165 break;
2166 }
2167 return false;
2168 }
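/* Worked example (illustrative): a signed test x <= 2047 fails
   sle_operand, since 2048 is not a SMALL_OPERAND; the LE case above
   therefore rewrites it as x < 2048 with 2048 forced into a register,
   which maps onto a single SLT.  A test such as x <= 5 needs no
   rewrite: the *sCC patterns emit slti a0,a1,6 directly.  */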
2169
2170 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
2171 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
2172 is nonnull, it's OK to set TARGET to the inverse of the result and
2173 flip *INVERT_PTR instead. */
2174
2175 static void
2176 riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
2177 rtx target, rtx cmp0, rtx cmp1)
2178 {
2179 machine_mode mode;
2180
2181 /* First see if there is a RISCV instruction that can do this operation.
2182 If not, try doing the same for the inverse operation. If that also
2183 fails, force CMP1 into a register and try again. */
2184 mode = GET_MODE (cmp0);
2185 if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
2186 riscv_emit_binary (code, target, cmp0, cmp1);
2187 else
2188 {
2189 enum rtx_code inv_code = reverse_condition (code);
2190 if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
2191 {
2192 cmp1 = force_reg (mode, cmp1);
2193 riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
2194 }
2195 else if (invert_ptr == 0)
2196 {
2197 rtx inv_target = riscv_force_binary (GET_MODE (target),
2198 inv_code, cmp0, cmp1);
2199 riscv_emit_binary (XOR, target, inv_target, const1_rtx);
2200 }
2201 else
2202 {
2203 *invert_ptr = !*invert_ptr;
2204 riscv_emit_binary (inv_code, target, cmp0, cmp1);
2205 }
2206 }
2207 }
2208
2209 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2210 The register will have the same mode as CMP0. */
2211
2212 static rtx
2213 riscv_zero_if_equal (rtx cmp0, rtx cmp1)
2214 {
2215 if (cmp1 == const0_rtx)
2216 return cmp0;
2217
2218 return expand_binop (GET_MODE (cmp0), sub_optab,
2219 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2220 }
2221
2222 /* Sign- or zero-extend OP0 and OP1 for integer comparisons. */
2223
2224 static void
2225 riscv_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
2226 {
2227 /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */
2228 if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
2229 {
2230 /* It is more profitable to zero-extend QImode values. But not if the
2231 first operand has already been sign-extended, and the second one
2232 is a constant or has already been sign-extended also. */
2233 if (unsigned_condition (code) == code
2234 && (GET_MODE (*op0) == QImode
2235 && ! (GET_CODE (*op0) == SUBREG
2236 && SUBREG_PROMOTED_VAR_P (*op0)
2237 && SUBREG_PROMOTED_SIGNED_P (*op0)
2238 && (CONST_INT_P (*op1)
2239 || (GET_CODE (*op1) == SUBREG
2240 && SUBREG_PROMOTED_VAR_P (*op1)
2241 && SUBREG_PROMOTED_SIGNED_P (*op1))))))
2242 {
2243 *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
2244 if (CONST_INT_P (*op1))
2245 *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
2246 else
2247 *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
2248 }
2249 else
2250 {
2251 *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
2252 if (*op1 != const0_rtx)
2253 *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
2254 }
2255 }
2256 }
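/* For illustration (RV64 assumed): an unsigned comparison of two
   QImode values is widened to word_mode roughly as

     andi a4,a0,0xff   # zero-extend the first operand
     andi a5,a1,0xff   # zero-extend the second
     bltu a4,a5,.L2

   while signed sub-word operands are sign-extended instead, and a
   constant second operand is truncated to its low byte at compile
   time.  */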
2257
2258 /* Convert a comparison into something that can be used in a branch. On
2259 entry, *OP0 and *OP1 are the values being compared and *CODE is the code
2260 used to compare them. Update them to describe the final comparison. */
2261
2262 static void
2263 riscv_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2264 {
2265 if (splittable_const_int_operand (*op1, VOIDmode))
2266 {
2267 HOST_WIDE_INT rhs = INTVAL (*op1);
2268
2269 if (*code == EQ || *code == NE)
2270 {
2271 /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
2272 if (SMALL_OPERAND (-rhs))
2273 {
2274 *op0 = riscv_force_binary (GET_MODE (*op0), PLUS, *op0,
2275 GEN_INT (-rhs));
2276 *op1 = const0_rtx;
2277 }
2278 }
2279 else
2280 {
2281 static const enum rtx_code mag_comparisons[][2] = {
2282 {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
2283 };
2284
2285 /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */
2286 for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
2287 {
2288 HOST_WIDE_INT new_rhs;
2289 bool increment = *code == mag_comparisons[i][0];
2290 bool decrement = *code == mag_comparisons[i][1];
2291 if (!increment && !decrement)
2292 continue;
2293
2294 new_rhs = rhs + (increment ? 1 : -1);
2295 if (riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs)
2296 && (rhs < 0) == (new_rhs < 0))
2297 {
2298 *op1 = GEN_INT (new_rhs);
2299 *code = mag_comparisons[i][increment];
2300 }
2301 break;
2302 }
2303 }
2304 }
2305
2306 riscv_extend_comparands (*code, op0, op1);
2307
2308 *op0 = force_reg (word_mode, *op0);
2309 if (*op1 != const0_rtx)
2310 *op1 = force_reg (word_mode, *op1);
2311 }
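/* Worked example (illustrative): a branch on x <= 0xFFF becomes
   x < 0x1000.  Materializing 0xFFF takes two instructions (LUI plus
   ADDI), while 0x1000 is a single LUI, so the rewritten sequence

     lui a5,0x1
     blt a0,a5,.L2

   is one instruction shorter than li a5,0xfff; ble a0,a5,.L2.  */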
2312
2313 /* Like riscv_emit_int_compare, but for floating-point comparisons. */
2314
2315 static void
2316 riscv_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2317 {
2318 rtx tmp0, tmp1, cmp_op0 = *op0, cmp_op1 = *op1;
2319 enum rtx_code fp_code = *code;
2320 *code = NE;
2321
2322 switch (fp_code)
2323 {
2324 case UNORDERED:
2325 *code = EQ;
2326 /* Fall through. */
2327
2328 case ORDERED:
2329 /* a == a && b == b */
2330 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2331 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2332 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2333 *op1 = const0_rtx;
2334 break;
2335
2336 case UNEQ:
2337 /* UNEQ is ordered(a, b) == (a == b): true if unordered or equal. */
2338 *code = EQ;
2339 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2340 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2341 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2342 *op1 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op1);
2343 break;
2344
2345 #define UNORDERED_COMPARISON(CODE, CMP) \
2346 case CODE: \
2347 *code = EQ; \
2348 *op0 = gen_reg_rtx (word_mode); \
2349 if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
2350 emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
2351 else if (GET_MODE (cmp_op0) == SFmode) \
2352 emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
2353 else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
2354 emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
2355 else if (GET_MODE (cmp_op0) == DFmode) \
2356 emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
2357 else \
2358 gcc_unreachable (); \
2359 *op1 = const0_rtx; \
2360 break;
2361
2362 case UNLT:
2363 std::swap (cmp_op0, cmp_op1);
2364 gcc_fallthrough ();
2365
2366 UNORDERED_COMPARISON(UNGT, le)
2367
2368 case UNLE:
2369 std::swap (cmp_op0, cmp_op1);
2370 gcc_fallthrough ();
2371
2372 UNORDERED_COMPARISON(UNGE, lt)
2373 #undef UNORDERED_COMPARISON
2374
2375 case NE:
2376 fp_code = EQ;
2377 *code = EQ;
2378 /* Fall through. */
2379
2380 case EQ:
2381 case LE:
2382 case LT:
2383 case GE:
2384 case GT:
2385 /* We have instructions for these cases. */
2386 *op0 = riscv_force_binary (word_mode, fp_code, cmp_op0, cmp_op1);
2387 *op1 = const0_rtx;
2388 break;
2389
2390 case LTGT:
2391 /* (a < b) | (a > b) */
2392 tmp0 = riscv_force_binary (word_mode, LT, cmp_op0, cmp_op1);
2393 tmp1 = riscv_force_binary (word_mode, GT, cmp_op0, cmp_op1);
2394 *op0 = riscv_force_binary (word_mode, IOR, tmp0, tmp1);
2395 *op1 = const0_rtx;
2396 break;
2397
2398 default:
2399 gcc_unreachable ();
2400 }
2401 }
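/* Illustrative expansion (assumes the D extension): a branch on
   (a ORDERED b) with DFmode operands becomes

     feq.d a4,fa0,fa0   # a == a, false if a is NaN
     feq.d a5,fa1,fa1   # b == b
     and   a5,a4,a5
     bne   a5,zero,.L2

   UNORDERED uses the same sequence with the branch inverted to BEQ,
   matching *code = EQ above.  */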
2402
2403 /* CODE-compare OP0 and OP1. Store the result in TARGET. */
2404
2405 void
2406 riscv_expand_int_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2407 {
2408 riscv_extend_comparands (code, &op0, &op1);
2409 op0 = force_reg (word_mode, op0);
2410
2411 if (code == EQ || code == NE)
2412 {
2413 rtx zie = riscv_zero_if_equal (op0, op1);
2414 riscv_emit_binary (code, target, zie, const0_rtx);
2415 }
2416 else
2417 riscv_emit_int_order_test (code, 0, target, op0, op1);
2418 }
2419
2420 /* Like riscv_expand_int_scc, but for floating-point comparisons. */
2421
2422 void
2423 riscv_expand_float_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2424 {
2425 riscv_emit_float_compare (&code, &op0, &op1);
2426
2427 rtx cmp = riscv_force_binary (word_mode, code, op0, op1);
2428 riscv_emit_set (target, lowpart_subreg (SImode, cmp, word_mode));
2429 }
2430
2431 /* Jump to LABEL if (CODE OP0 OP1) holds. */
2432
2433 void
2434 riscv_expand_conditional_branch (rtx label, rtx_code code, rtx op0, rtx op1)
2435 {
2436 if (FLOAT_MODE_P (GET_MODE (op1)))
2437 riscv_emit_float_compare (&code, &op0, &op1);
2438 else
2439 riscv_emit_int_compare (&code, &op0, &op1);
2440
2441 rtx condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2442 emit_jump_insn (gen_condjump (condition, label));
2443 }
2444
2445 /* If (CODE OP0 OP1) holds, move CONS to DEST; else move ALT to DEST. */
2446
2447 void
2448 riscv_expand_conditional_move (rtx dest, rtx cons, rtx alt, rtx_code code,
2449 rtx op0, rtx op1)
2450 {
2451 riscv_emit_int_compare (&code, &op0, &op1);
2452 rtx cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
2453 emit_insn (gen_rtx_SET (dest, gen_rtx_IF_THEN_ELSE (GET_MODE (dest), cond,
2454 cons, alt)));
2455 }
2456
2457 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
2458 least PARM_BOUNDARY bits of alignment, but will be given anything up
2459 to PREFERRED_STACK_BOUNDARY bits if the type requires it. */
2460
2461 static unsigned int
2462 riscv_function_arg_boundary (machine_mode mode, const_tree type)
2463 {
2464 unsigned int alignment;
2465
2466 /* Use natural alignment if the type is not aggregate data. */
2467 if (type && !AGGREGATE_TYPE_P (type))
2468 alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
2469 else
2470 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2471
2472 return MIN (PREFERRED_STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment));
2473 }
2474
2475 /* If MODE represents an argument that can be passed or returned in
2476 floating-point registers, return the number of registers, else 0. */
2477
2478 static unsigned
2479 riscv_pass_mode_in_fpr_p (machine_mode mode)
2480 {
2481 if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG)
2482 {
2483 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2484 return 1;
2485
2486 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
2487 return 2;
2488 }
2489
2490 return 0;
2491 }
2492
2493 typedef struct {
2494 const_tree type;
2495 HOST_WIDE_INT offset;
2496 } riscv_aggregate_field;
2497
2498 /* Identify subfields of aggregates that are candidates for passing in
2499 floating-point registers. */
2500
2501 static int
2502 riscv_flatten_aggregate_field (const_tree type,
2503 riscv_aggregate_field fields[2],
2504 int n, HOST_WIDE_INT offset,
2505 bool ignore_zero_width_bit_field_p)
2506 {
2507 switch (TREE_CODE (type))
2508 {
2509 case RECORD_TYPE:
2510 /* Can't handle incomplete types nor sizes that are not fixed. */
2511 if (!COMPLETE_TYPE_P (type)
2512 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2513 || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2514 return -1;
2515
2516 for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
2517 if (TREE_CODE (f) == FIELD_DECL)
2518 {
2519 if (!TYPE_P (TREE_TYPE (f)))
2520 return -1;
2521
2522 /* The C++ front end strips zero-length bit-fields from structs.
2523 Ignore them here as well when compiling C, so that C code stays
2524 ABI-compatible with C++ code. */
2525 if (ignore_zero_width_bit_field_p
2526 && DECL_BIT_FIELD (f)
2527 && (DECL_SIZE (f) == NULL_TREE
2528 || integer_zerop (DECL_SIZE (f))))
2529 ;
2530 else
2531 {
2532 HOST_WIDE_INT pos = offset + int_byte_position (f);
2533 n = riscv_flatten_aggregate_field (TREE_TYPE (f),
2534 fields, n, pos,
2535 ignore_zero_width_bit_field_p);
2536 }
2537 if (n < 0)
2538 return -1;
2539 }
2540 return n;
2541
2542 case ARRAY_TYPE:
2543 {
2544 HOST_WIDE_INT n_elts;
2545 riscv_aggregate_field subfields[2];
2546 tree index = TYPE_DOMAIN (type);
2547 tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
2548 int n_subfields = riscv_flatten_aggregate_field (TREE_TYPE (type),
2549 subfields, 0, offset,
2550 ignore_zero_width_bit_field_p);
2551
2552 /* Can't handle incomplete types nor sizes that are not fixed. */
2553 if (n_subfields <= 0
2554 || !COMPLETE_TYPE_P (type)
2555 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2556 || !index
2557 || !TYPE_MAX_VALUE (index)
2558 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
2559 || !TYPE_MIN_VALUE (index)
2560 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
2561 || !tree_fits_uhwi_p (elt_size))
2562 return -1;
2563
2564 n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
2565 - tree_to_uhwi (TYPE_MIN_VALUE (index));
2566 gcc_assert (n_elts >= 0);
2567
2568 for (HOST_WIDE_INT i = 0; i < n_elts; i++)
2569 for (int j = 0; j < n_subfields; j++)
2570 {
2571 if (n >= 2)
2572 return -1;
2573
2574 fields[n] = subfields[j];
2575 fields[n++].offset += i * tree_to_uhwi (elt_size);
2576 }
2577
2578 return n;
2579 }
2580
2581 case COMPLEX_TYPE:
2582 {
2583 /* A complex type consumes both fields, so n must be 0. */
2584 if (n != 0)
2585 return -1;
2586
2587 HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
2588
2589 if (elt_size <= UNITS_PER_FP_ARG)
2590 {
2591 fields[0].type = TREE_TYPE (type);
2592 fields[0].offset = offset;
2593 fields[1].type = TREE_TYPE (type);
2594 fields[1].offset = offset + elt_size;
2595
2596 return 2;
2597 }
2598
2599 return -1;
2600 }
2601
2602 default:
2603 if (n < 2
2604 && ((SCALAR_FLOAT_TYPE_P (type)
2605 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
2606 || (INTEGRAL_TYPE_P (type)
2607 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)))
2608 {
2609 fields[n].type = type;
2610 fields[n].offset = offset;
2611 return n + 1;
2612 }
2613 else
2614 return -1;
2615 }
2616 }
2617
2618 /* Identify candidate aggregates for passing in floating-point registers.
2619 Candidates have at most two fields after flattening. */
2620
2621 static int
2622 riscv_flatten_aggregate_argument (const_tree type,
2623 riscv_aggregate_field fields[2],
2624 bool ignore_zero_width_bit_field_p)
2625 {
2626 if (!type || TREE_CODE (type) != RECORD_TYPE)
2627 return -1;
2628
2629 return riscv_flatten_aggregate_field (type, fields, 0, 0,
2630 ignore_zero_width_bit_field_p);
2631 }
2632
2633 /* See whether TYPE is a record whose fields should be returned in one or
2634 two floating-point registers. If so, populate FIELDS accordingly. */
2635
2636 static unsigned
2637 riscv_pass_aggregate_in_fpr_pair_p (const_tree type,
2638 riscv_aggregate_field fields[2])
2639 {
2640 static int warned = 0;
2641
2642 /* This is the old ABI, which differs for C++ and C. */
2643 int n_old = riscv_flatten_aggregate_argument (type, fields, false);
2644 for (int i = 0; i < n_old; i++)
2645 if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
2646 {
2647 n_old = -1;
2648 break;
2649 }
2650
2651 /* This is the new ABI, which is the same for C++ and C. */
2652 int n_new = riscv_flatten_aggregate_argument (type, fields, true);
2653 for (int i = 0; i < n_new; i++)
2654 if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
2655 {
2656 n_new = -1;
2657 break;
2658 }
2659
2660 if ((n_old != n_new) && (warned == 0))
2661 {
2662 warning (0, "ABI for flattened struct with zero-length bit-fields "
2663 "changed in GCC 10");
2664 warned = 1;
2665 }
2666
2667 return n_new > 0 ? n_new : 0;
2668 }
2669
2670 /* See whether TYPE is a record whose fields should be returned in one
2671 floating-point register and one integer register. If so, populate
2672 FIELDS accordingly. */
2673
2674 static bool
2675 riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
2676 riscv_aggregate_field fields[2])
2677 {
2678 static int warned = 0;
2679
2680 /* This is the old ABI, which differs for C++ and C. */
2681 unsigned num_int_old = 0, num_float_old = 0;
2682 int n_old = riscv_flatten_aggregate_argument (type, fields, false);
2683 for (int i = 0; i < n_old; i++)
2684 {
2685 num_float_old += SCALAR_FLOAT_TYPE_P (fields[i].type);
2686 num_int_old += INTEGRAL_TYPE_P (fields[i].type);
2687 }
2688
2689 /* This is the new ABI, which is the same for C++ and C. */
2690 unsigned num_int_new = 0, num_float_new = 0;
2691 int n_new = riscv_flatten_aggregate_argument (type, fields, true);
2692 for (int i = 0; i < n_new; i++)
2693 {
2694 num_float_new += SCALAR_FLOAT_TYPE_P (fields[i].type);
2695 num_int_new += INTEGRAL_TYPE_P (fields[i].type);
2696 }
2697
2698 if (((num_int_old == 1 && num_float_old == 1
2699 && (num_int_old != num_int_new || num_float_old != num_float_new))
2700 || (num_int_new == 1 && num_float_new == 1
2701 && (num_int_old != num_int_new || num_float_old != num_float_new)))
2702 && (warned == 0))
2703 {
2704 warning (0, "ABI for flattened struct with zero-length bit-fields "
2705 "changed in GCC 10");
2706 warned = 1;
2707 }
2708
2709 return num_int_new == 1 && num_float_new == 1;
2710 }
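/* ABI examples (illustrative, assuming a hard-float ABI such as lp64d
   with argument registers still available):

     struct a { float x; float y; };   // two FP fields -> fa0 and fa1
     struct b { double d; int i; };    // one FP, one int -> fa0 and a0

   Aggregates that flatten to more than two fields, or whose fields
   exceed UNITS_PER_FP_ARG, fall back to the integer convention.  */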
2711
2712 /* Return the representation of an argument passed or returned in an FPR
2713 when the value has mode VALUE_MODE and the type has TYPE_MODE. The
2714 two modes may be different for structures like:
2715
2716 struct __attribute__((packed)) foo { float f; }
2717
2718 where the SFmode value "f" is passed in REGNO but the struct itself
2719 has mode BLKmode. */
2720
2721 static rtx
2722 riscv_pass_fpr_single (machine_mode type_mode, unsigned regno,
2723 machine_mode value_mode,
2724 HOST_WIDE_INT offset)
2725 {
2726 rtx x = gen_rtx_REG (value_mode, regno);
2727
2728 if (type_mode != value_mode)
2729 {
2730 x = gen_rtx_EXPR_LIST (VOIDmode, x, GEN_INT (offset));
2731 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
2732 }
2733 return x;
2734 }
2735
2736 /* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
2737 MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
2738 byte offset for the first value, likewise MODE2 and OFFSET2 for the
2739 second value. */
2740
2741 static rtx
2742 riscv_pass_fpr_pair (machine_mode mode, unsigned regno1,
2743 machine_mode mode1, HOST_WIDE_INT offset1,
2744 unsigned regno2, machine_mode mode2,
2745 HOST_WIDE_INT offset2)
2746 {
2747 return gen_rtx_PARALLEL
2748 (mode,
2749 gen_rtvec (2,
2750 gen_rtx_EXPR_LIST (VOIDmode,
2751 gen_rtx_REG (mode1, regno1),
2752 GEN_INT (offset1)),
2753 gen_rtx_EXPR_LIST (VOIDmode,
2754 gen_rtx_REG (mode2, regno2),
2755 GEN_INT (offset2))));
2756 }
2757
2758 /* Fill INFO with information about a single argument, and return an
2759 RTL pattern to pass or return the argument. CUM is the cumulative
2760 state for earlier arguments. MODE is the mode of this argument and
2761 TYPE is its type (if known). NAMED is true if this is a named
2762 (fixed) argument rather than a variable one. RETURN_P is true if
2763 returning the argument, or false if passing the argument. */
2764
2765 static rtx
2766 riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
2767 machine_mode mode, const_tree type, bool named,
2768 bool return_p)
2769 {
2770 unsigned num_bytes, num_words;
2771 unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
2772 unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
2773 unsigned alignment = riscv_function_arg_boundary (mode, type);
2774
2775 memset (info, 0, sizeof (*info));
2776 info->gpr_offset = cum->num_gprs;
2777 info->fpr_offset = cum->num_fprs;
2778
2779 if (named)
2780 {
2781 riscv_aggregate_field fields[2];
2782 unsigned fregno = fpr_base + info->fpr_offset;
2783 unsigned gregno = gpr_base + info->gpr_offset;
2784
2785 /* Pass one- or two-element floating-point aggregates in FPRs. */
2786 if ((info->num_fprs = riscv_pass_aggregate_in_fpr_pair_p (type, fields))
2787 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2788 switch (info->num_fprs)
2789 {
2790 case 1:
2791 return riscv_pass_fpr_single (mode, fregno,
2792 TYPE_MODE (fields[0].type),
2793 fields[0].offset);
2794
2795 case 2:
2796 return riscv_pass_fpr_pair (mode, fregno,
2797 TYPE_MODE (fields[0].type),
2798 fields[0].offset,
2799 fregno + 1,
2800 TYPE_MODE (fields[1].type),
2801 fields[1].offset);
2802
2803 default:
2804 gcc_unreachable ();
2805 }
2806
2807 /* Pass real and complex floating-point numbers in FPRs. */
2808 if ((info->num_fprs = riscv_pass_mode_in_fpr_p (mode))
2809 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2810 switch (GET_MODE_CLASS (mode))
2811 {
2812 case MODE_FLOAT:
2813 return gen_rtx_REG (mode, fregno);
2814
2815 case MODE_COMPLEX_FLOAT:
2816 return riscv_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0,
2817 fregno + 1, GET_MODE_INNER (mode),
2818 GET_MODE_UNIT_SIZE (mode));
2819
2820 default:
2821 gcc_unreachable ();
2822 }
2823
2824 /* Pass structs with one float and one integer in an FPR and a GPR. */
2825 if (riscv_pass_aggregate_in_fpr_and_gpr_p (type, fields)
2826 && info->gpr_offset < MAX_ARGS_IN_REGISTERS
2827 && info->fpr_offset < MAX_ARGS_IN_REGISTERS)
2828 {
2829 info->num_gprs = 1;
2830 info->num_fprs = 1;
2831
2832 if (!SCALAR_FLOAT_TYPE_P (fields[0].type))
2833 std::swap (fregno, gregno);
2834
2835 return riscv_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type),
2836 fields[0].offset,
2837 gregno, TYPE_MODE (fields[1].type),
2838 fields[1].offset);
2839 }
2840 }
2841
2842 /* Work out the size of the argument. */
2843 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2844 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2845
2846 /* Doubleword-aligned varargs start on an even register boundary. */
2847 if (!named && num_bytes != 0 && alignment > BITS_PER_WORD)
2848 info->gpr_offset += info->gpr_offset & 1;
2849
2850 /* Partition the argument between registers and stack. */
2851 info->num_fprs = 0;
2852 info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset);
2853 info->stack_p = (num_words - info->num_gprs) != 0;
2854
2855 if (info->num_gprs || return_p)
2856 return gen_rtx_REG (mode, gpr_base + info->gpr_offset);
2857
2858 return NULL_RTX;
2859 }
2860
2861 /* Implement TARGET_FUNCTION_ARG. */
2862
2863 static rtx
2864 riscv_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
2865 {
2866 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2867 struct riscv_arg_info info;
2868
2869 if (arg.end_marker_p ())
2870 return NULL;
2871
2872 return riscv_get_arg_info (&info, cum, arg.mode, arg.type, arg.named, false);
2873 }
2874
2875 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
2876
2877 static void
2878 riscv_function_arg_advance (cumulative_args_t cum_v,
2879 const function_arg_info &arg)
2880 {
2881 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2882 struct riscv_arg_info info;
2883
2884 riscv_get_arg_info (&info, cum, arg.mode, arg.type, arg.named, false);
2885
2886 /* Advance the register count. This has the effect of setting
2887 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
2888 argument required us to skip the final GPR and pass the whole
2889 argument on the stack. */
2890 cum->num_fprs = info.fpr_offset + info.num_fprs;
2891 cum->num_gprs = info.gpr_offset + info.num_gprs;
2892 }
2893
2894 /* Implement TARGET_ARG_PARTIAL_BYTES. */
2895
2896 static int
2897 riscv_arg_partial_bytes (cumulative_args_t cum,
2898 const function_arg_info &generic_arg)
2899 {
2900 struct riscv_arg_info arg;
2901
2902 riscv_get_arg_info (&arg, get_cumulative_args (cum), generic_arg.mode,
2903 generic_arg.type, generic_arg.named, false);
2904 return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
2905 }
2906
2907 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
2908 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
2909 VALTYPE is null and MODE is the mode of the return value. */
2910
2911 rtx
2912 riscv_function_value (const_tree type, const_tree func, machine_mode mode)
2913 {
2914 struct riscv_arg_info info;
2915 CUMULATIVE_ARGS args;
2916
2917 if (type)
2918 {
2919 int unsigned_p = TYPE_UNSIGNED (type);
2920
2921 mode = TYPE_MODE (type);
2922
2923 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
2924 return values, promote the mode here too. */
2925 mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
2926 }
2927
2928 memset (&args, 0, sizeof args);
2929 return riscv_get_arg_info (&info, &args, mode, type, true, true);
2930 }
2931
2932 /* Implement TARGET_PASS_BY_REFERENCE. */
2933
2934 static bool
2935 riscv_pass_by_reference (cumulative_args_t cum_v, const function_arg_info &arg)
2936 {
2937 HOST_WIDE_INT size = arg.type_size_in_bytes ();
2938 struct riscv_arg_info info;
2939 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2940
2941 /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
2942 never pass variadic arguments in floating-point registers, so we can
2943 avoid the call to riscv_get_arg_info in this case. */
2944 if (cum != NULL)
2945 {
2946 /* Don't pass by reference if we can use a floating-point register. */
2947 riscv_get_arg_info (&info, cum, arg.mode, arg.type, arg.named, false);
2948 if (info.num_fprs)
2949 return false;
2950 }
2951
2952 /* Pass by reference if the data do not fit in two integer registers. */
2953 return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
2954 }
2955
2956 /* Implement TARGET_RETURN_IN_MEMORY. */
2957
2958 static bool
2959 riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
2960 {
2961 CUMULATIVE_ARGS args;
2962 cumulative_args_t cum = pack_cumulative_args (&args);
2963
2964 /* The rules for returning in memory are the same as for passing the
2965 first named argument by reference. */
2966 memset (&args, 0, sizeof args);
2967 function_arg_info arg (const_cast<tree> (type), /*named=*/true);
2968 return riscv_pass_by_reference (cum, arg);
2969 }
2970
2971 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
2972
2973 static void
2974 riscv_setup_incoming_varargs (cumulative_args_t cum,
2975 const function_arg_info &arg,
2976 int *pretend_size ATTRIBUTE_UNUSED, int no_rtl)
2977 {
2978 CUMULATIVE_ARGS local_cum;
2979 int gp_saved;
2980
2981 /* The caller has advanced CUM up to, but not beyond, the last named
2982 argument. Advance a local copy of CUM past the last "real" named
2983 argument, to find out how many registers are left over. */
2984 local_cum = *get_cumulative_args (cum);
2985 riscv_function_arg_advance (pack_cumulative_args (&local_cum), arg);
2986
2987 /* Find out how many registers we need to save. */
2988 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
2989
2990 if (!no_rtl && gp_saved > 0)
2991 {
2992 rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
2993 REG_PARM_STACK_SPACE (cfun->decl)
2994 - gp_saved * UNITS_PER_WORD);
2995 rtx mem = gen_frame_mem (BLKmode, ptr);
2996 set_mem_alias_set (mem, get_varargs_alias_set ());
2997
2998 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
2999 mem, gp_saved);
3000 }
3001 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
3002 cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
3003 }
3004
3005 /* Handle an attribute requiring a FUNCTION_DECL;
3006 arguments as in struct attribute_spec.handler. */
3007 static tree
3008 riscv_handle_fndecl_attribute (tree *node, tree name,
3009 tree args ATTRIBUTE_UNUSED,
3010 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
3011 {
3012 if (TREE_CODE (*node) != FUNCTION_DECL)
3013 {
3014 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3015 name);
3016 *no_add_attrs = true;
3017 }
3018
3019 return NULL_TREE;
3020 }
3021
3022 /* Verify type-based attributes. NODE is what the attribute is being
3023 applied to. NAME is the attribute name. ARGS are the attribute args.
3024 FLAGS gives info about the context. NO_ADD_ATTRS should be set to true if
3025 the attribute should be ignored. */
3026
3027 static tree
3028 riscv_handle_type_attribute (tree *node ATTRIBUTE_UNUSED, tree name, tree args,
3029 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
3030 {
3031 /* Check for an argument. */
3032 if (is_attribute_p ("interrupt", name))
3033 {
3034 if (args)
3035 {
3036 tree cst = TREE_VALUE (args);
3037 const char *string;
3038
3039 if (TREE_CODE (cst) != STRING_CST)
3040 {
3041 warning (OPT_Wattributes,
3042 "%qE attribute requires a string argument",
3043 name);
3044 *no_add_attrs = true;
3045 return NULL_TREE;
3046 }
3047
3048 string = TREE_STRING_POINTER (cst);
3049 if (strcmp (string, "user") && strcmp (string, "supervisor")
3050 && strcmp (string, "machine"))
3051 {
3052 warning (OPT_Wattributes,
3053 "argument to %qE attribute is not \"user\", \"supervisor\", or \"machine\"",
3054 name);
3055 *no_add_attrs = true;
3056 }
3057 }
3058 }
3059
3060 return NULL_TREE;
3061 }
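/* Usage example (illustrative):

     void __attribute__ ((interrupt ("machine"))) mtime_isr (void);

   The string argument, when present, must be "user", "supervisor" or
   "machine"; with no argument the attribute is accepted as-is.  */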
3062
3063 /* Return true if function TYPE is an interrupt function. */
3064 static bool
3065 riscv_interrupt_type_p (tree type)
3066 {
3067 return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
3068 }
3069
3070 /* Return true if FUNC is a naked function. */
3071 static bool
3072 riscv_naked_function_p (tree func)
3073 {
3074 tree func_decl = func;
3075 if (func == NULL_TREE)
3076 func_decl = current_function_decl;
3077 return NULL_TREE != lookup_attribute ("naked", DECL_ATTRIBUTES (func_decl));
3078 }
3079
3080 /* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. */
3081 static bool
3082 riscv_allocate_stack_slots_for_args ()
3083 {
3084 /* Naked functions should not allocate stack slots for arguments. */
3085 return !riscv_naked_function_p (current_function_decl);
3086 }
3087
3088 /* Implement TARGET_WARN_FUNC_RETURN. */
3089 static bool
3090 riscv_warn_func_return (tree decl)
3091 {
3092 /* Naked functions are implemented entirely in assembly, including the
3093 return sequence, so suppress warnings about this. */
3094 return !riscv_naked_function_p (decl);
3095 }
3096
3097 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
3098
3099 static void
3100 riscv_va_start (tree valist, rtx nextarg)
3101 {
3102 nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
3103 std_expand_builtin_va_start (valist, nextarg);
3104 }
3105
3106 /* Make ADDR suitable for use as a call or sibcall target. */
3107
3108 rtx
3109 riscv_legitimize_call_address (rtx addr)
3110 {
3111 if (!call_insn_operand (addr, VOIDmode))
3112 {
3113 rtx reg = RISCV_CALL_ADDRESS_TEMP (Pmode);
3114 riscv_emit_move (reg, addr);
3115 return reg;
3116 }
3117 return addr;
3118 }
3119
3120 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3121 Assume that the areas do not overlap. */
3122
3123 static void
3124 riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3125 {
3126 HOST_WIDE_INT offset, delta;
3127 unsigned HOST_WIDE_INT bits;
3128 int i;
3129 enum machine_mode mode;
3130 rtx *regs;
3131
3132 bits = MAX (BITS_PER_UNIT,
3133 MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
3134
3135 mode = mode_for_size (bits, MODE_INT, 0).require ();
3136 delta = bits / BITS_PER_UNIT;
3137
3138 /* Allocate a buffer for the temporary registers. */
3139 regs = XALLOCAVEC (rtx, length / delta);
3140
3141 /* Load as many BITS-sized chunks as possible; BITS was chosen above
3142 from the common alignment of the source and destination. */
3143 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3144 {
3145 regs[i] = gen_reg_rtx (mode);
3146 riscv_emit_move (regs[i], adjust_address (src, mode, offset));
3147 }
3148
3149 /* Copy the chunks to the destination. */
3150 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3151 riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
3152
3153 /* Mop up any left-over bytes. */
3154 if (offset < length)
3155 {
3156 src = adjust_address (src, BLKmode, offset);
3157 dest = adjust_address (dest, BLKmode, offset);
3158 move_by_pieces (dest, src, length - offset,
3159 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), RETURN_BEGIN);
3160 }
3161 }
3162
3163 /* Helper function for doing a loop-based block operation on memory
3164 reference MEM. Each iteration of the loop will operate on LENGTH
3165 bytes of MEM.
3166
3167 Create a new base register for use within the loop and point it to
3168 the start of MEM. Create a new memory reference that uses this
3169 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3170
3171 static void
3172 riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3173 rtx *loop_reg, rtx *loop_mem)
3174 {
3175 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3176
3177 /* Although the new mem does not refer to a known location,
3178 it does keep up to LENGTH bytes of alignment. */
3179 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3180 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3181 }
3182
3183 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
3184 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
3185 the memory regions do not overlap. */
3186
3187 static void
3188 riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
3189 HOST_WIDE_INT bytes_per_iter)
3190 {
3191 rtx label, src_reg, dest_reg, final_src, test;
3192 HOST_WIDE_INT leftover;
3193
3194 leftover = length % bytes_per_iter;
3195 length -= leftover;
3196
3197 /* Create registers and memory references for use within the loop. */
3198 riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
3199 riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
3200
3201 /* Calculate the value that SRC_REG should have after the last iteration
3202 of the loop. */
3203 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3204 0, 0, OPTAB_WIDEN);
3205
3206 /* Emit the start of the loop. */
3207 label = gen_label_rtx ();
3208 emit_label (label);
3209
3210 /* Emit the loop body. */
3211 riscv_block_move_straight (dest, src, bytes_per_iter);
3212
3213 /* Move on to the next block. */
3214 riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
3215 riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
3216
3217 /* Emit the loop condition. */
3218 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
3219 if (Pmode == DImode)
3220 emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
3221 else
3222 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
3223
3224 /* Mop up any left-over bytes. */
3225 if (leftover)
3226 riscv_block_move_straight (dest, src, leftover);
3227 else
3228 emit_insn (gen_nop ());
3229 }
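/* Illustrative shape of the emitted loop (RV64, word-aligned operands,
   16 bytes per iteration):

     .L1: ld   t0,0(a1)
          ld   t1,8(a1)
          sd   t0,0(a2)
          sd   t1,8(a2)
          addi a1,a1,16
          addi a2,a2,16
          bne  a1,a3,.L1   # a3 holds FINAL_SRC

   with any tail smaller than BYTES_PER_ITER copied straight-line.  */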
3230
3231 /* Expand a cpymemsi instruction, which copies LENGTH bytes from
3232 memory reference SRC to memory reference DEST. */
3233
3234 bool
3235 riscv_expand_block_move (rtx dest, rtx src, rtx length)
3236 {
3237 if (CONST_INT_P (length))
3238 {
3239 HOST_WIDE_INT factor, align;
3240
3241 align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
3242 factor = BITS_PER_WORD / align;
3243
3244 if (optimize_function_for_size_p (cfun)
3245 && INTVAL (length) * factor * UNITS_PER_WORD > MOVE_RATIO (false))
3246 return false;
3247
3248 if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
3249 {
3250 riscv_block_move_straight (dest, src, INTVAL (length));
3251 return true;
3252 }
3253 else if (optimize && align >= BITS_PER_WORD)
3254 {
3255 unsigned min_iter_words
3256 = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
3257 unsigned iter_words = min_iter_words;
3258 HOST_WIDE_INT bytes = INTVAL (length), words = bytes / UNITS_PER_WORD;
3259
3260 /* Lengthen the loop body if it shortens the tail. */
3261 for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
3262 {
3263 unsigned cur_cost = iter_words + words % iter_words;
3264 unsigned new_cost = i + words % i;
3265 if (new_cost <= cur_cost)
3266 iter_words = i;
3267 }
3268
3269 riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
3270 return true;
3271 }
3272 }
3273 return false;
3274 }
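/* Worked example (illustrative): suppose the minimum loop body is
   4 words and the copy is 11 words.  The search above evaluates

     i = 4: 4 + 11 % 4 = 7
     i = 5: 5 + 11 % 5 = 6   (accepted)
     i = 6: 6 + 11 % 6 = 11  (rejected)

   so a 5-word body wins: one extra word per iteration, but the tail
   shrinks from 3 words to 1.  */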
3275
3276 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
3277 in context CONTEXT. HI_RELOC indicates a high-part reloc. */
3278
3279 static void
3280 riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
3281 {
3282 const char *reloc;
3283
3284 switch (riscv_classify_symbolic_expression (op))
3285 {
3286 case SYMBOL_ABSOLUTE:
3287 reloc = hi_reloc ? "%hi" : "%lo";
3288 break;
3289
3290 case SYMBOL_PCREL:
3291 reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
3292 break;
3293
3294 case SYMBOL_TLS_LE:
3295 reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
3296 break;
3297
3298 default:
3299 output_operand_lossage ("invalid use of '%%%c'", hi_reloc ? 'h' : 'R');
3300 return;
3301 }
3302
3303 fprintf (file, "%s(", reloc);
3304 output_addr_const (file, riscv_strip_unspec_address (op));
3305 fputc (')', file);
3306 }
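/* Example output (illustrative): a SYMBOL_ABSOLUTE reference prints
   its two halves as

     lui  a0,%hi(sym)
     addi a0,a0,%lo(sym)

   while SYMBOL_PCREL uses %pcrel_hi/%pcrel_lo with AUIPC in place of
   LUI.  */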
3307
3308 /* Return true if the .AQ suffix should be added to an AMO to implement the
3309 acquire portion of memory model MODEL. */
3310
3311 static bool
3312 riscv_memmodel_needs_amo_acquire (enum memmodel model)
3313 {
3314 switch (model)
3315 {
3316 case MEMMODEL_ACQ_REL:
3317 case MEMMODEL_SEQ_CST:
3318 case MEMMODEL_SYNC_SEQ_CST:
3319 case MEMMODEL_ACQUIRE:
3320 case MEMMODEL_CONSUME:
3321 case MEMMODEL_SYNC_ACQUIRE:
3322 return true;
3323
3324 case MEMMODEL_RELEASE:
3325 case MEMMODEL_SYNC_RELEASE:
3326 case MEMMODEL_RELAXED:
3327 return false;
3328
3329 default:
3330 gcc_unreachable ();
3331 }
3332 }
3333
3334 /* Return true if a FENCE should be emitted before a memory access to
3335 implement the release portion of memory model MODEL. */
3336
3337 static bool
3338 riscv_memmodel_needs_release_fence (enum memmodel model)
3339 {
3340 switch (model)
3341 {
3342 case MEMMODEL_ACQ_REL:
3343 case MEMMODEL_SEQ_CST:
3344 case MEMMODEL_SYNC_SEQ_CST:
3345 case MEMMODEL_RELEASE:
3346 case MEMMODEL_SYNC_RELEASE:
3347 return true;
3348
3349 case MEMMODEL_ACQUIRE:
3350 case MEMMODEL_CONSUME:
3351 case MEMMODEL_SYNC_ACQUIRE:
3352 case MEMMODEL_RELAXED:
3353 return false;
3354
3355 default:
3356 gcc_unreachable ();
3357 }
3358 }
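/* Illustrative effect on the 'A' and 'F' operand codes below: an
   acquire AMO gains an .aq suffix, e.g.

     amoadd.w.aq a0,a1,(a2)

   and a release or seq-cst access is additionally preceded by
   "fence iorw,ow"; relaxed accesses get neither.  */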
3359
3360 /* Implement TARGET_PRINT_OPERAND. The RISC-V-specific operand codes are:
3361
3362 'h' Print the high-part relocation associated with OP, after stripping
3363 any outermost HIGH.
3364 'R' Print the low-part relocation associated with OP.
3365 'C' Print the integer branch condition for comparison OP.
3366 'A' Print the atomic operation suffix for memory model OP.
3367 'F' Print a FENCE if the memory model requires a release.
3368 'z' Print x0 if OP is zero, otherwise print OP normally.
3369 'i' Print i if the operand is not a register. */
3370
3371 static void
3372 riscv_print_operand (FILE *file, rtx op, int letter)
3373 {
3374 machine_mode mode = GET_MODE (op);
3375 enum rtx_code code = GET_CODE (op);
3376
3377 switch (letter)
3378 {
3379 case 'h':
3380 if (code == HIGH)
3381 op = XEXP (op, 0);
3382 riscv_print_operand_reloc (file, op, true);
3383 break;
3384
3385 case 'R':
3386 riscv_print_operand_reloc (file, op, false);
3387 break;
3388
3389 case 'C':
3390 /* The RTL names match the instruction names. */
3391 fputs (GET_RTX_NAME (code), file);
3392 break;
3393
3394 case 'A':
3395 if (riscv_memmodel_needs_amo_acquire ((enum memmodel) INTVAL (op)))
3396 fputs (".aq", file);
3397 break;
3398
3399 case 'F':
3400 if (riscv_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
3401 fputs ("fence iorw,ow; ", file);
3402 break;
3403
3404 case 'i':
3405 if (code != REG)
3406 fputs ("i", file);
3407 break;
3408
3409 default:
3410 switch (code)
3411 {
3412 case REG:
3413 if (letter && letter != 'z')
3414 output_operand_lossage ("invalid use of '%%%c'", letter);
3415 fprintf (file, "%s", reg_names[REGNO (op)]);
3416 break;
3417
3418 case MEM:
3419 if (letter && letter != 'z')
3420 output_operand_lossage ("invalid use of '%%%c'", letter);
3421 else
3422 output_address (mode, XEXP (op, 0));
3423 break;
3424
3425 default:
3426 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
3427 fputs (reg_names[GP_REG_FIRST], file);
3428 else if (letter && letter != 'z')
3429 output_operand_lossage ("invalid use of '%%%c'", letter);
3430 else
3431 output_addr_const (file, riscv_strip_unspec_address (op));
3432 break;
3433 }
3434 }
3435 }
3436
3437 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
3438
3439 static void
3440 riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3441 {
3442 struct riscv_address_info addr;
3443
3444 if (riscv_classify_address (&addr, x, word_mode, true))
3445 switch (addr.type)
3446 {
3447 case ADDRESS_REG:
3448 riscv_print_operand (file, addr.offset, 0);
3449 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3450 return;
3451
3452 case ADDRESS_LO_SUM:
3453 riscv_print_operand_reloc (file, addr.offset, false);
3454 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3455 return;
3456
3457 case ADDRESS_CONST_INT:
3458 output_addr_const (file, x);
3459 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
3460 return;
3461
3462 case ADDRESS_SYMBOLIC:
3463 output_addr_const (file, riscv_strip_unspec_address (x));
3464 return;
3465 }
3466 gcc_unreachable ();
3467 }
3468
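/* Return true if objects of SIZE bytes are small enough for the small
   data area, as controlled by -G (g_switch_value).  */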
3469 static bool
3470 riscv_size_ok_for_small_data_p (int size)
3471 {
3472 return g_switch_value && IN_RANGE (size, 1, g_switch_value);
3473 }
3474
3475 /* Return true if EXP should be placed in the small data section. */
3476
3477 static bool
3478 riscv_in_small_data_p (const_tree x)
3479 {
3480 if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
3481 return false;
3482
3483 if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
3484 {
3485 const char *sec = DECL_SECTION_NAME (x);
3486 return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
3487 }
3488
3489 return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
3490 }
3491
3492 /* Switch to the appropriate section for output of DECL. */
3493
3494 static section *
3495 riscv_select_section (tree decl, int reloc,
3496 unsigned HOST_WIDE_INT align)
3497 {
3498 switch (categorize_decl_for_section (decl, reloc))
3499 {
3500 case SECCAT_SRODATA:
3501 return get_named_section (decl, ".srodata", reloc);
3502
3503 default:
3504 return default_elf_select_section (decl, reloc, align);
3505 }
3506 }
3507
3508 /* Switch to the appropriate section for output of DECL. */
3509
3510 static void
3511 riscv_unique_section (tree decl, int reloc)
3512 {
3513 const char *prefix = NULL;
3514 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3515
3516 switch (categorize_decl_for_section (decl, reloc))
3517 {
3518 case SECCAT_SRODATA:
3519 prefix = one_only ? ".sr" : ".srodata";
3520 break;
3521
3522 default:
3523 break;
3524 }
3525 if (prefix)
3526 {
3527 const char *name, *linkonce;
3528 char *string;
3529
3530 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3531 name = targetm.strip_name_encoding (name);
3532
3533 /* If we're using one_only, then there needs to be a .gnu.linkonce
3534 prefix to the section name. */
3535 linkonce = one_only ? ".gnu.linkonce" : "";
3536
3537 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
3538
3539 set_decl_section_name (decl, string);
3540 return;
3541 }
3542 default_unique_section (decl, reloc);
3543 }
3544
3545 /* Return a section for X, handling small data. */
3546
3547 static section *
3548 riscv_elf_select_rtx_section (machine_mode mode, rtx x,
3549 unsigned HOST_WIDE_INT align)
3550 {
3551 section *s = default_elf_select_rtx_section (mode, x, align);
3552
3553 if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
3554 {
3555 if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
3556 {
3557 /* Rename .rodata.cst* to .srodata.cst*. */
3558 char *name = (char *) alloca (strlen (s->named.name) + 2);
3559 sprintf (name, ".s%s", s->named.name + 1);
3560 return get_section (name, s->named.common.flags, NULL);
3561 }
3562
3563 if (s == data_section)
3564 return sdata_section;
3565 }
3566
3567 return s;
3568 }
3569
3570 /* Make the last instruction frame-related and note that it performs
3571 the operation described by FRAME_PATTERN. */
3572
3573 static void
3574 riscv_set_frame_expr (rtx frame_pattern)
3575 {
3576 rtx insn;
3577
3578 insn = get_last_insn ();
3579 RTX_FRAME_RELATED_P (insn) = 1;
3580 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3581 frame_pattern,
3582 REG_NOTES (insn));
3583 }
3584
3585 /* Return a frame-related rtx that stores REG at MEM.
3586 REG must be a single register. */
3587
3588 static rtx
3589 riscv_frame_set (rtx mem, rtx reg)
3590 {
3591 rtx set = gen_rtx_SET (mem, reg);
3592 RTX_FRAME_RELATED_P (set) = 1;
3593 return set;
3594 }
3595
3596 /* Return true if the current function must save register REGNO. */
3597
3598 static bool
3599 riscv_save_reg_p (unsigned int regno)
3600 {
3601 bool call_saved = !global_regs[regno] && !call_used_or_fixed_reg_p (regno);
3602 bool might_clobber = crtl->saves_all_registers
3603 || df_regs_ever_live_p (regno);
3604
3605 if (call_saved && might_clobber)
3606 return true;
3607
3608 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3609 return true;
3610
3611 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
3612 return true;
3613
3614 /* If this is an interrupt handler, then we must save extra registers. */
3615 if (cfun->machine->interrupt_handler_p)
3616 {
3617 /* The zero register is always zero. */
3618 if (regno == GP_REG_FIRST)
3619 return false;
3620
3621 /* The function will return the stack pointer to its original value. */
3622 if (regno == STACK_POINTER_REGNUM)
3623 return false;
3624
3625 /* By convention, we assume that gp and tp are safe. */
3626 if (regno == GP_REGNUM || regno == THREAD_POINTER_REGNUM)
3627 return false;
3628
3629 /* We must save every register used in this function. If this is not a
3630 leaf function, then we must save all temporary registers. */
3631 if (df_regs_ever_live_p (regno)
3632 || (!crtl->is_leaf && call_used_or_fixed_reg_p (regno)))
3633 return true;
3634 }
3635
3636 return false;
3637 }
3638
3639 /* Determine whether to call GPR save/restore routines. */
3640 static bool
3641 riscv_use_save_libcall (const struct riscv_frame_info *frame)
3642 {
3643 if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed
3644 || cfun->machine->interrupt_handler_p)
3645 return false;
3646
3647 return frame->save_libcall_adjustment != 0;
3648 }
3649
3650 /* Determine which GPR save/restore routine to call. */
3651
3652 static unsigned
3653 riscv_save_libcall_count (unsigned mask)
3654 {
3655 for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
3656 if (BITSET_P (mask, n))
3657 return CALLEE_SAVED_REG_NUMBER (n) + 1;
3658 abort ();
3659 }
3660
3661 /* Populate the current function's riscv_frame_info structure.
3662
3663 RISC-V stack frames grow downward. High addresses are at the top.
3664
3665 +-------------------------------+
3666 | |
3667 | incoming stack arguments |
3668 | |
3669 +-------------------------------+ <-- incoming stack pointer
3670 | |
3671 | callee-allocated save area |
3672 | for arguments that are |
3673 | split between registers and |
3674 | the stack |
3675 | |
3676 +-------------------------------+ <-- arg_pointer_rtx
3677 | |
3678 | callee-allocated save area |
3679 | for register varargs |
3680 | |
3681 +-------------------------------+ <-- hard_frame_pointer_rtx;
3682 | | stack_pointer_rtx + gp_sp_offset
3683 | GPR save area | + UNITS_PER_WORD
3684 | |
3685 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
3686 | | + UNITS_PER_HWVALUE
3687 | FPR save area |
3688 | |
3689 +-------------------------------+ <-- frame_pointer_rtx (virtual)
3690 | |
3691 | local variables |
3692 | |
3693 P +-------------------------------+
3694 | |
3695 | outgoing stack arguments |
3696 | |
3697 +-------------------------------+ <-- stack_pointer_rtx
3698
3699 Dynamic stack allocations such as alloca insert data at point P.
3700 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
3701 hard_frame_pointer_rtx unchanged. */
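
/* A worked example of the offsets computed below (illustrative,
   assuming RV64 and 16-byte stack alignment): a function with 16 bytes
   of locals, no outgoing arguments, and only ra/s0 to save gets
       frame_pointer_offset      = 16   (locals below the GPR area)
       gp_sp_offset              = 24   (ra at sp+24, s0 at sp+16)
       hard_frame_pointer_offset = 32
       total_size                = 32
   which matches the layout pictured above.  */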
3702
3703 static HOST_WIDE_INT riscv_first_stack_step (struct riscv_frame_info *frame);
3704
3705 static void
3706 riscv_compute_frame_info (void)
3707 {
3708 struct riscv_frame_info *frame;
3709 HOST_WIDE_INT offset;
3710 bool interrupt_save_prologue_temp = false;
3711 unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
3712
3713 frame = &cfun->machine->frame;
3714
3715 /* In an interrupt function, if we have a large frame, then we need to
3716 save/restore t0. We check for this before clearing the frame struct. */
3717 if (cfun->machine->interrupt_handler_p)
3718 {
3719 HOST_WIDE_INT step1 = riscv_first_stack_step (frame);
3720 if (! SMALL_OPERAND (frame->total_size - step1))
3721 interrupt_save_prologue_temp = true;
3722 }
3723
3724 memset (frame, 0, sizeof (*frame));
3725
3726 if (!cfun->machine->naked_p)
3727 {
3728 /* Find out which GPRs we need to save. */
3729 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3730 if (riscv_save_reg_p (regno)
3731 || (interrupt_save_prologue_temp
3732 && (regno == RISCV_PROLOGUE_TEMP_REGNUM)))
3733 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3734
3735 /* If this function calls eh_return, we must also save and restore the
3736 EH data registers. */
3737 if (crtl->calls_eh_return)
3738 for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
3739 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3740
3741 /* Find out which FPRs we need to save. This loop must iterate over
3742 the same space as its companion in riscv_for_each_saved_reg. */
3743 if (TARGET_HARD_FLOAT)
3744 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3745 if (riscv_save_reg_p (regno))
3746 frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
3747 }
3748
3749 /* At the bottom of the frame are any outgoing stack arguments. */
3750 offset = RISCV_STACK_ALIGN (crtl->outgoing_args_size);
3751 /* Next are local stack variables. */
3752 offset += RISCV_STACK_ALIGN (get_frame_size ());
3753 /* The virtual frame pointer points above the local variables. */
3754 frame->frame_pointer_offset = offset;
3755 /* Next are the callee-saved FPRs. */
3756 if (frame->fmask)
3757 offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
3758 frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
3759 /* Next are the callee-saved GPRs. */
3760 if (frame->mask)
3761 {
3762 unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
3763 unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
3764
3765 /* Only use save/restore routines if they don't alter the stack size. */
3766 if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
3767 {
3768 /* On RV32E, the libcall saves/restores 3 registers at once, so we need
3769 to allocate 12 bytes for the callee-saved registers. */
3770 if (TARGET_RVE)
3771 x_save_size = 3 * UNITS_PER_WORD;
3772
3773 frame->save_libcall_adjustment = x_save_size;
3774 }
3775
3776 offset += x_save_size;
3777 }
3778 frame->gp_sp_offset = offset - UNITS_PER_WORD;
3779 /* The hard frame pointer points above the callee-saved GPRs. */
3780 frame->hard_frame_pointer_offset = offset;
3781 /* Above the hard frame pointer is the callee-allocated varargs save area. */
3782 offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
3783 /* Next is the callee-allocated area for pretend stack arguments. */
3784 offset += RISCV_STACK_ALIGN (crtl->args.pretend_args_size);
3785 /* Arg pointer must be below pretend args, but must be above alignment
3786 padding. */
3787 frame->arg_pointer_offset = offset - crtl->args.pretend_args_size;
3788 frame->total_size = offset;
3789 /* Next come the incoming stack pointer and any incoming arguments. */
3790
3791 /* Only use save/restore routines when the GPRs are atop the frame. */
3792 if (frame->hard_frame_pointer_offset != frame->total_size)
3793 frame->save_libcall_adjustment = 0;
3794 }
3795
3796 /* Make sure that we're not trying to eliminate to the wrong hard frame
3797 pointer. */
3798
3799 static bool
3800 riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
3801 {
3802 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
3803 }
3804
3805 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
3806 or argument pointer. TO is either the stack pointer or hard frame
3807 pointer. */
3808
3809 HOST_WIDE_INT
3810 riscv_initial_elimination_offset (int from, int to)
3811 {
3812 HOST_WIDE_INT src, dest;
3813
3814 riscv_compute_frame_info ();
3815
3816 if (to == HARD_FRAME_POINTER_REGNUM)
3817 dest = cfun->machine->frame.hard_frame_pointer_offset;
3818 else if (to == STACK_POINTER_REGNUM)
3819 dest = 0; /* The stack pointer is the base of all offsets, hence 0. */
3820 else
3821 gcc_unreachable ();
3822
3823 if (from == FRAME_POINTER_REGNUM)
3824 src = cfun->machine->frame.frame_pointer_offset;
3825 else if (from == ARG_POINTER_REGNUM)
3826 src = cfun->machine->frame.arg_pointer_offset;
3827 else
3828 gcc_unreachable ();
3829
3830 return src - dest;
3831 }
3832
3833 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
3834 previous frame. */
3835
3836 rtx
3837 riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
3838 {
3839 if (count != 0)
3840 return const0_rtx;
3841
3842 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
3843 }
3844
3845 /* Emit code to change the current function's return address to
3846 ADDRESS. SCRATCH is available as a scratch register, if needed.
3847 ADDRESS and SCRATCH are both word-mode GPRs. */
3848
3849 void
3850 riscv_set_return_address (rtx address, rtx scratch)
3851 {
3852 rtx slot_address;
3853
3854 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
3855 slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
3856 cfun->machine->frame.gp_sp_offset);
3857 riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
3858 }
3859
3860 /* A function to save or restore a register. The first argument is the
3861 register and the second is the stack slot. */
3862 typedef void (*riscv_save_restore_fn) (rtx, rtx);
3863
3864 /* Use FN to save or restore register REGNO. MODE is the register's
3865 mode and OFFSET is the offset of its save slot from the current
3866 stack pointer. */
3867
3868 static void
3869 riscv_save_restore_reg (machine_mode mode, int regno,
3870 HOST_WIDE_INT offset, riscv_save_restore_fn fn)
3871 {
3872 rtx mem;
3873
3874 mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
3875 fn (gen_rtx_REG (mode, regno), mem);
3876 }
3877
3878 /* Call FN for each register that is saved by the current function.
3879 SP_OFFSET is the offset of the current stack pointer from the start
3880 of the frame. */
3881
3882 static void
3883 riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn,
3884 bool epilogue, bool maybe_eh_return)
3885 {
3886 HOST_WIDE_INT offset;
3887
3888 /* Save the link register and s-registers. */
3889 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
3890 for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3891 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3892 {
3893 bool handle_reg = TRUE;
3894
3895 /* If this is a normal return in a function that calls the eh_return
3896 builtin, then do not restore the eh return data registers as that
3897 would clobber the return value. But we do still need to save them
3898 in the prologue, and restore them for an exception return, so we
3899 need special handling here. */
3900 if (epilogue && !maybe_eh_return && crtl->calls_eh_return)
3901 {
3902 unsigned int i, regnum;
3903
3904 for (i = 0; (regnum = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM;
3905 i++)
3906 if (regno == regnum)
3907 {
3908 handle_reg = FALSE;
3909 break;
3910 }
3911 }
3912
3913 if (handle_reg)
3914 riscv_save_restore_reg (word_mode, regno, offset, fn);
3915 offset -= UNITS_PER_WORD;
3916 }
3917
3918 /* This loop must iterate over the same space as its companion in
3919 riscv_compute_frame_info. */
3920 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
3921 for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3922 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
3923 {
3924 machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
3925
3926 riscv_save_restore_reg (mode, regno, offset, fn);
3927 offset -= GET_MODE_SIZE (mode);
3928 }
3929 }
3930
3931 /* Save register REG to MEM. Make the instruction frame-related. */
3932
3933 static void
3934 riscv_save_reg (rtx reg, rtx mem)
3935 {
3936 riscv_emit_move (mem, reg);
3937 riscv_set_frame_expr (riscv_frame_set (mem, reg));
3938 }
3939
3940 /* Restore register REG from MEM. */
3941
3942 static void
3943 riscv_restore_reg (rtx reg, rtx mem)
3944 {
3945 rtx insn = riscv_emit_move (reg, mem);
3946 rtx dwarf = NULL_RTX;
3947 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3948
3949 if (epilogue_cfa_sp_offset && REGNO (reg) == HARD_FRAME_POINTER_REGNUM)
3950 {
3951 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3952 GEN_INT (epilogue_cfa_sp_offset));
3953 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3954 }
3955
3956 REG_NOTES (insn) = dwarf;
3957 RTX_FRAME_RELATED_P (insn) = 1;
3958 }
3959
3960 /* For stack frames that can't be allocated with a single ADDI instruction,
3961 compute the best value to initially allocate. It must at a minimum
3962 allocate enough space to spill the callee-saved registers. If TARGET_RVC,
3963 try to pick a value that will allow compression of the register saves
3964 without adding extra instructions. */
3965
3966 static HOST_WIDE_INT
3967 riscv_first_stack_step (struct riscv_frame_info *frame)
3968 {
3969 if (SMALL_OPERAND (frame->total_size))
3970 return frame->total_size;
3971
3972 HOST_WIDE_INT min_first_step =
3973 RISCV_STACK_ALIGN (frame->total_size - frame->fp_sp_offset);
3974 HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8;
3975 HOST_WIDE_INT min_second_step = frame->total_size - max_first_step;
3976 gcc_assert (min_first_step <= max_first_step);
3977
3978 /* As an optimization, use the least-significant bits of the total frame
3979 size, so that the second adjustment step is just LUI + ADD. */
3980 if (!SMALL_OPERAND (min_second_step)
3981 && frame->total_size % IMM_REACH < IMM_REACH / 2
3982 && frame->total_size % IMM_REACH >= min_first_step)
3983 return frame->total_size % IMM_REACH;
3984
3985 if (TARGET_RVC)
3986 {
3987 /* If we need two subtracts, and one is small enough to allow compressed
3988 loads and stores, then put that one first. */
3989 if (IN_RANGE (min_second_step, 0,
3990 (TARGET_64BIT ? SDSP_REACH : SWSP_REACH)))
3991 return MAX (min_second_step, min_first_step);
3992
3993 /* If we need LUI + ADDI + ADD for the second adjustment step, then start
3994 with the minimum first step, so that we can get compressed loads and
3995 stores. */
3996 else if (!SMALL_OPERAND (min_second_step))
3997 return min_first_step;
3998 }
3999
4000 return max_first_step;
4001 }
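
/* Illustrative example (assuming IMM_REACH == 4096 and a small register
   save area): for total_size == 4160, neither 4160 nor the remainder
   after a 2032-byte first step fits in a signed 12-bit immediate, but
   4160 % 4096 == 64 does, so the first step is 64 bytes and the
   remaining 4096 needs only LUI + ADD.  */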
4002
4003 static rtx
4004 riscv_adjust_libcall_cfi_prologue ()
4005 {
4006 rtx dwarf = NULL_RTX;
4007 rtx adjust_sp_rtx, reg, mem, insn;
4008 int saved_size = cfun->machine->frame.save_libcall_adjustment;
4009 int offset;
4010
4011 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
4012 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
4013 {
4014 /* The save order is ra, s0, s1, s2 to s11. */
4015 if (regno == RETURN_ADDR_REGNUM)
4016 offset = saved_size - UNITS_PER_WORD;
4017 else if (regno == S0_REGNUM)
4018 offset = saved_size - UNITS_PER_WORD * 2;
4019 else if (regno == S1_REGNUM)
4020 offset = saved_size - UNITS_PER_WORD * 3;
4021 else
4022 offset = saved_size - ((regno - S2_REGNUM + 4) * UNITS_PER_WORD);
4023
4024 reg = gen_rtx_REG (SImode, regno);
4025 mem = gen_frame_mem (SImode, plus_constant (Pmode,
4026 stack_pointer_rtx,
4027 offset));
4028
4029 insn = gen_rtx_SET (mem, reg);
4030 dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
4031 }
4032
4033 /* Emit debug info for the sp adjustment. */
4034 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
4035 stack_pointer_rtx, GEN_INT (-saved_size));
4036 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
4037 dwarf);
4038 return dwarf;
4039 }
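
/* For instance (illustrative, assuming RV32 where UNITS_PER_WORD == 4
   and a 16-byte save area for __riscv_save_1): the notes record ra at
   sp+12 and s0 at sp+8, matching the ra, s0, s1, ... save order.  */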
4040
4041 static void
4042 riscv_emit_stack_tie (void)
4043 {
4044 if (Pmode == SImode)
4045 emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx));
4046 else
4047 emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx));
4048 }
4049
4050 /* Expand the "prologue" pattern. */
4051
4052 void
4053 riscv_expand_prologue (void)
4054 {
4055 struct riscv_frame_info *frame = &cfun->machine->frame;
4056 HOST_WIDE_INT size = frame->total_size;
4057 unsigned mask = frame->mask;
4058 rtx insn;
4059
4060 if (flag_stack_usage_info)
4061 current_function_static_stack_size = size;
4062
4063 if (cfun->machine->naked_p)
4064 return;
4065
4066 /* When optimizing for size, call a subroutine to save the registers. */
4067 if (riscv_use_save_libcall (frame))
4068 {
4069 rtx dwarf = NULL_RTX;
4070 dwarf = riscv_adjust_libcall_cfi_prologue ();
4071
4072 size -= frame->save_libcall_adjustment;
4073 insn = emit_insn (riscv_gen_gpr_save_insn (frame));
4074 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
4075
4076 RTX_FRAME_RELATED_P (insn) = 1;
4077 REG_NOTES (insn) = dwarf;
4078 }
4079
4080 /* Save the registers. */
4081 if ((frame->mask | frame->fmask) != 0)
4082 {
4083 HOST_WIDE_INT step1 = MIN (size, riscv_first_stack_step (frame));
4084
4085 insn = gen_add3_insn (stack_pointer_rtx,
4086 stack_pointer_rtx,
4087 GEN_INT (-step1));
4088 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
4089 size -= step1;
4090 riscv_for_each_saved_reg (size, riscv_save_reg, false, false);
4091 }
4092
4093 frame->mask = mask; /* Undo the above fib. */
4094
4095 /* Set up the frame pointer, if we're using one. */
4096 if (frame_pointer_needed)
4097 {
4098 insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
4099 GEN_INT (frame->hard_frame_pointer_offset - size));
4100 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
4101
4102 riscv_emit_stack_tie ();
4103 }
4104
4105 /* Allocate the rest of the frame. */
4106 if (size > 0)
4107 {
4108 if (SMALL_OPERAND (-size))
4109 {
4110 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
4111 GEN_INT (-size));
4112 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
4113 }
4114 else
4115 {
4116 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
4117 emit_insn (gen_add3_insn (stack_pointer_rtx,
4118 stack_pointer_rtx,
4119 RISCV_PROLOGUE_TEMP (Pmode)));
4120
4121 /* Describe the effect of the previous instructions. */
4122 insn = plus_constant (Pmode, stack_pointer_rtx, -size);
4123 insn = gen_rtx_SET (stack_pointer_rtx, insn);
4124 riscv_set_frame_expr (insn);
4125 }
4126 }
4127 }
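
/* As a sketch of the two-step allocation above (illustrative, not a
   literal dump): a 6144-byte RV64 frame saving ra and s0, without RVC,
   might expand to

       addi  sp, sp, -2032      # step1, within 12-bit immediate reach
       sd    ra, 2024(sp)
       sd    s0, 2016(sp)
       li    t0, -4112          # remainder too large for ADDI
       add   sp, sp, t0  */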
4128
4129 static rtx
4130 riscv_adjust_libcall_cfi_epilogue ()
4131 {
4132 rtx dwarf = NULL_RTX;
4133 rtx adjust_sp_rtx, reg;
4134 int saved_size = cfun->machine->frame.save_libcall_adjustment;
4135
4136 /* Emit debug info for the sp adjustment. */
4137 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
4138 stack_pointer_rtx, GEN_INT (saved_size));
4139 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
4140 dwarf);
4141
4142 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
4143 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
4144 {
4145 reg = gen_rtx_REG (SImode, regno);
4146 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
4147 }
4148
4149 return dwarf;
4150 }
4151
4152 /* Expand an "epilogue", "sibcall_epilogue", or "eh_return_internal" pattern;
4153 style says which. */
4154
4155 void
4156 riscv_expand_epilogue (int style)
4157 {
4158 /* Split the frame into two. STEP1 is the amount of stack we should
4159 deallocate before restoring the registers. STEP2 is the amount we
4160 should deallocate afterwards.
4161
4162 Start off by assuming that no registers need to be restored. */
4163 struct riscv_frame_info *frame = &cfun->machine->frame;
4164 unsigned mask = frame->mask;
4165 HOST_WIDE_INT step1 = frame->total_size;
4166 HOST_WIDE_INT step2 = 0;
4167 bool use_restore_libcall = ((style == NORMAL_RETURN)
4168 && riscv_use_save_libcall (frame));
4169 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4170 rtx insn;
4171
4172 /* We need to add a memory barrier to prevent reads from the deallocated stack. */
4173 bool need_barrier_p = (get_frame_size ()
4174 + cfun->machine->frame.arg_pointer_offset) != 0;
4175
4176 if (cfun->machine->naked_p)
4177 {
4178 gcc_assert (style == NORMAL_RETURN);
4179
4180 emit_jump_insn (gen_return ());
4181
4182 return;
4183 }
4184
4185 if ((style == NORMAL_RETURN) && riscv_can_use_return_insn ())
4186 {
4187 emit_jump_insn (gen_return ());
4188 return;
4189 }
4190
4191 /* Reset the epilogue cfa info before starting to emit the epilogue. */
4192 epilogue_cfa_sp_offset = 0;
4193
4194 /* Move past any dynamic stack allocations. */
4195 if (cfun->calls_alloca)
4196 {
4197 /* Emit a barrier to prevent loads from a deallocated stack. */
4198 riscv_emit_stack_tie ();
4199 need_barrier_p = false;
4200
4201 rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
4202 if (!SMALL_OPERAND (INTVAL (adjust)))
4203 {
4204 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
4205 adjust = RISCV_PROLOGUE_TEMP (Pmode);
4206 }
4207
4208 insn = emit_insn (
4209 gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
4210 adjust));
4211
4212 rtx dwarf = NULL_RTX;
4213 rtx cfa_adjust_value = gen_rtx_PLUS (
4214 Pmode, hard_frame_pointer_rtx,
4215 GEN_INT (-frame->hard_frame_pointer_offset));
4216 rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value);
4217 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf);
4218 RTX_FRAME_RELATED_P (insn) = 1;
4219
4220 REG_NOTES (insn) = dwarf;
4221 }
4222
4223 /* If we need to restore registers, deallocate as much stack as
4224 possible in the second step without going out of range. */
4225 if ((frame->mask | frame->fmask) != 0)
4226 {
4227 step2 = riscv_first_stack_step (frame);
4228 step1 -= step2;
4229 }
4230
4231 /* Advance the stack pointer by STEP1, deallocating that part of the frame. */
4232 if (step1 > 0)
4233 {
4234 /* Emit a barrier to prevent loads from a deallocated stack. */
4235 riscv_emit_stack_tie ();
4236 need_barrier_p = false;
4237
4238 /* Get an rtx for STEP1 that we can add to BASE. */
4239 rtx adjust = GEN_INT (step1);
4240 if (!SMALL_OPERAND (step1))
4241 {
4242 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
4243 adjust = RISCV_PROLOGUE_TEMP (Pmode);
4244 }
4245
4246 insn = emit_insn (
4247 gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
4248
4249 rtx dwarf = NULL_RTX;
4250 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
4251 GEN_INT (step2));
4252
4253 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
4254 RTX_FRAME_RELATED_P (insn) = 1;
4255
4256 REG_NOTES (insn) = dwarf;
4257 }
4258 else if (frame_pointer_needed)
4259 {
4260 /* Tell riscv_restore_reg to emit dwarf to redefine CFA when restoring
4261 old value of FP. */
4262 epilogue_cfa_sp_offset = step2;
4263 }
4264
4265 if (use_restore_libcall)
4266 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
4267
4268 /* Restore the registers. */
4269 riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg,
4270 true, style == EXCEPTION_RETURN);
4271
4272 if (use_restore_libcall)
4273 {
4274 frame->mask = mask; /* Undo the above fib. */
4275 gcc_assert (step2 >= frame->save_libcall_adjustment);
4276 step2 -= frame->save_libcall_adjustment;
4277 }
4278
4279 if (need_barrier_p)
4280 riscv_emit_stack_tie ();
4281
4282 /* Deallocate the final bit of the frame. */
4283 if (step2 > 0)
4284 {
4285 insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
4286 GEN_INT (step2)));
4287
4288 rtx dwarf = NULL_RTX;
4289 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
4290 const0_rtx);
4291 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
4292 RTX_FRAME_RELATED_P (insn) = 1;
4293
4294 REG_NOTES (insn) = dwarf;
4295 }
4296
4297 if (use_restore_libcall)
4298 {
4299 rtx dwarf = riscv_adjust_libcall_cfi_epilogue ();
4300 insn = emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
4301 RTX_FRAME_RELATED_P (insn) = 1;
4302 REG_NOTES (insn) = dwarf;
4303
4304 emit_jump_insn (gen_gpr_restore_return (ra));
4305 return;
4306 }
4307
4308 /* Add in the __builtin_eh_return stack adjustment. */
4309 if ((style == EXCEPTION_RETURN) && crtl->calls_eh_return)
4310 emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
4311 EH_RETURN_STACKADJ_RTX));
4312
4313 /* Return from interrupt. */
4314 if (cfun->machine->interrupt_handler_p)
4315 {
4316 enum riscv_privilege_levels mode = cfun->machine->interrupt_mode;
4317
4318 gcc_assert (mode != UNKNOWN_MODE);
4319
4320 if (mode == MACHINE_MODE)
4321 emit_jump_insn (gen_riscv_mret ());
4322 else if (mode == SUPERVISOR_MODE)
4323 emit_jump_insn (gen_riscv_sret ());
4324 else
4325 emit_jump_insn (gen_riscv_uret ());
4326 }
4327 else if (style != SIBCALL_RETURN)
4328 emit_jump_insn (gen_simple_return_internal (ra));
4329 }
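
/* Sketch of the -msave-restore tail (illustrative): instead of inline
   loads, the epilogue ends with a jump into the shared routine, e.g.

       addi  sp, sp, <local frame size>
       tail  __riscv_restore_2   # reloads s1, s0, ra; pops; returns  */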
4330
4331 /* Implement EPILOGUE_USES. */
4332
4333 bool
4334 riscv_epilogue_uses (unsigned int regno)
4335 {
4336 if (regno == RETURN_ADDR_REGNUM)
4337 return true;
4338
4339 if (epilogue_completed && cfun->machine->interrupt_handler_p)
4340 {
4341 /* An interrupt function restores temp regs, so we must indicate that
4342 they are live at function end. */
4343 if (df_regs_ever_live_p (regno)
4344 || (!crtl->is_leaf && call_used_or_fixed_reg_p (regno)))
4345 return true;
4346 }
4347
4348 return false;
4349 }
4350
4351 /* Return nonzero if this function is known to have a null epilogue.
4352 This allows the optimizer to omit jumps to jumps if no stack
4353 was created. */
4354
4355 bool
4356 riscv_can_use_return_insn (void)
4357 {
4358 return (reload_completed && cfun->machine->frame.total_size == 0
4359 && ! cfun->machine->interrupt_handler_p);
4360 }
4361
4362 /* Given that there exists at least one variable that is set (produced)
4363 by OUT_INSN and read (consumed) by IN_INSN, return true iff
4364 IN_INSN represents one or more memory store operations and none of
4365 the variables set by OUT_INSN is used by IN_INSN as the address of a
4366 store operation. If either IN_INSN or OUT_INSN does not represent
4367 a "single" RTL SET expression (as loosely defined by the
4368 implementation of the single_set function) or a PARALLEL with only
4369 SETs, CLOBBERs, and USEs inside, this function returns false.
4370
4371 Borrowed from rs6000, riscv_store_data_bypass_p checks for certain
4372 conditions that result in assertion failures in the generic
4373 store_data_bypass_p function and returns FALSE in such cases.
4374
4375 This is required to make -msave-restore work with the sifive-7
4376 pipeline description. */
4377
4378 bool
4379 riscv_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
4380 {
4381 rtx out_set, in_set;
4382 rtx out_pat, in_pat;
4383 rtx out_exp, in_exp;
4384 int i, j;
4385
4386 in_set = single_set (in_insn);
4387 if (in_set)
4388 {
4389 if (MEM_P (SET_DEST (in_set)))
4390 {
4391 out_set = single_set (out_insn);
4392 if (!out_set)
4393 {
4394 out_pat = PATTERN (out_insn);
4395 if (GET_CODE (out_pat) == PARALLEL)
4396 {
4397 for (i = 0; i < XVECLEN (out_pat, 0); i++)
4398 {
4399 out_exp = XVECEXP (out_pat, 0, i);
4400 if ((GET_CODE (out_exp) == CLOBBER)
4401 || (GET_CODE (out_exp) == USE))
4402 continue;
4403 else if (GET_CODE (out_exp) != SET)
4404 return false;
4405 }
4406 }
4407 }
4408 }
4409 }
4410 else
4411 {
4412 in_pat = PATTERN (in_insn);
4413 if (GET_CODE (in_pat) != PARALLEL)
4414 return false;
4415
4416 for (i = 0; i < XVECLEN (in_pat, 0); i++)
4417 {
4418 in_exp = XVECEXP (in_pat, 0, i);
4419 if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
4420 continue;
4421 else if (GET_CODE (in_exp) != SET)
4422 return false;
4423
4424 if (MEM_P (SET_DEST (in_exp)))
4425 {
4426 out_set = single_set (out_insn);
4427 if (!out_set)
4428 {
4429 out_pat = PATTERN (out_insn);
4430 if (GET_CODE (out_pat) != PARALLEL)
4431 return false;
4432 for (j = 0; j < XVECLEN (out_pat, 0); j++)
4433 {
4434 out_exp = XVECEXP (out_pat, 0, j);
4435 if ((GET_CODE (out_exp) == CLOBBER)
4436 || (GET_CODE (out_exp) == USE))
4437 continue;
4438 else if (GET_CODE (out_exp) != SET)
4439 return false;
4440 }
4441 }
4442 }
4443 }
4444 }
4445
4446 return store_data_bypass_p (out_insn, in_insn);
4447 }
4448
4449 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
4450
4451 When floating-point registers are wider than integer ones, moves between
4452 them must go through memory. */
4453
4454 static bool
4455 riscv_secondary_memory_needed (machine_mode mode, reg_class_t class1,
4456 reg_class_t class2)
4457 {
4458 return (GET_MODE_SIZE (mode) > UNITS_PER_WORD
4459 && (class1 == FP_REGS) != (class2 == FP_REGS));
4460 }
4461
4462 /* Implement TARGET_REGISTER_MOVE_COST. */
4463
4464 static int
4465 riscv_register_move_cost (machine_mode mode,
4466 reg_class_t from, reg_class_t to)
4467 {
4468 return riscv_secondary_memory_needed (mode, from, to) ? 8 : 2;
4469 }
4470
4471 /* Implement TARGET_HARD_REGNO_NREGS. */
4472
4473 static unsigned int
4474 riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
4475 {
4476 if (FP_REG_P (regno))
4477 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
4478
4479 /* All other registers are word-sized. */
4480 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4481 }
4482
4483 /* Implement TARGET_HARD_REGNO_MODE_OK. */
4484
4485 static bool
4486 riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
4487 {
4488 unsigned int nregs = riscv_hard_regno_nregs (regno, mode);
4489
4490 if (GP_REG_P (regno))
4491 {
4492 if (!GP_REG_P (regno + nregs - 1))
4493 return false;
4494 }
4495 else if (FP_REG_P (regno))
4496 {
4497 if (!FP_REG_P (regno + nregs - 1))
4498 return false;
4499
4500 if (GET_MODE_CLASS (mode) != MODE_FLOAT
4501 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
4502 return false;
4503
4504 /* Only use callee-saved registers if a potential callee is guaranteed
4505 to spill the requisite width. */
4506 if (GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_REG
4507 || (!call_used_or_fixed_reg_p (regno)
4508 && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
4509 return false;
4510 }
4511 else
4512 return false;
4513
4514 /* Require same callee-savedness for all registers. */
4515 for (unsigned i = 1; i < nregs; i++)
4516 if (call_used_or_fixed_reg_p (regno)
4517 != call_used_or_fixed_reg_p (regno + i))
4518 return false;
4519
4520 return true;
4521 }
4522
4523 /* Implement TARGET_MODES_TIEABLE_P.
4524
4525 Don't allow floating-point modes to be tied, since type punning of
4526 single-precision and double-precision is implementation defined. */
4527
4528 static bool
4529 riscv_modes_tieable_p (machine_mode mode1, machine_mode mode2)
4530 {
4531 return (mode1 == mode2
4532 || !(GET_MODE_CLASS (mode1) == MODE_FLOAT
4533 && GET_MODE_CLASS (mode2) == MODE_FLOAT));
4534 }
4535
4536 /* Implement CLASS_MAX_NREGS. */
4537
4538 static unsigned char
4539 riscv_class_max_nregs (reg_class_t rclass, machine_mode mode)
4540 {
4541 if (reg_class_subset_p (FP_REGS, rclass))
4542 return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
4543
4544 if (reg_class_subset_p (GR_REGS, rclass))
4545 return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
4546
4547 return 0;
4548 }
4549
4550 /* Implement TARGET_MEMORY_MOVE_COST. */
4551
4552 static int
4553 riscv_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
4554 {
4555 return (tune_param->memory_cost
4556 + memory_move_secondary_cost (mode, rclass, in));
4557 }
4558
4559 /* Return the number of instructions that can be issued per cycle. */
4560
4561 static int
4562 riscv_issue_rate (void)
4563 {
4564 return tune_param->issue_rate;
4565 }
4566
4567 /* Auxiliary function to emit RISC-V ELF attribute. */
4568 static void
4569 riscv_emit_attribute ()
4570 {
4571 fprintf (asm_out_file, "\t.attribute arch, \"%s\"\n",
4572 riscv_arch_str ().c_str ());
4573
4574 fprintf (asm_out_file, "\t.attribute unaligned_access, %d\n",
4575 TARGET_STRICT_ALIGN ? 0 : 1);
4576
4577 fprintf (asm_out_file, "\t.attribute stack_align, %d\n",
4578 riscv_stack_boundary / 8);
4579 }
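
/* For example, with -march=rv64imac this emits something like

       .attribute arch, "rv64i2p0_m2p0_a2p0_c2p0"
       .attribute unaligned_access, 0
       .attribute stack_align, 16

   (the exact arch string depends on riscv_arch_str).  */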
4580
4581 /* Implement TARGET_ASM_FILE_START. */
4582
4583 static void
4584 riscv_file_start (void)
4585 {
4586 default_file_start ();
4587
4588 /* Instruct GAS to generate position-[in]dependent code. */
4589 fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
4590
4591 /* If the user specifies "-mno-relax" on the command line then disable linker
4592 relaxation in the assembler. */
4593 if (! riscv_mrelax)
4594 fprintf (asm_out_file, "\t.option norelax\n");
4595
4596 if (riscv_emit_attribute_p)
4597 riscv_emit_attribute ();
4598 }
4599
4600 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
4601 in order to avoid duplicating too much logic from elsewhere. */
4602
4603 static void
4604 riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
4605 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
4606 tree function)
4607 {
4608 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
4609 rtx this_rtx, temp1, temp2, fnaddr;
4610 rtx_insn *insn;
4611
4612 /* Pretend to be a post-reload pass while generating rtl. */
4613 reload_completed = 1;
4614
4615 /* Mark the end of the (empty) prologue. */
4616 emit_note (NOTE_INSN_PROLOGUE_END);
4617
4618 /* Determine if we can use a sibcall to call FUNCTION directly. */
4619 fnaddr = gen_rtx_MEM (FUNCTION_MODE, XEXP (DECL_RTL (function), 0));
4620
4621 /* We need two temporary registers in some cases. */
4622 temp1 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
4623 temp2 = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
4624
4625 /* Find out which register contains the "this" pointer. */
4626 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
4627 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
4628 else
4629 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
4630
4631 /* Add DELTA to THIS_RTX. */
4632 if (delta != 0)
4633 {
4634 rtx offset = GEN_INT (delta);
4635 if (!SMALL_OPERAND (delta))
4636 {
4637 riscv_emit_move (temp1, offset);
4638 offset = temp1;
4639 }
4640 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
4641 }
4642
4643 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
4644 if (vcall_offset != 0)
4645 {
4646 rtx addr;
4647
4648 /* Set TEMP1 to *THIS_RTX. */
4649 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
4650
4651 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
4652 addr = riscv_add_offset (temp2, temp1, vcall_offset);
4653
4654 /* Load the offset and add it to THIS_RTX. */
4655 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
4656 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
4657 }
4658
4659 /* Jump to the target function. */
4660 insn = emit_call_insn (gen_sibcall (fnaddr, const0_rtx, NULL, const0_rtx));
4661 SIBLING_CALL_P (insn) = 1;
4662
4663 /* Run just enough of rest_of_compilation. This sequence was
4664 "borrowed" from alpha.c. */
4665 insn = get_insns ();
4666 split_all_insns_noflow ();
4667 shorten_branches (insn);
4668 assemble_start_function (thunk_fndecl, fnname);
4669 final_start_function (insn, file, 1);
4670 final (insn, file, 1);
4671 final_end_function ();
4672 assemble_end_function (thunk_fndecl, fnname);
4673
4674 /* Clean up the vars set above. Note that final_end_function resets
4675 the global pointer for us. */
4676 reload_completed = 0;
4677 }
4678
4679 /* Allocate a chunk of memory for per-function machine-dependent data. */
4680
4681 static struct machine_function *
4682 riscv_init_machine_status (void)
4683 {
4684 return ggc_cleared_alloc<machine_function> ();
4685 }
4686
4687 /* Implement TARGET_OPTION_OVERRIDE. */
4688
4689 static void
4690 riscv_option_override (void)
4691 {
4692 const struct riscv_tune_info *cpu;
4693
4694 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4695 SUBTARGET_OVERRIDE_OPTIONS;
4696 #endif
4697
4698 flag_pcc_struct_return = 0;
4699
4700 if (flag_pic)
4701 g_switch_value = 0;
4702
4703 /* The presence of the M extension implies that division instructions
4704 are present, so include them unless explicitly disabled. */
4705 if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
4706 target_flags |= MASK_DIV;
4707 else if (!TARGET_MUL && TARGET_DIV)
4708 error ("%<-mdiv%> requires %<-march%> to subsume the %<M%> extension");
4709
4710 /* Likewise floating-point division and square root. */
4711 if (TARGET_HARD_FLOAT && (target_flags_explicit & MASK_FDIV) == 0)
4712 target_flags |= MASK_FDIV;
4713
4714 /* Handle -mtune, using -mcpu's setting if -mtune is not given, and the
4715 default -mtune if neither -mtune nor -mcpu is given. */
4716 cpu = riscv_parse_tune (riscv_tune_string ? riscv_tune_string :
4717 (riscv_cpu_string ? riscv_cpu_string :
4718 RISCV_TUNE_STRING_DEFAULT));
4719 riscv_microarchitecture = cpu->microarchitecture;
4720 tune_param = optimize_size ? &optimize_size_tune_info : cpu->tune_param;
4721
4722 /* Use -mtune's setting for slow_unaligned_access, even when optimizing
4723 for size. For architectures that trap and emulate unaligned accesses,
4724 the performance cost is too great, even for -Os. Similarly, if
4725 -m[no-]strict-align is left unspecified, heed -mtune's advice. */
4726 riscv_slow_unaligned_access_p = (cpu->tune_param->slow_unaligned_access
4727 || TARGET_STRICT_ALIGN);
4728 if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0
4729 && cpu->tune_param->slow_unaligned_access)
4730 target_flags |= MASK_STRICT_ALIGN;
4731
4732 /* If the user hasn't specified a branch cost, use the processor's
4733 default. */
4734 if (riscv_branch_cost == 0)
4735 riscv_branch_cost = tune_param->branch_cost;
4736
4737 /* Function to allocate machine-dependent function status. */
4738 init_machine_status = &riscv_init_machine_status;
4739
4740 if (flag_pic)
4741 riscv_cmodel = CM_PIC;
4742
4743 /* We get better code with explicit relocs for CM_MEDLOW, but
4744 worse code for the others (for now). Pick the best default. */
4745 if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
4746 if (riscv_cmodel == CM_MEDLOW)
4747 target_flags |= MASK_EXPLICIT_RELOCS;
4748
4749 /* Require that the ISA supports the requested floating-point ABI. */
4750 if (UNITS_PER_FP_ARG > (TARGET_HARD_FLOAT ? UNITS_PER_FP_REG : 0))
4751 error ("requested ABI requires %<-march%> to subsume the %qc extension",
4752 UNITS_PER_FP_ARG > 8 ? 'Q' : (UNITS_PER_FP_ARG > 4 ? 'D' : 'F'));
4753
4754 if (TARGET_RVE && riscv_abi != ABI_ILP32E)
4755 error ("rv32e requires ilp32e ABI");
4756
4757 /* We do not yet support ILP32 on RV64. */
4758 if (BITS_PER_WORD != POINTER_SIZE)
4759 error ("ABI requires %<-march=rv%d%>", POINTER_SIZE);
4760
4761 /* Validate -mpreferred-stack-boundary= value. */
4762 riscv_stack_boundary = ABI_STACK_BOUNDARY;
4763 if (riscv_preferred_stack_boundary_arg)
4764 {
4765 int min = ctz_hwi (STACK_BOUNDARY / 8);
4766 int max = 8;
4767
4768 if (!IN_RANGE (riscv_preferred_stack_boundary_arg, min, max))
4769 error ("%<-mpreferred-stack-boundary=%d%> must be between %d and %d",
4770 riscv_preferred_stack_boundary_arg, min, max);
4771
4772 riscv_stack_boundary = 8 << riscv_preferred_stack_boundary_arg;
4773 }
4774
4775 if (riscv_emit_attribute_p < 0)
4776 #ifdef HAVE_AS_RISCV_ATTRIBUTE
4777 riscv_emit_attribute_p = TARGET_RISCV_ATTRIBUTE;
4778 #else
4779 riscv_emit_attribute_p = 0;
4780
4781 if (riscv_emit_attribute_p)
4782 error ("%<-mriscv-attribute%> RISC-V ELF attribute requires GNU as 2.32"
4783 " [%<-mriscv-attribute%>]");
4784 #endif
4785
4786 if (riscv_stack_protector_guard == SSP_GLOBAL
4787 && global_options_set.x_riscv_stack_protector_guard_offset_str)
4788 {
4789 error ("incompatible options %<-mstack-protector-guard=global%> and "
4790 "%<-mstack-protector-guard-offset=%s%>",
4791 riscv_stack_protector_guard_offset_str);
4792 }
4793
4794 if (riscv_stack_protector_guard == SSP_TLS
4795 && !(global_options_set.x_riscv_stack_protector_guard_offset_str
4796 && global_options_set.x_riscv_stack_protector_guard_reg_str))
4797 {
4798 error ("both %<-mstack-protector-guard-offset%> and "
4799 "%<-mstack-protector-guard-reg%> must be used "
4800 "with %<-mstack-protector-guard=sysreg%>");
4801 }
4802
4803 if (global_options_set.x_riscv_stack_protector_guard_reg_str)
4804 {
4805 const char *str = riscv_stack_protector_guard_reg_str;
4806 int reg = decode_reg_name (str);
4807
4808 if (!IN_RANGE (reg, GP_REG_FIRST + 1, GP_REG_LAST))
4809 error ("%qs is not a valid base register in %qs", str,
4810 "-mstack-protector-guard-reg=");
4811
4812 riscv_stack_protector_guard_reg = reg;
4813 }
4814
4815 if (global_options_set.x_riscv_stack_protector_guard_offset_str)
4816 {
4817 char *end;
4818 const char *str = riscv_stack_protector_guard_offset_str;
4819 errno = 0;
4820 long offs = strtol (riscv_stack_protector_guard_offset_str, &end, 0);
4821
4822 if (!*str || *end || errno)
4823 error ("%qs is not a valid number in %qs", str,
4824 "-mstack-protector-guard-offset=");
4825
4826 if (!SMALL_OPERAND (offs))
4827 error ("%qs is not a valid offset in %qs", str,
4828 "-mstack-protector-guard-offset=");
4829
4830 riscv_stack_protector_guard_offset = offs;
4831 }
4832
4833 }
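
/* A typical invocation exercising the stack-protector checks above
   (illustrative):

       gcc -fstack-protector-all -mstack-protector-guard=tls \
           -mstack-protector-guard-reg=tp \
           -mstack-protector-guard-offset=16 foo.c

   which reads the canary from 16(tp) instead of __stack_chk_guard.  */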
4834
4835 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
4836
4837 static void
4838 riscv_conditional_register_usage (void)
4839 {
4840 /* Only x0-x15 are available on RV32E. */
4841 if (TARGET_RVE)
4842 {
4843 for (int r = 16; r <= 31; r++)
4844 fixed_regs[r] = 1;
4845 }
4846
4847 if (riscv_abi == ABI_ILP32E)
4848 {
4849 for (int r = 16; r <= 31; r++)
4850 call_used_regs[r] = 1;
4851 }
4852
4853 if (!TARGET_HARD_FLOAT)
4854 {
4855 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4856 fixed_regs[regno] = call_used_regs[regno] = 1;
4857 }
4858
4859 /* In the soft-float ABI, there are no callee-saved FP registers. */
4860 if (UNITS_PER_FP_ARG == 0)
4861 {
4862 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4863 call_used_regs[regno] = 1;
4864 }
4865 }
4866
4867 /* Return a register priority for hard reg REGNO. */
4868
4869 static int
4870 riscv_register_priority (int regno)
4871 {
4872 /* Favor compressed registers to improve the odds of RVC instruction
4873 selection. */
4874 if (riscv_compressed_reg_p (regno))
4875 return 1;
4876
4877 return 0;
4878 }
4879
4880 /* Implement TARGET_TRAMPOLINE_INIT. */
4881
4882 static void
4883 riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
4884 {
4885 rtx addr, end_addr, mem;
4886 uint32_t trampoline[4];
4887 unsigned int i;
4888 HOST_WIDE_INT static_chain_offset, target_function_offset;
4889
4890 /* Work out the offsets of the pointers from the start of the
4891 trampoline code. */
4892 gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
4893
4894 /* Get pointers to the beginning and end of the code block. */
4895 addr = force_reg (Pmode, XEXP (m_tramp, 0));
4896 end_addr = riscv_force_binary (Pmode, PLUS, addr,
4897 GEN_INT (TRAMPOLINE_CODE_SIZE));
4898
4899
4900 if (Pmode == SImode)
4901 {
4902 chain_value = force_reg (Pmode, chain_value);
4903
4904 rtx target_function = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
4905 /* lui t2, hi(chain)
4906 lui t0, hi(func)
4907 addi t2, t2, lo(chain)
4908 jr t0, lo(func)
4909 */
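/* The arithmetic below implements the standard RISC-V hi/lo split
   (illustrative numbers): hi(A) = (A + 0x800) & 0xfffff000 and
   lo(A) = A - hi(A), a sign-extended 12-bit value; e.g. A == 0x12345ffc
   gives hi == 0x12346000 and lo == -4.  */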
4910 unsigned HOST_WIDE_INT lui_hi_chain_code, lui_hi_func_code;
4911 unsigned HOST_WIDE_INT lo_chain_code, lo_func_code;
4912
4913 rtx uimm_mask = force_reg (SImode, gen_int_mode (-IMM_REACH, SImode));
4914
4915 /* 0xfff. */
4916 rtx imm12_mask = gen_reg_rtx (SImode);
4917 emit_insn (gen_one_cmplsi2 (imm12_mask, uimm_mask));
4918
4919 rtx fixup_value = force_reg (SImode, gen_int_mode (IMM_REACH/2, SImode));
4920
4921 /* Gen lui t2, hi(chain). */
4922 rtx hi_chain = riscv_force_binary (SImode, PLUS, chain_value,
4923 fixup_value);
4924 hi_chain = riscv_force_binary (SImode, AND, hi_chain,
4925 uimm_mask);
4926 lui_hi_chain_code = OPCODE_LUI | (STATIC_CHAIN_REGNUM << SHIFT_RD);
4927 rtx lui_hi_chain = riscv_force_binary (SImode, IOR, hi_chain,
4928 gen_int_mode (lui_hi_chain_code, SImode));
4929
4930 mem = adjust_address (m_tramp, SImode, 0);
4931 riscv_emit_move (mem, lui_hi_chain);
4932
4933 /* Gen lui t0, hi(func). */
4934 rtx hi_func = riscv_force_binary (SImode, PLUS, target_function,
4935 fixup_value);
4936 hi_func = riscv_force_binary (SImode, AND, hi_func,
4937 uimm_mask);
4938 lui_hi_func_code = OPCODE_LUI | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD);
4939 rtx lui_hi_func = riscv_force_binary (SImode, IOR, hi_func,
4940 gen_int_mode (lui_hi_func_code, SImode));
4941
4942 mem = adjust_address (m_tramp, SImode, 1 * GET_MODE_SIZE (SImode));
4943 riscv_emit_move (mem, lui_hi_func);
4944
4945 /* Gen addi t2, t2, lo(chain). */
4946 rtx lo_chain = riscv_force_binary (SImode, AND, chain_value,
4947 imm12_mask);
4948 lo_chain = riscv_force_binary (SImode, ASHIFT, lo_chain, GEN_INT (20));
4949
4950 lo_chain_code = OPCODE_ADDI
4951 | (STATIC_CHAIN_REGNUM << SHIFT_RD)
4952 | (STATIC_CHAIN_REGNUM << SHIFT_RS1);
4953
4954 rtx addi_lo_chain = riscv_force_binary (SImode, IOR, lo_chain,
4955 force_reg (SImode, GEN_INT (lo_chain_code)));
4956
4957 mem = adjust_address (m_tramp, SImode, 2 * GET_MODE_SIZE (SImode));
4958 riscv_emit_move (mem, addi_lo_chain);
4959
4960 /* Gen jr t0, lo(func). */
4961 rtx lo_func = riscv_force_binary (SImode, AND, target_function,
4962 imm12_mask);
4963 lo_func = riscv_force_binary (SImode, ASHIFT, lo_func, GEN_INT (20));
4964
4965 lo_func_code = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
4966
4967 rtx jr_lo_func = riscv_force_binary (SImode, IOR, lo_func,
4968 force_reg (SImode, GEN_INT (lo_func_code)));
4969
4970 mem = adjust_address (m_tramp, SImode, 3 * GET_MODE_SIZE (SImode));
4971 riscv_emit_move (mem, jr_lo_func);
4972 }
4973 else
4974 {
4975 static_chain_offset = TRAMPOLINE_CODE_SIZE;
4976 target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
4977
4978 /* auipc t2, 0
4979 l[wd] t0, target_function_offset(t2)
4980 l[wd] t2, static_chain_offset(t2)
4981 jr t0
4982 */
4983 trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
4984 trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
4985 | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
4986 | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
4987 | (target_function_offset << SHIFT_IMM);
4988 trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
4989 | (STATIC_CHAIN_REGNUM << SHIFT_RD)
4990 | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
4991 | (static_chain_offset << SHIFT_IMM);
4992 trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
4993
4994 /* Copy the trampoline code. */
4995 for (i = 0; i < ARRAY_SIZE (trampoline); i++)
4996 {
4997 mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
4998 riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
4999 }
5000
5001 /* Set up the static chain pointer field. */
5002 mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
5003 riscv_emit_move (mem, chain_value);
5004
5005 /* Set up the target function field. */
5006 mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
5007 riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
5008 }
5009
5010 /* Flush the code part of the trampoline. */
5011 emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
5012 emit_insn (gen_clear_cache (addr, end_addr));
5013 }
5014
5015 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5016
5017 static bool
5018 riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
5019 tree exp ATTRIBUTE_UNUSED)
5020 {
5021 /* Don't use sibcalls when using the save/restore routines. */
5022 if (TARGET_SAVE_RESTORE)
5023 return false;
5024
5025 /* Don't use sibcall for naked functions. */
5026 if (cfun->machine->naked_p)
5027 return false;
5028
5029 /* Don't use sibcall for interrupt functions. */
5030 if (cfun->machine->interrupt_handler_p)
5031 return false;
5032
5033 return true;
5034 }
5035
5036 /* Get the interrupt type; return UNKNOWN_MODE if DECL is not an
5037 interrupt function. */
5038 static enum riscv_privilege_levels
5039 riscv_get_interrupt_type (tree decl)
5040 {
5041 gcc_assert (decl != NULL_TREE);
5042
5043 if ((TREE_CODE(decl) != FUNCTION_DECL)
5044 || (!riscv_interrupt_type_p (TREE_TYPE (decl))))
5045 return UNKNOWN_MODE;
5046
5047 tree attr_args
5048 = TREE_VALUE (lookup_attribute ("interrupt",
5049 TYPE_ATTRIBUTES (TREE_TYPE (decl))));
5050
5051 if (attr_args && TREE_CODE (TREE_VALUE (attr_args)) != VOID_TYPE)
5052 {
5053 const char *string = TREE_STRING_POINTER (TREE_VALUE (attr_args));
5054
5055 if (!strcmp (string, "user"))
5056 return USER_MODE;
5057 else if (!strcmp (string, "supervisor"))
5058 return SUPERVISOR_MODE;
5059 else /* Must be "machine". */
5060 return MACHINE_MODE;
5061 }
5062 else
5063 /* Interrupt attributes are machine mode by default. */
5064 return MACHINE_MODE;
5065 }
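
/* Usage example (illustrative) of the attribute decoded above:

       void __attribute__ ((interrupt ("supervisor"))) handler (void);

   with "machine" assumed when no argument string is given.  */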
5066
5067 /* Implement `TARGET_SET_CURRENT_FUNCTION'. */
5068 /* Sanity checking for the above function attributes. */
5069 static void
5070 riscv_set_current_function (tree decl)
5071 {
5072 if (decl == NULL_TREE
5073 || current_function_decl == NULL_TREE
5074 || current_function_decl == error_mark_node
5075 || ! cfun->machine
5076 || cfun->machine->attributes_checked_p)
5077 return;
5078
5079 cfun->machine->naked_p = riscv_naked_function_p (decl);
5080 cfun->machine->interrupt_handler_p
5081 = riscv_interrupt_type_p (TREE_TYPE (decl));
5082
5083 if (cfun->machine->naked_p && cfun->machine->interrupt_handler_p)
5084 error ("function attributes %qs and %qs are mutually exclusive",
5085 "interrupt", "naked");
5086
5087 if (cfun->machine->interrupt_handler_p)
5088 {
5089 tree ret = TREE_TYPE (TREE_TYPE (decl));
5090 tree args = TYPE_ARG_TYPES (TREE_TYPE (decl));
5091
5092 if (TREE_CODE (ret) != VOID_TYPE)
5093 error ("%qs function cannot return a value", "interrupt");
5094
5095 if (args && TREE_CODE (TREE_VALUE (args)) != VOID_TYPE)
5096 error ("%qs function cannot have arguments", "interrupt");
5097
5098 cfun->machine->interrupt_mode = riscv_get_interrupt_type (decl);
5099
5100 gcc_assert (cfun->machine->interrupt_mode != UNKNOWN_MODE);
5101 }
5102
5103 /* Don't print the above diagnostics more than once. */
5104 cfun->machine->attributes_checked_p = 1;
5105 }
5106
5107 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
5108 static tree
5109 riscv_merge_decl_attributes (tree olddecl, tree newdecl)
5110 {
5111 tree combined_attrs;
5112
5113 enum riscv_privilege_levels old_interrupt_type
5114 = riscv_get_interrupt_type (olddecl);
5115 enum riscv_privilege_levels new_interrupt_type
5116 = riscv_get_interrupt_type (newdecl);
5117
5118 /* Check that the old and new declarations have the same interrupt type. */
5119 if ((old_interrupt_type != UNKNOWN_MODE)
5120 && (new_interrupt_type != UNKNOWN_MODE)
5121 && (old_interrupt_type != new_interrupt_type))
5122 error ("%qs function cannot have different interrupt type", "interrupt");
5123
5124 /* Create combined attributes. */
5125 combined_attrs = merge_attributes (DECL_ATTRIBUTES (olddecl),
5126 DECL_ATTRIBUTES (newdecl));
5127
5128 return combined_attrs;
5129 }
5130
5131 /* Implement TARGET_CANNOT_COPY_INSN_P. */
5132
5133 static bool
5134 riscv_cannot_copy_insn_p (rtx_insn *insn)
5135 {
5136 return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
5137 }
5138
5139 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. */
5140
5141 static bool
5142 riscv_slow_unaligned_access (machine_mode, unsigned int)
5143 {
5144 return riscv_slow_unaligned_access_p;
5145 }
5146
5147 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
5148
5149 static bool
5150 riscv_can_change_mode_class (machine_mode, machine_mode, reg_class_t rclass)
5151 {
5152 return !reg_classes_intersect_p (FP_REGS, rclass);
5153 }
5154
5155
5156 /* Implement TARGET_CONSTANT_ALIGNMENT. */
5157
5158 static HOST_WIDE_INT
5159 riscv_constant_alignment (const_tree exp, HOST_WIDE_INT align)
5160 {
5161 if ((TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
5162 && (riscv_align_data_type == riscv_align_data_type_xlen))
5163 return MAX (align, BITS_PER_WORD);
5164 return align;
5165 }
5166
5167 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
5168
5169 /* This function is equivalent to default_promote_function_mode_always_promote
5170 except that it returns a promoted mode even if type is NULL_TREE. This is
5171 needed by libcalls which have no type (only a mode) such as fixed conversion
5172 routines that take a signed or unsigned char/short/int argument and convert
5173 it to a fixed type. */
5174
5175 static machine_mode
5176 riscv_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
5177 machine_mode mode,
5178 int *punsignedp ATTRIBUTE_UNUSED,
5179 const_tree fntype ATTRIBUTE_UNUSED,
5180 int for_return ATTRIBUTE_UNUSED)
5181 {
5182 int unsignedp;
5183
5184 if (type != NULL_TREE)
5185 return promote_mode (type, mode, punsignedp);
5186
5187 unsignedp = *punsignedp;
5188 PROMOTE_MODE (mode, unsignedp, type);
5189 *punsignedp = unsignedp;
5190 return mode;
5191 }
5192
5193 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
5194
5195 static void
5196 riscv_reorg (void)
5197 {
5198 /* Do nothing unless we have -msave-restore. */
5199 if (TARGET_SAVE_RESTORE)
5200 riscv_remove_unneeded_save_restore_calls ();
5201 }
5202
5203 /* Return nonzero if register FROM_REGNO can be renamed to register
5204 TO_REGNO. */
5205
5206 bool
5207 riscv_hard_regno_rename_ok (unsigned from_regno ATTRIBUTE_UNUSED,
5208 unsigned to_regno)
5209 {
5210 /* Interrupt functions can only use registers that have already been
5211 saved by the prologue, even if they would normally be
5212 call-clobbered. */
5213 return !cfun->machine->interrupt_handler_p || df_regs_ever_live_p (to_regno);
5214 }
5215
5216 /* Implement TARGET_NEW_ADDRESS_PROFITABLE_P. */
5217
5218 bool
5219 riscv_new_address_profitable_p (rtx memref, rtx_insn *insn, rtx new_addr)
5220 {
5221 /* Prefer old address if it is less expensive. */
5222 addr_space_t as = MEM_ADDR_SPACE (memref);
5223 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
5224 int old_cost = address_cost (XEXP (memref, 0), GET_MODE (memref), as, speed);
5225 int new_cost = address_cost (new_addr, GET_MODE (memref), as, speed);
5226 return new_cost <= old_cost;
5227 }
5228
5229 /* Helper function for generating gpr_save pattern. */
5230
5231 rtx
5232 riscv_gen_gpr_save_insn (struct riscv_frame_info *frame)
5233 {
5234 unsigned count = riscv_save_libcall_count (frame->mask);
5235 /* 1 for the unspec, 2 for the clobbers of t0/t1, and 1 for the use of ra. */
5236 unsigned veclen = 1 + 2 + 1 + count;
5237 rtvec vec = rtvec_alloc (veclen);
5238
5239 gcc_assert (veclen <= ARRAY_SIZE (gpr_save_reg_order));
5240
5241 RTVEC_ELT (vec, 0) =
5242 gen_rtx_UNSPEC_VOLATILE (VOIDmode,
5243 gen_rtvec (1, GEN_INT (count)), UNSPECV_GPR_SAVE);
5244
5245 for (unsigned i = 1; i < veclen; ++i)
5246 {
5247 unsigned regno = gpr_save_reg_order[i];
5248 rtx reg = gen_rtx_REG (Pmode, regno);
5249 rtx elt;
5250
5251 /* t0 and t1 are CLOBBERs, others are USEs. */
5252 if (i < 3)
5253 elt = gen_rtx_CLOBBER (Pmode, reg);
5254 else
5255 elt = gen_rtx_USE (Pmode, reg);
5256
5257 RTVEC_ELT (vec, i) = elt;
5258 }
5259
5260 /* The largest-numbered callee-saved register must be set in MASK if we
5261 are not using __riscv_save_0. */
5262 gcc_assert ((count == 0) ||
5263 BITSET_P (frame->mask, gpr_save_reg_order[veclen - 1]));
5264
5265 return gen_rtx_PARALLEL (VOIDmode, vec);
5266 }
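
/* The generated pattern has this shape (illustrative, COUNT == 2, RV64):

       (parallel [(unspec_volatile [(const_int 2)] UNSPECV_GPR_SAVE)
                  (clobber (reg:DI t0))
                  (clobber (reg:DI t1))
                  (use (reg:DI ra))
                  (use (reg:DI s0))
                  (use (reg:DI s1))])  */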
5267
5268 /* Return true if OP is a valid gpr_save pattern. */
5269
5270 bool
5271 riscv_gpr_save_operation_p (rtx op)
5272 {
5273 unsigned len = XVECLEN (op, 0);
5274
5275 if (len > ARRAY_SIZE (gpr_save_reg_order))
5276 return false;
5277
5278 for (unsigned i = 0; i < len; i++)
5279 {
5280 rtx elt = XVECEXP (op, 0, i);
5281 if (i == 0)
5282 {
5283 /* First element in parallel is unspec. */
5284 if (GET_CODE (elt) != UNSPEC_VOLATILE
5285 || GET_CODE (XVECEXP (elt, 0, 0)) != CONST_INT
5286 || XINT (elt, 1) != UNSPECV_GPR_SAVE)
5287 return false;
5288 }
5289 else
5290 {
5291 /* Two CLOBBERs followed by USEs; we must check the order. The
5292 register is operand 0 of the CLOBBER or USE. */
5293 unsigned expect_code = i < 3 ? CLOBBER : USE;
5294 if (GET_CODE (elt) != expect_code
5295 || !REG_P (XEXP (elt, 0))
5296 || (REGNO (XEXP (elt, 0)) != gpr_save_reg_order[i]))
5297 return false;
5298 }
5299 }
5300 return true;
5301 }
5302
5303 /* Implement TARGET_ASAN_SHADOW_OFFSET. */
5304
5305 static unsigned HOST_WIDE_INT
5306 riscv_asan_shadow_offset (void)
5307 {
5308 /* We only have libsanitizer support for RV64 at present.
5309
5310 This number must match kRiscv*_ShadowOffset* in the file
5311 libsanitizer/asan/asan_mapping.h which is currently 1<<29 for rv64,
5312 even though 1<<36 makes more sense. */
5313 return TARGET_64BIT ? (HOST_WIDE_INT_1 << 29) : 0;
5314 }
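
/* Under this mapping (illustrative), a byte at address A is shadowed at

       shadow (A) = (A >> 3) + riscv_asan_shadow_offset ()

   i.e. (A >> 3) + (1 << 29) on RV64.  */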
5315
5316 /* Initialize the GCC target structure. */
5317 #undef TARGET_ASM_ALIGNED_HI_OP
5318 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
5319 #undef TARGET_ASM_ALIGNED_SI_OP
5320 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
5321 #undef TARGET_ASM_ALIGNED_DI_OP
5322 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
5323
5324 #undef TARGET_OPTION_OVERRIDE
5325 #define TARGET_OPTION_OVERRIDE riscv_option_override
5326
5327 #undef TARGET_LEGITIMIZE_ADDRESS
5328 #define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
5329
5330 #undef TARGET_SCHED_ISSUE_RATE
5331 #define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
5332
5333 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
5334 #define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
5335
5336 #undef TARGET_SET_CURRENT_FUNCTION
5337 #define TARGET_SET_CURRENT_FUNCTION riscv_set_current_function
5338
5339 #undef TARGET_REGISTER_MOVE_COST
5340 #define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
5341 #undef TARGET_MEMORY_MOVE_COST
5342 #define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
5343 #undef TARGET_RTX_COSTS
5344 #define TARGET_RTX_COSTS riscv_rtx_costs
5345 #undef TARGET_ADDRESS_COST
5346 #define TARGET_ADDRESS_COST riscv_address_cost
5347
5348 #undef TARGET_ASM_FILE_START
5349 #define TARGET_ASM_FILE_START riscv_file_start
5350 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
5351 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
5352
5353 #undef TARGET_EXPAND_BUILTIN_VA_START
5354 #define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
5355
5356 #undef TARGET_PROMOTE_FUNCTION_MODE
5357 #define TARGET_PROMOTE_FUNCTION_MODE riscv_promote_function_mode
5358
5359 #undef TARGET_RETURN_IN_MEMORY
5360 #define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
5361
5362 #undef TARGET_ASM_OUTPUT_MI_THUNK
5363 #define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
5364 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
5365 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
5366
5367 #undef TARGET_PRINT_OPERAND
5368 #define TARGET_PRINT_OPERAND riscv_print_operand
5369 #undef TARGET_PRINT_OPERAND_ADDRESS
5370 #define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
5371
5372 #undef TARGET_SETUP_INCOMING_VARARGS
5373 #define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
5374 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
5375 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS riscv_allocate_stack_slots_for_args
5376 #undef TARGET_STRICT_ARGUMENT_NAMING
5377 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
5378 #undef TARGET_MUST_PASS_IN_STACK
5379 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
5380 #undef TARGET_PASS_BY_REFERENCE
5381 #define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
5382 #undef TARGET_ARG_PARTIAL_BYTES
5383 #define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
5384 #undef TARGET_FUNCTION_ARG
5385 #define TARGET_FUNCTION_ARG riscv_function_arg
5386 #undef TARGET_FUNCTION_ARG_ADVANCE
5387 #define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
5388 #undef TARGET_FUNCTION_ARG_BOUNDARY
5389 #define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
5390
5391 /* The generic ELF target does not always have TLS support. */
5392 #ifdef HAVE_AS_TLS
5393 #undef TARGET_HAVE_TLS
5394 #define TARGET_HAVE_TLS true
5395 #endif
5396
5397 #undef TARGET_CANNOT_FORCE_CONST_MEM
5398 #define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
5399
5400 #undef TARGET_LEGITIMATE_CONSTANT_P
5401 #define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
5402
5403 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
5404 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
5405
5406 #undef TARGET_LEGITIMATE_ADDRESS_P
5407 #define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
5408
5409 #undef TARGET_CAN_ELIMINATE
5410 #define TARGET_CAN_ELIMINATE riscv_can_eliminate
5411
5412 #undef TARGET_CONDITIONAL_REGISTER_USAGE
5413 #define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
5414
5415 #undef TARGET_CLASS_MAX_NREGS
5416 #define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
5417
5418 #undef TARGET_TRAMPOLINE_INIT
5419 #define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
5420
5421 #undef TARGET_IN_SMALL_DATA_P
5422 #define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
5423
5424 #undef TARGET_HAVE_SRODATA_SECTION
5425 #define TARGET_HAVE_SRODATA_SECTION true
5426
5427 #undef TARGET_ASM_SELECT_SECTION
5428 #define TARGET_ASM_SELECT_SECTION riscv_select_section
5429
5430 #undef TARGET_ASM_UNIQUE_SECTION
5431 #define TARGET_ASM_UNIQUE_SECTION riscv_unique_section
5432
5433 #undef TARGET_ASM_SELECT_RTX_SECTION
5434 #define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
5435
5436 #undef TARGET_MIN_ANCHOR_OFFSET
5437 #define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
5438
5439 #undef TARGET_MAX_ANCHOR_OFFSET
5440 #define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
5441
5442 #undef TARGET_REGISTER_PRIORITY
5443 #define TARGET_REGISTER_PRIORITY riscv_register_priority
5444
5445 #undef TARGET_CANNOT_COPY_INSN_P
5446 #define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
5447
5448 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
5449 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv
5450
5451 #undef TARGET_INIT_BUILTINS
5452 #define TARGET_INIT_BUILTINS riscv_init_builtins
5453
5454 #undef TARGET_BUILTIN_DECL
5455 #define TARGET_BUILTIN_DECL riscv_builtin_decl
5456
5457 #undef TARGET_EXPAND_BUILTIN
5458 #define TARGET_EXPAND_BUILTIN riscv_expand_builtin
5459
5460 #undef TARGET_HARD_REGNO_NREGS
5461 #define TARGET_HARD_REGNO_NREGS riscv_hard_regno_nregs
5462 #undef TARGET_HARD_REGNO_MODE_OK
5463 #define TARGET_HARD_REGNO_MODE_OK riscv_hard_regno_mode_ok
5464
5465 #undef TARGET_MODES_TIEABLE_P
5466 #define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p
5467
5468 #undef TARGET_SLOW_UNALIGNED_ACCESS
5469 #define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access
5470
5471 #undef TARGET_SECONDARY_MEMORY_NEEDED
5472 #define TARGET_SECONDARY_MEMORY_NEEDED riscv_secondary_memory_needed
5473
5474 #undef TARGET_CAN_CHANGE_MODE_CLASS
5475 #define TARGET_CAN_CHANGE_MODE_CLASS riscv_can_change_mode_class
5476
5477 #undef TARGET_CONSTANT_ALIGNMENT
5478 #define TARGET_CONSTANT_ALIGNMENT riscv_constant_alignment
5479
5480 #undef TARGET_MERGE_DECL_ATTRIBUTES
5481 #define TARGET_MERGE_DECL_ATTRIBUTES riscv_merge_decl_attributes
5482
5483 #undef TARGET_ATTRIBUTE_TABLE
5484 #define TARGET_ATTRIBUTE_TABLE riscv_attribute_table
5485
5486 #undef TARGET_WARN_FUNC_RETURN
5487 #define TARGET_WARN_FUNC_RETURN riscv_warn_func_return
5488
5489 /* The low bit is ignored by jump instructions, so it is safe to use. */
5490 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
5491 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
5492
5493 #undef TARGET_MACHINE_DEPENDENT_REORG
5494 #define TARGET_MACHINE_DEPENDENT_REORG riscv_reorg
5495
5496 #undef TARGET_NEW_ADDRESS_PROFITABLE_P
5497 #define TARGET_NEW_ADDRESS_PROFITABLE_P riscv_new_address_profitable_p
5498
5499 #undef TARGET_ASAN_SHADOW_OFFSET
5500 #define TARGET_ASAN_SHADOW_OFFSET riscv_asan_shadow_offset
5501
5502 struct gcc_target targetm = TARGET_INITIALIZER;
5503
5504 #include "gt-riscv.h"