/* Definition of RISC-V target for GNU compiler.
- Copyright (C) 2011-2019 Free Software Foundation, Inc.
+ Copyright (C) 2011-2024 Free Software Foundation, Inc.
Contributed by Andrew Waterman (andrew@sifive.com).
Based on MIPS target for GNU compiler.
#ifndef GCC_RISCV_H
#define GCC_RISCV_H
+#include <stdbool.h>
#include "config/riscv/riscv-opts.h"
+#define SWITCHABLE_TARGET 1
+
/* Target CPU builtins. */
#define TARGET_CPU_CPP_BUILTINS() riscv_cpu_cpp_builtins (pfile)
-/* Target CPU versions for D. */
-#define TARGET_D_CPU_VERSIONS riscv_d_target_versions
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#define DEFAULT_ENDIAN_SPEC "b"
+#else
+#define DEFAULT_ENDIAN_SPEC "l"
+#endif
/* Default target_flags if no switches are specified */
#define RISCV_TUNE_STRING_DEFAULT "rocket"
#endif
+extern const char *riscv_expand_arch (int argc, const char **argv);
+extern const char *riscv_expand_arch_from_cpu (int argc, const char **argv);
+extern const char *riscv_default_mtune (int argc, const char **argv);
+extern const char *riscv_multi_lib_check (int argc, const char **argv);
+
+# define EXTRA_SPEC_FUNCTIONS \
+ { "riscv_expand_arch", riscv_expand_arch }, \
+ { "riscv_expand_arch_from_cpu", riscv_expand_arch_from_cpu }, \
+ { "riscv_default_mtune", riscv_default_mtune }, \
+ { "riscv_multi_lib_check", riscv_multi_lib_check },
+
/* Support for a compile-time default CPU, et cetera. The rules are:
- --with-arch is ignored if -march is specified.
+ --with-arch is ignored if -march or -mcpu is specified.
--with-abi is ignored if -mabi is specified.
- --with-tune is ignored if -mtune is specified. */
+ --with-tune is ignored if -mtune or -mcpu is specified.
+ --with-isa-spec is ignored if -misa-spec is specified.
+
+   But the default -march/-mtune values are used if -mcpu does not name a valid option. */
#define OPTION_DEFAULT_SPECS \
- {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
- {"arch", "%{!march=*:-march=%(VALUE)}" }, \
+ {"tune", "%{!mtune=*:" \
+ " %{!mcpu=*:-mtune=%(VALUE)}" \
+ " %{mcpu=*:-mtune=%:riscv_default_mtune(%* %(VALUE))}}" }, \
+ {"arch", "%{!march=*:" \
+ " %{!mcpu=*:-march=%(VALUE)}" \
+ " %{mcpu=*:%:riscv_expand_arch_from_cpu(%* %(VALUE))}}" }, \
{"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
+ {"isa_spec", "%{!misa-spec=*:-misa-spec=%(VALUE)}" }, \
#ifdef IN_LIBGCC2
#undef TARGET_64BIT
#define TARGET_64BIT (__riscv_xlen == 64)
#endif /* IN_LIBGCC2 */
+#ifdef HAVE_AS_MISA_SPEC
+#define ASM_MISA_SPEC "%{misa-spec=*}"
+#else
+#define ASM_MISA_SPEC ""
+#endif
+
+/* Reference:
+   https://gcc.gnu.org/onlinedocs/cpp/Stringizing.html#Stringizing
+   The two-level expansion lets the argument be macro-expanded before it
+   is stringized.  The helper deliberately avoids a leading-double-
+   underscore name, which is reserved for the implementation (C11 7.1.3).  */
+#define STRINGIZING(s) STRINGIZING_1 (s)
+#define STRINGIZING_1(s) #s
+
+#define MULTILIB_DEFAULTS \
+ {"march=" STRINGIZING (TARGET_RISCV_DEFAULT_ARCH), \
+ "mabi=" STRINGIZING (TARGET_RISCV_DEFAULT_ABI) }
+
#undef ASM_SPEC
#define ASM_SPEC "\
%(subtarget_asm_debugging_spec) \
%{" FPIE_OR_FPIC_SPEC ":-fpic} \
%{march=*} \
%{mabi=*} \
-%(subtarget_asm_spec)"
+%{mno-relax} \
+%{mbig-endian} \
+%{mlittle-endian} \
+%(subtarget_asm_spec)" \
+ASM_MISA_SPEC
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+"%{march=*:%:riscv_expand_arch(%*)} " \
+"%{!march=*:%{mcpu=*:%:riscv_expand_arch_from_cpu(%*)}} "
#define TARGET_DEFAULT_CMODEL CM_MEDLOW
#define DWARF_CIE_DATA_ALIGNMENT -4
/* The mapping from gcc register number to DWARF 2 CFA column number. */
-#define DWARF_FRAME_REGNUM(REGNO) \
- (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
+#define DWARF_FRAME_REGNUM(REGNO) \
+ (FRM_REG_P (REGNO) ? RISCV_DWARF_FRM \
+ : VXRM_REG_P (REGNO) ? RISCV_DWARF_VXRM \
+ : VL_REG_P (REGNO) ? RISCV_DWARF_VL \
+ : VTYPE_REG_P (REGNO) \
+ ? RISCV_DWARF_VTYPE \
+ : (GP_REG_P (REGNO) || FP_REG_P (REGNO) || V_REG_P (REGNO) \
+ ? REGNO \
+ : INVALID_REGNUM))
/* The DWARF 2 CFA column which tracks the return address. */
#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
/* Target machine storage layout */
#define BITS_BIG_ENDIAN 0
-#define BYTES_BIG_ENDIAN 0
-#define WORDS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
#define MAX_BITS_PER_WORD 64
/* Width of a word, in units (bytes). */
#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD)
#ifndef IN_LIBGCC2
#define MIN_UNITS_PER_WORD 4
#endif
+/* Allows SImode op in builtin overflow pattern, see internal-fn.cc. */
+#undef TARGET_MIN_ARITHMETIC_PRECISION
+#define TARGET_MIN_ARITHMETIC_PRECISION riscv_min_arithmetic_precision
+
/* The `Q' extension is not yet supported. */
#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4)
+/* Size per vector register. For VLEN = 32, size = poly (4, 4). Otherwise, size = poly (8, 8). */
+#define UNITS_PER_V_REG (riscv_vector_chunks * riscv_bytes_per_vector_chunk)
/* The largest type that can be passed in floating-point registers. */
#define UNITS_PER_FP_ARG \
((riscv_abi == ABI_ILP32 || riscv_abi == ABI_ILP32E \
- || riscv_abi == ABI_LP64) \
+ || riscv_abi == ABI_LP64 || riscv_abi == ABI_LP64E) \
? 0 \
: ((riscv_abi == ABI_ILP32F || riscv_abi == ABI_LP64F) ? 4 : 8))
#define PARM_BOUNDARY BITS_PER_WORD
/* Allocation boundary (in *bits*) for the code of a function. */
-#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
+#define FUNCTION_BOUNDARY ((TARGET_RVC || TARGET_ZCA) ? 16 : 32)
/* The smallest supported stack boundary the calling convention supports. */
#define STACK_BOUNDARY \
- (riscv_abi == ABI_ILP32E ? BITS_PER_WORD : 2 * BITS_PER_WORD)
+ (riscv_abi == ABI_ILP32E || riscv_abi == ABI_LP64E \
+ ? BITS_PER_WORD \
+ : 2 * BITS_PER_WORD)
/* The ABI stack alignment. */
-#define ABI_STACK_BOUNDARY (riscv_abi == ABI_ILP32E ? BITS_PER_WORD : 128)
+#define ABI_STACK_BOUNDARY \
+ (riscv_abi == ABI_ILP32E || riscv_abi == ABI_LP64E \
+ ? BITS_PER_WORD \
+ : 128)
/* There is no point aligning anything to a rounder boundary than this. */
#define BIGGEST_ALIGNMENT 128
mode that should actually be used. We allow pairs of registers. */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)
+/* DATA_ALIGNMENT and LOCAL_ALIGNMENT common definition. */
+#define RISCV_EXPAND_ALIGNMENT(COND, TYPE, ALIGN) \
+ (((COND) && ((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
/* If defined, a C expression to compute the alignment for a static
variable. TYPE is the data type, and ALIGN is the alignment that
the object would ordinarily have. The value of this macro is used
cause character arrays to be word-aligned so that `strcpy' calls
that copy constants to character arrays can be done inline. */
-#define DATA_ALIGNMENT(TYPE, ALIGN) \
- ((((ALIGN) < BITS_PER_WORD) \
- && (TREE_CODE (TYPE) == ARRAY_TYPE \
- || TREE_CODE (TYPE) == UNION_TYPE \
- || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ RISCV_EXPAND_ALIGNMENT (riscv_align_data_type == riscv_align_data_type_xlen, \
+ TYPE, ALIGN)
/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
character arrays to be word-aligned so that `strcpy' calls that copy
constants to character arrays can be done inline, and 'strcmp' can be
optimised to use word loads. */
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
- DATA_ALIGNMENT (TYPE, ALIGN)
+ RISCV_EXPAND_ALIGNMENT (true, TYPE, ALIGN)
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
- 32 floating point registers
- 2 fake registers:
- ARG_POINTER_REGNUM
- - FRAME_POINTER_REGNUM */
+ - FRAME_POINTER_REGNUM
+ - 1 vl register
+ - 1 vtype register
+ - 30 unused registers for future expansion
+ - 32 vector registers */
-#define FIRST_PSEUDO_REGISTER 66
+#define FIRST_PSEUDO_REGISTER 128
/* x0, sp, gp, and tp are fixed. */
#define FIXED_REGISTERS \
{ /* General registers. */ \
- 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
/* Floating-point registers. */ \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
/* Others. */ \
- 1, 1 \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* Vector registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 \
}
/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
#define CALL_USED_REGISTERS \
{ /* General registers. */ \
- 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
/* Floating-point registers. */ \
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
/* Others. */ \
- 1, 1 \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* Vector registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 \
}
+/* Select a register mode required for caller save of hard regno REGNO.
+ Contrary to what is documented, the default is not the smallest suitable
+ mode but the largest suitable mode for the given (REGNO, NREGS) pair and
+ it quickly creates paradoxical subregs that can be problematic. */
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+ ((MODE) == VOIDmode ? choose_hard_reg_mode (REGNO, NREGS, NULL) : (MODE))
+
/* Internal macros to classify an ISA register's type. */
#define GP_REG_FIRST 0
#define FP_REG_LAST 63
#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
+#define V_REG_FIRST 96
+#define V_REG_LAST 127
+#define V_REG_NUM (V_REG_LAST - V_REG_FIRST + 1)
+
/* The DWARF 2 CFA column which tracks the return address from a
signal handler context. This means that to maintain backwards
compatibility, no hard register can be assigned this column if it
((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
#define FP_REG_P(REGNO) \
((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+#define HARDFP_REG_P(REGNO) \
+ ((REGNO) >= FP_REG_FIRST && (REGNO) <= FP_REG_LAST)
+#define V_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - V_REG_FIRST) < V_REG_NUM)
+#define VL_REG_P(REGNO) ((REGNO) == VL_REGNUM)
+#define VTYPE_REG_P(REGNO) ((REGNO) == VTYPE_REGNUM)
+#define VXRM_REG_P(REGNO) ((REGNO) == VXRM_REGNUM)
+#define FRM_REG_P(REGNO) ((REGNO) == FRM_REGNUM)
+
+/* True when REGNO is in SIBCALL_REGS set. */
+#define SIBCALL_REG_P(REGNO) \
+ TEST_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], REGNO)
#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
#define ARG_POINTER_REGNUM 64
#define FRAME_POINTER_REGNUM 65
+/* Define Dwarf for RVV. */
+#define RISCV_DWARF_FRM (4096 + 0x003)
+#define RISCV_DWARF_VXRM (4096 + 0x00a)
+#define RISCV_DWARF_VL (4096 + 0xc20)
+#define RISCV_DWARF_VTYPE (4096 + 0xc21)
+#define RISCV_DWARF_VLENB (4096 + 0xc22)
+
/* Register in which static-chain is passed to a function. */
#define STATIC_CHAIN_REGNUM (GP_TEMP_FIRST + 2)
The epilogue temporary mustn't conflict with the return registers,
the frame pointer, the EH stack adjustment, or the EH data registers. */
-#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST)
#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
+#define RISCV_PROLOGUE_TEMP2_REGNUM (GP_TEMP_FIRST + 1)
+#define RISCV_PROLOGUE_TEMP2(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP2_REGNUM)
+
+#define RISCV_CALL_ADDRESS_TEMP_REGNUM (GP_TEMP_FIRST + 1)
+#define RISCV_CALL_ADDRESS_TEMP(MODE) \
+ gen_rtx_REG (MODE, RISCV_CALL_ADDRESS_TEMP_REGNUM)
+
+#define RETURN_ADDR_MASK (1 << RETURN_ADDR_REGNUM)
+#define S0_MASK (1 << S0_REGNUM)
+#define S1_MASK (1 << S1_REGNUM)
+#define S2_MASK (1 << S2_REGNUM)
+#define S3_MASK (1 << S3_REGNUM)
+#define S4_MASK (1 << S4_REGNUM)
+#define S5_MASK (1 << S5_REGNUM)
+#define S6_MASK (1 << S6_REGNUM)
+#define S7_MASK (1 << S7_REGNUM)
+#define S8_MASK (1 << S8_REGNUM)
+#define S9_MASK (1 << S9_REGNUM)
+#define S10_MASK (1 << S10_REGNUM)
+#define S11_MASK (1 << S11_REGNUM)
+
+#define MULTI_PUSH_GPR_MASK \
+ (RETURN_ADDR_MASK | S0_MASK | S1_MASK | S2_MASK | S3_MASK | S4_MASK \
+ | S5_MASK | S6_MASK | S7_MASK | S8_MASK | S9_MASK | S10_MASK | S11_MASK)
+#define ZCMP_MAX_SPIMM 3
+#define ZCMP_SP_INC_STEP 16
+#define ZCMP_INVALID_S0S10_SREGS_COUNTS 11
+#define ZCMP_S0S11_SREGS_COUNTS 12
+#define ZCMP_MAX_GRP_SLOTS 13
#define MCOUNT_NAME "_mcount"
GR_REGS, /* integer registers */
FP_REGS, /* floating-point registers */
FRAME_REGS, /* arg pointer and frame pointer */
+ VM_REGS, /* v0.t registers */
+ VD_REGS, /* vector registers except v0.t */
+ V_REGS, /* vector registers */
ALL_REGS, /* all registers */
LIM_REG_CLASSES /* max value + 1 */
};
"GR_REGS", \
"FP_REGS", \
"FRAME_REGS", \
+ "VM_REGS", \
+ "VD_REGS", \
+ "V_REGS", \
"ALL_REGS" \
}
#define REG_CLASS_CONTENTS \
{ \
- { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
- { 0xf00000c0, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \
- { 0xffffffc0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
- { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
- { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
- { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
- { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xf003fcc0, 0x00000000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \
+ { 0xffffffc0, 0x00000000, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 }, /* GR_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FP_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000003, 0x00000000 }, /* FRAME_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000001 }, /* V0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0xfffffffe }, /* VNoV0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0xffffffff }, /* V_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000003, 0xffffffff } /* ALL_REGS */ \
}
/* A C expression whose value is a register class containing hard
factor or added to another register (as well as added to a
displacement). */
-#define INDEX_REG_CLASS NO_REGS
+#define INDEX_REG_CLASS riscv_index_reg_class()
/* We generally want to put call-clobbered registers ahead of
call-saved ones. (IRA expects this.) */
60, 61, 62, 63, \
/* Call-saved FPRs. */ \
40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
+ /* v1 ~ v31 vector registers. */ \
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, \
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, \
+ 124, 125, 126, 127, \
+ /* The vector mask register. */ \
+ 96, \
/* None of the remaining classes have defined call-saved \
registers. */ \
- 64, 65 \
+ 64, 65, 66, 67 \
}
/* True if VALUE is a signed 12-bit number. */
#define SMALL_OPERAND(VALUE) \
((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
+/* True if POLY_VALUE is a compile-time constant poly_int that fits in a
+   signed 12-bit immediate.  The parameter is parenthesized so that any
+   expression (not just a primary expression) may be passed.  */
+#define POLY_SMALL_OPERAND_P(POLY_VALUE) \
+  ((POLY_VALUE).is_constant () \
+   ? SMALL_OPERAND ((POLY_VALUE).to_constant ()) : false)
+
/* True if VALUE can be loaded into a register using LUI. */
#define LUI_OPERAND(VALUE) \
(((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH) \
|| ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
+/* If this is a single bit mask, then we can load it with bseti. Special
+ handling of SImode 0x80000000 on RV64 is done in riscv_build_integer_1. */
+#define SINGLE_BIT_MASK_OPERAND(VALUE) \
+ (pow2p_hwi (TARGET_64BIT \
+ ? (VALUE) \
+ : ((VALUE) & ((HOST_WIDE_INT_1U << 32)-1))))
+
+/* True if VALUE can be represented as an immediate with 1 extra bit
+   set: we check that it is not a SMALL_OPERAND (as this would be true
+   for all small operands) unmodified and turns into a small operand
+   once we clear the top bit.  VALUE is parenthesized at each use for
+   macro hygiene; note it is evaluated more than once, so do not pass
+   an expression with side effects.  */
+#define UIMM_EXTRA_BIT_OPERAND(VALUE) \
+  (!SMALL_OPERAND (VALUE) \
+   && SMALL_OPERAND ((VALUE) & ~(HOST_WIDE_INT_1U << floor_log2 (VALUE))))
+
/* Stack layout; function entry, exit and calling. */
#define STACK_GROWS_DOWNWARD 1
#define GP_RETURN GP_ARG_FIRST
#define FP_RETURN (UNITS_PER_FP_ARG == 0 ? GP_RETURN : FP_ARG_FIRST)
-#define MAX_ARGS_IN_REGISTERS (riscv_abi == ABI_ILP32E ? 6 : 8)
+#define MAX_ARGS_IN_REGISTERS \
+ (riscv_abi == ABI_ILP32E || riscv_abi == ABI_LP64E \
+ ? 6 \
+ : 8)
+
+#define MAX_ARGS_IN_VECTOR_REGISTERS (16)
+#define MAX_ARGS_IN_MASK_REGISTERS (1)
/* Symbolic macros for the first/last argument registers. */
#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
#define FP_ARG_FIRST (FP_REG_FIRST + 10)
#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+#define V_ARG_FIRST (V_REG_FIRST + 8)
+#define V_ARG_LAST (V_ARG_FIRST + MAX_ARGS_IN_VECTOR_REGISTERS - 1)
#define CALLEE_SAVED_REG_NUMBER(REGNO) \
((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 : \
(REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
+#define CALLEE_SAVED_FREG_NUMBER(REGNO) CALLEE_SAVED_REG_NUMBER (REGNO - 32)
+
#define LIBCALL_VALUE(MODE) \
riscv_function_value (NULL_TREE, NULL_TREE, MODE)
(IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \
|| (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST)))
+/* Define the standard RISC-V calling convention and variants. */
+
+enum riscv_cc
+{
+ RISCV_CC_BASE = 0, /* Base standard RISC-V ABI. */
+ RISCV_CC_V, /* For functions that pass or return values in V registers. */
+ RISCV_CC_UNKNOWN
+};
+
typedef struct {
+ /* The calling convention that current function used. */
+ enum riscv_cc variant_cc;
+
/* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
unsigned int num_gprs;
/* Number of floating-point registers used so far, likewise. */
unsigned int num_fprs;
+
+ int rvv_psabi_warning;
+
+ /* Number of mask registers used so far, up to MAX_ARGS_IN_MASK_REGISTERS. */
+ unsigned int num_mrs;
+
+ /* The used state of args in vector registers, true for used by prev arg,
+ initial to false. */
+ bool used_vrs[MAX_ARGS_IN_VECTOR_REGISTERS];
} CUMULATIVE_ARGS;
+/* Return riscv calling convention of call_insn. */
+extern enum riscv_cc get_riscv_cc (const rtx use);
+
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
- memset (&(CUM), 0, sizeof (CUM))
+ riscv_init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (INDIRECT), \
+ (N_NAMED_ARGS) != -1)
#define EPILOGUE_USES(REGNO) riscv_epilogue_uses (REGNO)
/* Addressing modes, and classification of registers for them. */
-#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ riscv_regno_ok_for_index_p (REGNO)
+
#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
#define CASE_VECTOR_MODE SImode
#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
+#define LOCAL_SYM_P(sym) \
+ ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym)) \
+ || ((GET_CODE (sym) == CONST) \
+ && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0)) \
+ && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0))))
+
/* The load-address macro is used for PC-relative addressing of symbols
that bind locally. Don't use it for symbols that should be addressed
via the GOT. Also, avoid it for CM_MEDLOW, where LUI addressing
currently results in more opportunities for linker relaxation. */
#define USE_LOAD_ADDRESS_MACRO(sym) \
(!TARGET_EXPLICIT_RELOCS && \
- ((flag_pic \
- && ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym)) \
- || ((GET_CODE (sym) == CONST) \
- && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0)) \
- && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0))))) \
- || riscv_cmodel == CM_MEDANY))
+ ((flag_pic && LOCAL_SYM_P (sym)) || riscv_cmodel == CM_MEDANY))
/* Define this as 1 if `char' should by default be signed; else as 0. */
#define DEFAULT_SIGNED_CHAR 0
#define Pmode word_mode
+/* Specify the machine mode that registers have. */
+
+#define Xmode (TARGET_64BIT ? DImode : SImode)
+
/* Give call MEMs SImode since it is the "most permissive" mode
for both 32-bit and 64-bit targets. */
#define BRANCH_COST(speed_p, predictable_p) \
((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
+/* True if the target optimizes short forward branches around integer
+ arithmetic instructions into predicated operations, e.g., for
+ conditional-move operations. The macro assumes that all branch
+ instructions (BEQ, BNE, BLT, BLTU, BGE, BGEU, C.BEQZ, and C.BNEZ)
+ support this feature. The macro further assumes that any integer
+ arithmetic and logical operation (ADD[I], SUB, SLL[I], SRL[I], SRA[I],
+ SLT[I][U], AND[I], XOR[I], OR[I], LUI, AUIPC, and their compressed
+ counterparts, including C.MV and C.LI) can be in the branch shadow. */
+
+#define TARGET_SFB_ALU (riscv_microarchitecture == sifive_7)
+
#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
/* Control the assembler format that we output. */
"fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
"fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
"fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
- "arg", "frame", }
+ "arg", "frame", "vl", "vtype", "vxrm", "frm", "N/A", "N/A", \
+ "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", \
+ "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", \
+ "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", "N/A", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",}
#define ADDITIONAL_REGISTER_NAMES \
{ \
#define ASM_COMMENT_START "#"
+/* Add output .variant_cc directive for specific function definition. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STR, NAME, DECL) \
+ riscv_declare_function_name (STR, NAME, DECL)
+
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ riscv_declare_function_size (FILE, FNAME, DECL)
+
+/* Add output .variant_cc directive for specific alias definition. */
+#undef ASM_OUTPUT_DEF_FROM_DECLS
+#define ASM_OUTPUT_DEF_FROM_DECLS(STR, DECL, TARGET) \
+ riscv_asm_output_alias (STR, DECL, TARGET)
+
+/* Add output .variant_cc directive for specific extern function. */
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(STR, DECL, NAME) \
+ riscv_asm_output_external (STR, DECL, NAME)
+
#undef SIZE_TYPE
#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
-/* The maximum number of bytes copied by one iteration of a movmemsi loop. */
+/* The maximum number of bytes copied by one iteration of a cpymemsi loop. */
#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
/* The maximum number of bytes that can be copied by a straight-line
- movmemsi implementation. */
+ cpymemsi implementation. */
#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 3)
/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction pairs, we will do a movmem or libcall instead.
+ move-instruction pairs, we will do a cpymem or libcall instead.
Do not use move_by_pieces at all when strict alignment is not
in effect but the target has slow unaligned accesses; in this
- case, movmem or libcall is more efficient. */
+ case, cpymem or libcall is more efficient. */
#define MOVE_RATIO(speed) \
(!STRICT_ALIGNMENT && riscv_slow_unaligned_access_p ? 1 : \
#ifndef USED_FOR_TARGET
extern const enum reg_class riscv_regno_to_class[];
extern bool riscv_slow_unaligned_access_p;
+extern bool riscv_user_wants_strict_align;
extern unsigned riscv_stack_boundary;
+extern unsigned riscv_bytes_per_vector_chunk;
+extern poly_uint16 riscv_vector_chunks;
+extern poly_int64 riscv_v_adjust_nunits (enum machine_mode, int);
+extern poly_int64 riscv_v_adjust_nunits (machine_mode, bool, int, int);
+extern poly_int64 riscv_v_adjust_precision (enum machine_mode, int);
+extern poly_int64 riscv_v_adjust_bytesize (enum machine_mode, int);
+/* The number of bits and bytes in an RVV vector.  */
+#define BITS_PER_RISCV_VECTOR \
+  (poly_uint16 (riscv_vector_chunks * riscv_bytes_per_vector_chunk * 8))
+#define BYTES_PER_RISCV_VECTOR \
+  (poly_uint16 (riscv_vector_chunks * riscv_bytes_per_vector_chunk))
#endif
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
"%{mabi=ilp32f:ilp32f}" \
"%{mabi=ilp32d:ilp32d}" \
"%{mabi=lp64:lp64}" \
+ "%{mabi=lp64e:lp64e}" \
"%{mabi=lp64f:lp64f}" \
"%{mabi=lp64d:lp64d}" \
-#define STARTFILE_PREFIX_SPEC \
- "/lib" XLEN_SPEC "/" ABI_SPEC "/ " \
- "/usr/lib" XLEN_SPEC "/" ABI_SPEC "/ " \
- "/lib/ " \
- "/usr/lib/ "
-
/* ISA constants needed for code generation. */
#define OPCODE_LW 0x2003
#define OPCODE_LD 0x3003
#define SHIFT_RS1 15
#define SHIFT_IMM 20
#define IMM_BITS 12
+#define C_S_BITS 5
#define C_SxSP_BITS 6
#define IMM_REACH (1LL << IMM_BITS)
#define SWSP_REACH (4LL << C_SxSP_BITS)
#define SDSP_REACH (8LL << C_SxSP_BITS)
+/* This is the maximum value that can be represented in a compressed load/store
+ offset (an unsigned 5-bit value scaled by 4). */
+#define CSW_MAX_OFFSET (((4LL << C_S_BITS) - 1) & ~3)
+
+/* Called from RISCV_REORG, this is defined in riscv-sr.cc. */
+
+extern void riscv_remove_unneeded_save_restore_calls (void);
+
+#define HARD_REGNO_RENAME_OK(FROM, TO) riscv_hard_regno_rename_ok (FROM, TO)
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+
+#define TARGET_SUPPORTS_WIDE_INT 1
+
+#define REGISTER_TARGET_PRAGMAS() riscv_register_pragmas ()
+
+#define REGMODE_NATURAL_SIZE(MODE) riscv_regmode_natural_size (MODE)
+
+/* NOTE(review): a duplicate, identical definition of RISCV_DWARF_VLENB was
+   removed here; the macro is defined once with the other RISC-V DWARF
+   register numbers earlier in this file.  */
+
+#define DWARF_FRAME_REGISTERS (FIRST_PSEUDO_REGISTER + 1 /* VLENB */)
+
+/* Map a DWARF register number to its unwind-table column: the VLENB
+   pseudo-register uses the one extra column past the hard registers
+   (see DWARF_FRAME_REGISTERS); everything else maps to itself.
+   REGNO is parenthesized for macro hygiene.  */
+#define DWARF_REG_TO_UNWIND_COLUMN(REGNO) \
+  (((REGNO) == RISCV_DWARF_VLENB) ? (FIRST_PSEUDO_REGISTER + 1) : (REGNO))
+
+/* Like s390, RISC-V also defines this macro for vector comparisons.  The
+   simplify-rtx relational_result will then canonicalize the result to
+   CONST1_RTX for the simplification. */
+#define VECTOR_STORE_FLAG_VALUE(MODE) CONSTM1_RTX (GET_MODE_INNER (MODE))
+
+/* Mode switching (Lazy code motion) for RVV rounding mode instructions. */
+#define OPTIMIZE_MODE_SWITCHING(ENTITY) (TARGET_VECTOR)
+#define NUM_MODES_FOR_MODE_SWITCHING {VXRM_MODE_NONE, riscv_vector::FRM_NONE}
+
+/* The size difference between different RVV modes can be up to 64 times.
+ e.g. RVVMF64BI vs RVVMF1BI on zvl512b, which is [1, 1] vs [64, 64]. */
+#define MAX_POLY_VARIANT 64
+
+#define HAVE_POST_MODIFY_DISP TARGET_XTHEADMEMIDX
+#define HAVE_PRE_MODIFY_DISP TARGET_XTHEADMEMIDX
+
#endif /* ! GCC_RISCV_H */