if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
{
if (!cfun->machine->reg_is_wrapped_separately[regno])
- loongarch_save_restore_reg (word_mode, regno, offset, fn);
+ loongarch_save_restore_reg (mode, regno, offset, fn);
offset -= GET_MODE_SIZE (mode);
}
bool
loongarch_explicit_relocs_p (enum loongarch_symbol_type type)
{
+ if (TARGET_32BIT)
+ return false;
+
if (la_opt_explicit_relocs != EXPLICIT_RELOCS_AUTO)
return la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS;
or check that X is a signed 16-bit number
and offset 4 byte aligned. */
if (!(const_arith_operand (x, Pmode)
- || ((mode == E_SImode || mode == E_DImode)
+ /* FIXME: LA32 atomic insns support 16-bit imm. */
+ || (TARGET_64BIT
+ && (mode == E_SImode || mode == E_DImode)
&& const_imm16_operand (x, Pmode)
&& (loongarch_signed_immediate_p (INTVAL (x), 14, 2)))))
return false;
loongarch_valid_lo_sum_p (enum loongarch_symbol_type symbol_type,
machine_mode mode, rtx x)
{
- int align, size;
+ int align, size, word_size;
/* Check that symbols of type SYMBOL_TYPE can be used to access values
of mode MODE. */
/* We may need to split multiword moves, so make sure that each word
can be accessed without inducing a carry. */
- if (size > BITS_PER_WORD
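+ /* For floating-point modes, check against the hardware FP value width
+ rather than the GPR word size. */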
+ word_size = (GET_MODE_CLASS (mode) == MODE_FLOAT
+ ? (UNITS_PER_HWFPVALUE * BITS_PER_UNIT)
+ : BITS_PER_WORD);
+ if (size > word_size
&& (!TARGET_STRICT_ALIGN || size > align))
return false;
&& contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
index = SUBREG_REG (index);
- if (loongarch_valid_base_register_p (index, mode, strict_p))
+ /* LA32 does not provide LDX/STX. */
+ if (TARGET_64BIT && loongarch_valid_base_register_p (index, mode, strict_p))
{
info->type = ADDRESS_REG_REG;
info->offset = index;
{
rtx call;
- if (HAVE_AS_SUPPORT_CALL36)
- call = gen_call_value_internal (v0, loongarch_tls_symbol,
- const0_rtx);
- else
- {
- rtx reg = gen_reg_rtx (Pmode);
- emit_insn (gen_pcalau12i (Pmode, reg,
- loongarch_tls_symbol));
- call = gen_call_value_internal_1 (Pmode, v0, reg,
- loongarch_tls_symbol,
- const0_rtx);
- }
- insn = emit_call_insn (call);
+ /* Use call36 or call30.
+ TARGET_32BIT always supports call30. */
+ if ((TARGET_64BIT && HAVE_AS_SUPPORT_CALL36)
+ || TARGET_32BIT)
+ call = gen_call_value_internal (v0, loongarch_tls_symbol,
+ const0_rtx);
+ else
+ {
+ rtx reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_pcalau12i (Pmode, reg,
+ loongarch_tls_symbol));
+ call = gen_call_value_internal_1 (Pmode, v0, reg,
+ loongarch_tls_symbol,
+ const0_rtx);
+ }
+ insn = emit_call_insn (call);
}
else
{
if (offset != 0)
{
/* Handle (plus (plus (mult (a) (mem_shadd_constant)) (fp)) (C)) case. */
- if (GET_CODE (base) == PLUS && mem_shadd_or_shadd_rtx_p (XEXP (base, 0))
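+ /* The shift-add (alsl) form is only used with TARGET_64BIT or
+ TARGET_32BIT_S. */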
+ if ((TARGET_64BIT || TARGET_32BIT_S)
+ && GET_CODE (base) == PLUS
+ && mem_shadd_or_shadd_rtx_p (XEXP (base, 0))
&& IMM12_OPERAND (offset))
{
rtx index = XEXP (base, 0);
void
loongarch_split_move (rtx dest, rtx src)
{
+ rtx low_dest;
+
gcc_checking_assert (loongarch_split_move_p (dest, src));
if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))
|| LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
loongarch_split_vector_move (dest, src);
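+ /* Doubleword moves that involve an FPR are handled by the dedicated
+ doubleword move patterns instead of being split into two word moves. */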
+ else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
+ {
+ if (TARGET_32BIT && GET_MODE (dest) == DImode)
+ emit_insn (gen_move_doubleword_2_di (dest, src));
+ else if (TARGET_32BIT && GET_MODE (dest) == DFmode)
+ emit_insn (gen_move_doubleword_2_df (dest, src));
+ else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
+ emit_insn (gen_move_doubleword_2_tf (dest, src));
+ else
+ gcc_unreachable ();
+ }
else
- gcc_unreachable ();
+ {
+ /* The operation can be split into two normal moves. Decide in
+ which order to do them. */
+ low_dest = loongarch_subword (dest, false);
+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
+ {
+ loongarch_emit_move (loongarch_subword (dest, true),
+ loongarch_subword (src, true));
+ loongarch_emit_move (low_dest, loongarch_subword (src, false));
+ }
+ else
+ {
+ loongarch_emit_move (low_dest, loongarch_subword (src, false));
+ loongarch_emit_move (loongarch_subword (dest, true),
+ loongarch_subword (src, true));
+ }
+ }
}
/* Check if adding an integer constant value for a specific mode can be
}
};
+ gcc_assert (TARGET_64BIT);
return insn[ldr][index];
}
/* Matching address type with a 12bit offset and
ADDRESS_LO_SUM. */
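+ /* A bare base register is also matched here, since the stptr.w path
+ below is LA64-only. */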
if (const_arith_operand (offset, Pmode)
- || GET_CODE (offset) == LO_SUM)
+ || GET_CODE (offset) == LO_SUM
+ || GET_CODE (XEXP (dest, 0)) == REG)
return "st.w\t%z1,%0";
else
- return "stptr.w\t%z1,%0";
+ {
+ gcc_assert (TARGET_64BIT);
+ return "stptr.w\t%z1,%0";
+ }
case 8:
if (const_arith_operand (offset, Pmode)
|| GET_CODE (offset) == LO_SUM)
/* Matching address type with a 12bit offset and
ADDRESS_LO_SUM. */
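+ /* A bare base register is also matched here, since the ldptr.w path
+ below is LA64-only. */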
if (const_arith_operand (offset, Pmode)
- || GET_CODE (offset) == LO_SUM)
+ || GET_CODE (offset) == LO_SUM
+ || GET_CODE (XEXP (src, 0)) == REG)
return "ld.w\t%0,%1";
else
- return "ldptr.w\t%0,%1";
+ {
+ gcc_assert (TARGET_64BIT);
+ return "ldptr.w\t%0,%1";
+ }
case 8:
if (const_arith_operand (offset, Pmode)
|| GET_CODE (offset) == LO_SUM)
bool inverted_p)
{
const char *branch[2];
- if (operands[3] == const0_rtx)
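+ /* Compare-with-zero branch forms are only used with TARGET_64BIT or
+ TARGET_32BIT_S. */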
+ if ((TARGET_64BIT || TARGET_32BIT_S)
+ && operands[3] == const0_rtx)
{
branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0");
branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0");
/* Build up the code in TRAMPOLINE. */
i = 0;
- /*pcaddi $static_chain,0
+ /*pcaddu12i $static_chain,0
ld.[dw] $tmp,$static_chain,target_function_offset
ld.[dw] $static_chain,$static_chain,static_chain_offset
jirl $r0,$tmp,0 */
- trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST));
+ trampoline[i++] = OP (0x1c000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST));
trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000)
| 19 /* $t7 */
| ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5)
/* We can wrap general registers saved at [sp, sp + 32768) using the
ldptr/stptr instructions. For large offsets a pseudo register
might be needed which cannot be created during the shrink
- wrapping pass.
-
- TODO: This may need a revise when we add LA32 as ldptr.w is not
- guaranteed available by the manual. */
- if (offset < 32768)
+ wrapping pass. */
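+ /* On LA32 only the 12-bit ld/st offset range is used, as ldptr.w and
+ stptr.w are not guaranteed to be available there. */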
+ if ((TARGET_64BIT && IMM16_OPERAND (offset))
+ || IMM12_OPERAND (offset))
bitmap_set_bit (components, regno);
offset -= UNITS_PER_WORD;
loongarch_c_mode_for_floating_type (enum tree_index ti)
{
if (ti == TI_LONG_DOUBLE_TYPE)
- return TARGET_64BIT ? TFmode : DFmode;
+ return TFmode;
return default_mode_for_floating_type (ti);
}
bool
loongarch_bitint_type_info (int n, struct bitint_info *info)
{
+ /* LA32 does not support BitInt. */
+ if (TARGET_32BIT)
+ return false;
+
if (n <= 8)
info->limb_mode = QImode;
else if (n <= 16)
#define LONG_LONG_TYPE_SIZE 64
/* LONG_DOUBLE_TYPE_SIZE get poisoned, so add LA_ prefix. */
-#define LA_LONG_DOUBLE_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
+#define LA_LONG_DOUBLE_TYPE_SIZE 128
/* Define the sizes of fixed-point types. */
#define SHORT_FRACT_TYPE_SIZE 8
#define LONG_ACCUM_TYPE_SIZE 64
#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
-/* long double is not a fixed mode, but the idea is that, if we
- support long double, we also want a 128-bit integer type. */
-#define MAX_FIXED_MODE_SIZE LA_LONG_DOUBLE_TYPE_SIZE
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)
/* Width in bits of a pointer. */
#ifndef POINTER_SIZE
#define REG_PARM_STACK_SPACE(FNDECL) 0
+/* Return structures no larger than 2 * GRLEN bits in registers when possible. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
/* Define this if it is the responsibility of the caller to
allocate the area reserved for arguments passed in registers.
If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
`crtl->outgoing_args_size'. */
#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
-#define STACK_BOUNDARY (TARGET_ABI_LP64 ? 128 : 64)
+#define STACK_BOUNDARY 128
/* This value controls how many pages we manually unroll the loop for when
generating stack clash probes. */
/* Treat LOC as a byte offset from the stack pointer and round it up
to the next fully-aligned offset. */
-#define LARCH_STACK_ALIGN(LOC) \
- (TARGET_ABI_LP64 ? ROUND_UP ((LOC), 16) : ROUND_UP ((LOC), 8))
+#define LARCH_STACK_ALIGN(LOC) ROUND_UP ((LOC), 16)
#define MCOUNT_NAME "_mcount"
#define TRAMPOLINE_CODE_SIZE 16
#define TRAMPOLINE_SIZE \
- ((Pmode == SImode) ? TRAMPOLINE_CODE_SIZE \
- : (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2))
+ (TRAMPOLINE_CODE_SIZE + GET_MODE_SIZE (ptr_mode) * 2)
#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
/* loongarch_trampoline_init calls this library function to flush