/* Subroutines for insn-output.c for HPPA.
- Copyright (C) 1992-2017 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
This file is part of GCC.
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#define IN_TARGET_CODE 1
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
+#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
-static int compute_movmem_length (rtx_insn *);
+static int compute_cpymem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
-static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
+static void pa_output_function_prologue (FILE *) ATTRIBUTE_UNUSED;
+static void pa_linux_output_function_prologue (FILE *) ATTRIBUTE_UNUSED;
static void update_total_code_bytes (unsigned int);
-static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static void pa_output_function_epilogue (FILE *);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
-static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
-static bool pa_scalar_mode_supported_p (machine_mode);
+static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
-#ifdef ASM_OUTPUT_EXTERNAL_REAL
-static void pa_hpux_file_end (void);
-#endif
+static void pa_file_end (void);
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
-static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
- const_tree, bool);
-static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
- tree, bool);
-static void pa_function_arg_advance (cumulative_args_t, machine_mode,
- const_tree, bool);
-static rtx pa_function_arg (cumulative_args_t, machine_mode,
- const_tree, bool);
+static bool pa_pass_by_reference (cumulative_args_t,
+ const function_arg_info &);
+static int pa_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
+static void pa_function_arg_advance (cumulative_args_t,
+ const function_arg_info &);
+static rtx pa_function_arg (cumulative_args_t, const function_arg_info &);
+static pad_direction pa_function_arg_padding (machine_mode, const_tree);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
machine_mode,
secondary_reload_info *);
+static bool pa_secondary_memory_needed (machine_mode,
+ reg_class_t, reg_class_t);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
machine_mode, int *,
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
-static bool pa_callee_copies (cumulative_args_t, machine_mode,
- const_tree, bool);
+static bool pa_callee_copies (cumulative_args_t, const function_arg_info &);
+static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
+static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
+static bool pa_modes_tieable_p (machine_mode, machine_mode);
+static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
+static HOST_WIDE_INT pa_starting_frame_offset (void);
/* The following extra sections are only used for SOM. */
static GTY(()) section *som_readonly_data_section;
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer
-#undef TARGET_ASM_FUNCTION_PROLOGUE
-#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
-#undef TARGET_SCHED_ADJUST_PRIORITY
-#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcalls
#undef TARGET_ASM_FILE_END
-#ifdef ASM_OUTPUT_EXTERNAL_REAL
-#define TARGET_ASM_FILE_END pa_hpux_file_end
-#else
-#define TARGET_ASM_FILE_END output_deferred_plabels
-#endif
+#define TARGET_ASM_FILE_END pa_file_end
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
+#undef TARGET_FUNCTION_ARG_PADDING
+#define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload
+#undef TARGET_SECONDARY_MEMORY_NEEDED
+#define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed
#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false
+#undef TARGET_HARD_REGNO_NREGS
+#define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
+#undef TARGET_HARD_REGNO_MODE_OK
+#define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
+#undef TARGET_MODES_TIEABLE_P
+#define TARGET_MODES_TIEABLE_P pa_modes_tieable_p
+
+#undef TARGET_CAN_CHANGE_MODE_CLASS
+#define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class
+
+#undef TARGET_CONSTANT_ALIGNMENT
+#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
+
+#undef TARGET_STARTING_FRAME_OFFSET
+#define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset
+
+#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
+#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string. */
dash = strchr (str, '-');
if (!dash)
{
- warning (0, "value of -mfixed-range must have form REG1-REG2");
+ warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
return;
}
*dash = '\0';
if (! TARGET_GAS && write_symbols != NO_DEBUG)
{
- warning (0, "-g is only supported when using GAS on this processor,");
- warning (0, "-g option disabled");
+ warning (0, "%<-g%> is only supported when using GAS on this processor");
+ warning (0, "%<-g%> option disabled");
write_symbols = NO_DEBUG;
}
if (flag_reorder_blocks_and_partition)
{
inform (input_location,
- "-freorder-blocks-and-partition does not work "
- "on this architecture");
+ "%<-freorder-blocks-and-partition%> does not work "
+ "on this architecture");
flag_reorder_blocks_and_partition = 0;
flag_reorder_blocks = 1;
}
int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);
switch (fcode)
{
ret = gen_reg_rtx (Pmode);
emit_library_call_value (gen_tls_get_addr (), ret,
- LCT_CONST, Pmode, 1, arg, Pmode);
+ LCT_CONST, Pmode, arg, Pmode);
return ret;
}
/* If the newoffset will not fit in 14 bits (ldo), then
handling this would take 4 or 5 instructions (2 to load
the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
- add the new offset and the SYMBOL_REF.) Combine can
- not handle 4->2 or 5->2 combinations, so do not create
+ add the new offset and the SYMBOL_REF.) Combine cannot
+ handle 4->2 or 5->2 combinations, so do not create
them. */
if (! VAL_14_BITS_P (newoffset)
&& GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
}
else
emit_move_insn (scratch_reg, XEXP (op1, 0));
- emit_insn (gen_rtx_SET (operand0,
- replace_equiv_address (op1, scratch_reg)));
- return 1;
+ op1 = replace_equiv_address (op1, scratch_reg);
}
}
else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
/* Load memory address into SCRATCH_REG. */
scratch_reg = force_mode (word_mode, scratch_reg);
emit_move_insn (scratch_reg, XEXP (op1, 0));
- emit_insn (gen_rtx_SET (operand0,
- replace_equiv_address (op1, scratch_reg)));
- return 1;
+ op1 = replace_equiv_address (op1, scratch_reg);
}
+ emit_insn (gen_rtx_SET (operand0, op1));
+ return 1;
}
else if (scratch_reg
&& FP_REG_P (operand1)
}
else
emit_move_insn (scratch_reg, XEXP (op0, 0));
- emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
- operand1));
- return 1;
+ op0 = replace_equiv_address (op0, scratch_reg);
}
}
else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
/* Load memory address into SCRATCH_REG. */
scratch_reg = force_mode (word_mode, scratch_reg);
emit_move_insn (scratch_reg, XEXP (op0, 0));
- emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
- operand1));
- return 1;
+ op0 = replace_equiv_address (op0, scratch_reg);
}
+ emit_insn (gen_rtx_SET (op0, operand1));
+ return 1;
}
/* Handle secondary reloads for loads of FP registers from constant
expressions by forcing the constant into memory. For the most part,
count insns rather than emit them. */
static int
-compute_movmem_length (rtx_insn *insn)
+compute_cpymem_length (rtx_insn *insn)
{
rtx pat = PATTERN (insn);
unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
}
HOST_WIDE_INT
-pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
+pa_compute_frame_size (poly_int64 size, int *fregs_live)
{
int freg_saved = 0;
int i, j;
size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
/* Space for previous frame pointer + filler. If any frame is
- allocated, we need to add in the STARTING_FRAME_OFFSET. We
+ allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
waste some space here for the sake of HP compatibility. The
first slot is only used when the frame pointer is needed. */
if (size || frame_pointer_needed)
- size += STARTING_FRAME_OFFSET;
+ size += pa_starting_frame_offset ();
/* If the current function calls __builtin_eh_return, then we need
to allocate stack space for registers that will hold data for
& ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
-/* Generate the assembly code for function entry. FILE is a stdio
- stream to output the code to. SIZE is an int: how many units of
- temporary storage to allocate.
-
- Refer to the array `regs_ever_live' to determine which registers to
- save; `regs_ever_live[I]' is nonzero if register number I is ever
- used in the function. This function is responsible for knowing
- which registers should not be saved even if used. */
-
-/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
- of memory. If any fpu reg is used in the function, we allocate
- such a block here, at the bottom of the frame, just in case it's needed.
-
- If this function is a leaf procedure, then we may choose not
- to do a "save" insn. The decision about whether or not
- to do this is made in regclass.c. */
+/* Output function label, and associated .PROC and .CALLINFO statements. */
-static void
-pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+void
+pa_output_function_label (FILE *file)
{
/* The function's label and associated .PROC must never be
separated and must be output *after* any profiling declarations
fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
fputs ("\n\t.ENTRY\n", file);
+}
+/* Output function prologue.  Emits the function label together with its
+   associated .PROC/.CALLINFO directives (via pa_output_function_label),
+   then removes useless addtr insns before final output.  Used on targets
+   where ASM_DECLARE_FUNCTION_NAME does not already emit the label.  */
+
+static void
+pa_output_function_prologue (FILE *file)
+{
+ pa_output_function_label (file);
+ remove_useless_addtr_insns (0);
+}
+
+/* Output function prologue for linux targets.  There the function label
+   is already output by ASM_DECLARE_FUNCTION_NAME, so only the useless
+   addtr-insn cleanup pass is run here.  */
+
+static void
+pa_linux_output_function_prologue (FILE *file ATTRIBUTE_UNUSED)
+{
remove_useless_addtr_insns (0);
}
and must be changed in tandem with this code. */
local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
if (local_fsize || frame_pointer_needed)
- local_fsize += STARTING_FRAME_OFFSET;
+ local_fsize += pa_starting_frame_offset ();
actual_fsize = pa_compute_frame_size (size, &save_fregs);
if (flag_stack_usage_info)
the callee registers. */
if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
merge_sp_adjust_with_store = 1;
- /* Can not optimize. Adjust the stack frame by actual_fsize
+ /* Cannot optimize. Adjust the stack frame by actual_fsize
bytes. */
else
set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
}
for (i = 18; i >= 4; i--)
- if (df_regs_ever_live_p (i) && ! call_used_regs[i])
+ if (df_regs_ever_live_p (i) && !call_used_or_fixed_reg_p (i))
{
store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
}
for (i = 18; i >= 3; i--)
- if (df_regs_ever_live_p (i) && ! call_used_regs[i])
+ if (df_regs_ever_live_p (i) && !call_used_or_fixed_reg_p (i))
{
/* If merge_sp_adjust_with_store is nonzero, then we can
optimize the first GR save. */
adjustments before returning. */
static void
-pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+pa_output_function_epilogue (FILE *file)
{
rtx_insn *insn = get_last_insn ();
bool extra_nop;
}
for (i = 18; i >= 4; i--)
- if (df_regs_ever_live_p (i) && ! call_used_regs[i])
+ if (df_regs_ever_live_p (i) && !call_used_or_fixed_reg_p (i))
{
load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
for (i = 18; i >= 3; i--)
{
- if (df_regs_ever_live_p (i) && ! call_used_regs[i])
+ if (df_regs_ever_live_p (i) && !call_used_or_fixed_reg_p (i))
{
/* Only for the first load.
merge_sp_adjust_with_load holds the register load
void
hppa_profile_hook (int label_no)
{
- /* We use SImode for the address of the function in both 32 and
- 64-bit code to avoid having to provide DImode versions of the
- lcla2 and load_offset_label_address insn patterns. */
- rtx reg = gen_reg_rtx (SImode);
rtx_code_label *label_rtx = gen_label_rtx ();
- rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
- rtx arg_bytes, begin_label_rtx;
+ rtx arg_bytes, begin_label_rtx, mcount, sym;
rtx_insn *call_insn;
char begin_label_name[16];
bool use_mcount_pcrel_call;
+ /* Set up call destination. */
+ sym = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
+ pa_encode_label (sym);
+ mcount = gen_rtx_MEM (Pmode, sym);
+
/* If we can reach _mcount with a pc-relative call, we can optimize
loading the address of the current function. This requires linker
long branch stub support. */
if (!use_mcount_pcrel_call)
{
/* The address of the function is loaded into %r25 with an instruction-
- relative sequence that avoids the use of relocations. The sequence
- is split so that the load_offset_label_address instruction can
- occupy the delay slot of the call to _mcount. */
+ relative sequence that avoids the use of relocations. We use SImode
+ for the address of the function in both 32 and 64-bit code to avoid
+ having to provide DImode versions of the lcla2 pattern. */
if (TARGET_PA_20)
- emit_insn (gen_lcla2 (reg, label_rtx));
+ emit_insn (gen_lcla2 (gen_rtx_REG (SImode, 25), label_rtx));
else
- emit_insn (gen_lcla1 (reg, label_rtx));
-
- emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
- reg,
- begin_label_rtx,
- label_rtx));
+ emit_insn (gen_lcla1 (gen_rtx_REG (SImode, 25), label_rtx));
}
if (!NO_DEFERRED_PROFILE_COUNTERS)
}
}
-/* Adjust scheduling priorities. We use this to try and keep addil
- and the next use of %r1 close together. */
-static int
-pa_adjust_priority (rtx_insn *insn, int priority)
-{
- rtx set = single_set (insn);
- rtx src, dest;
- if (set)
- {
- src = SET_SRC (set);
- dest = SET_DEST (set);
- if (GET_CODE (src) == LO_SUM
- && symbolic_operand (XEXP (src, 1), VOIDmode)
- && ! read_only_operand (XEXP (src, 1), VOIDmode))
- priority >>= 3;
-
- else if (GET_CODE (src) == MEM
- && GET_CODE (XEXP (src, 0)) == LO_SUM
- && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
- && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
- priority >>= 1;
-
- else if (GET_CODE (dest) == MEM
- && GET_CODE (XEXP (dest, 0)) == LO_SUM
- && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
- && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
- priority >>= 3;
- }
- return priority;
-}
-
/* The 700 can only issue a single insn at a time.
The 7XXX processors can issue two insns at a time.
The 8000 can issue 4 insns at a time. */
&& GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
&& GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
&& GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
- length += compute_movmem_length (insn) - 4;
+ length += compute_cpymem_length (insn) - 4;
/* Block clear pattern. */
else if (NONJUMP_INSN_P (insn)
&& GET_CODE (pat) == PARALLEL
{
switch (mode)
{
- case SImode:
+ case E_SImode:
sri->icode = CODE_FOR_reload_insi_r1;
break;
- case DImode:
+ case E_DImode:
sri->icode = CODE_FOR_reload_indi_r1;
break;
- case SFmode:
+ case E_SFmode:
sri->icode = CODE_FOR_reload_insf_r1;
break;
- case DFmode:
+ case E_DFmode:
sri->icode = CODE_FOR_reload_indf_r1;
break;
{
switch (mode)
{
- case SImode:
+ case E_SImode:
sri->icode = CODE_FOR_reload_insi_r1;
break;
- case DImode:
+ case E_DImode:
sri->icode = CODE_FOR_reload_indi_r1;
break;
return NO_REGS;
}
+/* Implement TARGET_SECONDARY_MEMORY_NEEDED.  Defer to the subtarget's
+   PA_SECONDARY_MEMORY_NEEDED macro when one is defined; otherwise no
+   register-class copy ever needs to go through memory.  */
+
+static bool
+pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t class1 ATTRIBUTE_UNUSED,
+ reg_class_t class2 ATTRIBUTE_UNUSED)
+{
+#ifdef PA_SECONDARY_MEMORY_NEEDED
+ /* OS/ABI-specific override supplied by the subtarget headers.  */
+ return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
+#else
+ return false;
+#endif
+}
+
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
is only marked as live on entry by df-scan when it is a fixed
register. It isn't a fixed register in the 64-bit runtime,
or updates the ABI. */
static bool
-pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode, const_tree type,
- bool named ATTRIBUTE_UNUSED)
+pa_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
{
- HOST_WIDE_INT size;
-
- if (type)
- size = int_size_in_bytes (type);
- else
- size = GET_MODE_SIZE (mode);
-
+ HOST_WIDE_INT size = arg.type_size_in_bytes ();
+ /* size <= 0 presumably indicates a variable or unknown-size type, which
+    must always go by reference; in the 32-bit runtime anything larger
+    than 8 bytes is also passed by reference -- TODO confirm vs. ABI.  */
if (TARGET_64BIT)
return size <= 0;
else
return size <= 0 || size > 8;
}
-enum direction
+/* Implement TARGET_FUNCTION_ARG_PADDING. */
+
+static pad_direction
pa_function_arg_padding (machine_mode mode, const_tree type)
{
if (mode == BLKmode
|| TREE_CODE (type) == COMPLEX_TYPE
|| TREE_CODE (type) == VECTOR_TYPE)))
{
- /* Return none if justification is not required. */
+ /* Return PAD_NONE if justification is not required. */
if (type
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
&& (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
- return none;
+ return PAD_NONE;
/* The directions set here are ignored when a BLKmode argument larger
than a word is placed in a register. Different code is used for
the stack and in registers should be identical. */
if (TARGET_64BIT)
/* The 64-bit runtime specifies left justification for aggregates. */
- return upward;
+ return PAD_UPWARD;
else
/* The 32-bit runtime architecture specifies right justification.
When the argument is passed on the stack, the argument is padded
with garbage on the left. The HP compiler pads with zeros. */
- return downward;
+ return PAD_DOWNWARD;
}
if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
- return downward;
+ return PAD_DOWNWARD;
else
- return none;
+ return PAD_NONE;
}
\f
unsigned int size, ofs;
bool indirect;
- indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
+ indirect = pass_va_arg_by_reference (type);
if (indirect)
{
type = ptr;
2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
static bool
-pa_scalar_mode_supported_p (machine_mode mode)
+pa_scalar_mode_supported_p (scalar_mode mode)
{
int precision = GET_MODE_PRECISION (mode);
/* 64-bit plabel sequence. */
else if (TARGET_64BIT && !local_call)
- length += sibcall ? 28 : 24;
+ length += 24;
/* non-pic long absolute branch sequence. */
else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
xoperands[0] = pa_get_deferred_plabel (call_dest);
xoperands[1] = gen_label_rtx ();
- /* If this isn't a sibcall, we put the load of %r27 into the
- delay slot. We can't do this in a sibcall as we don't
- have a second call-clobbered scratch register available.
- We don't need to do anything when generating fast indirect
- calls. */
- if (seq_length != 0 && !sibcall)
+ /* Put the load of %r27 into the delay slot. We don't need to
+ do anything when generating fast indirect calls. */
+ if (seq_length != 0)
{
final_scan_insn (NEXT_INSN (insn), asm_out_file,
optimize, 0, NULL);
/* Now delete the delay insn. */
SET_INSN_DELETED (NEXT_INSN (insn));
- seq_length = 0;
}
output_asm_insn ("addil LT'%0,%%r27", xoperands);
output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
-
- if (sibcall)
- {
- output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
- output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
- output_asm_insn ("bve (%%r1)", xoperands);
- }
- else
- {
- output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
- output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
- output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
- seq_length = 1;
- }
+ output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
+ output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
+ output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
+ seq_length = 1;
}
else
{
{
output_asm_insn ("addil LT'%0,%%r19", xoperands);
output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
- output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
+ output_asm_insn ("ldw 0(%%r1),%%r22", xoperands);
}
else
{
output_asm_insn ("addil LR'%0-$global$,%%r27",
xoperands);
- output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
+ output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r22",
xoperands);
}
- output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
- output_asm_insn ("depi 0,31,2,%%r1", xoperands);
- output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
- output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
+ output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
+ output_asm_insn ("depi 0,31,2,%%r22", xoperands);
+ /* Should this be an ordered load to ensure the target
+ address is loaded before the global pointer? */
+ output_asm_insn ("ldw 0(%%r22),%%r1", xoperands);
+ output_asm_insn ("ldw 4(%%r22),%%r19", xoperands);
if (!sibcall && !TARGET_PA_20)
{
if (TARGET_PORTABLE_RUNTIME)
return 16;
- /* Inline version of $$dyncall. */
- if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
- return 20;
-
if (!TARGET_LONG_CALLS
&& ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
|| distance < MAX_PCREL17F_OFFSET))
if (!flag_pic)
return 12;
- /* Inline version of $$dyncall. */
- if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
- return 20;
-
+ /* Inline versions of $$dyncall. */
if (!optimize_size)
- return 36;
+ {
+ if (TARGET_NO_SPACE_REGS)
+ return 28;
+
+ if (TARGET_PA_20)
+ return 32;
+ }
/* Long PIC pc-relative call. */
return 20;
return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
}
- /* Maybe emit a fast inline version of $$dyncall. */
- if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
- {
- output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
- "ldw 2(%%r22),%%r19\n\t"
- "ldw -2(%%r22),%%r22", xoperands);
- pa_output_arg_descriptor (insn);
- if (TARGET_NO_SPACE_REGS)
- {
- if (TARGET_PA_20)
- return "bve,l,n (%%r22),%%r2\n\tnop";
- return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
- }
- return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
- }
-
/* Now the normal case -- we can reach $$dyncall directly or
we're sure that we can get there via a long-branch stub.
return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
}
- /* Maybe emit a fast inline version of $$dyncall. The long PIC
- pc-relative call sequence is five instructions. The inline PA 2.0
- version of $$dyncall is also five instructions. The PA 1.X versions
- are longer but still an overall win. */
- if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
+ /* The long PIC pc-relative call sequence is five instructions. So,
+ let's use an inline version of $$dyncall when the calling sequence
+ has a roughly similar number of instructions and we are not optimizing
+ for size. We need two instructions to load the return pointer plus
+ the $$dyncall implementation. */
+ if (!optimize_size)
{
- output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
- "ldw 2(%%r22),%%r19\n\t"
- "ldw -2(%%r22),%%r22", xoperands);
if (TARGET_NO_SPACE_REGS)
{
pa_output_arg_descriptor (insn);
- if (TARGET_PA_20)
- return "bve,l,n (%%r22),%%r2\n\tnop";
- return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
+ output_asm_insn ("bl .+8,%%r2\n\t"
+ "ldo 20(%%r2),%%r2\n\t"
+ "extru,<> %%r22,30,1,%%r0\n\t"
+ "bv,n %%r0(%%r22)\n\t"
+ "ldw -2(%%r22),%%r21\n\t"
+ "bv %%r0(%%r21)\n\t"
+ "ldw 2(%%r22),%%r19", xoperands);
+ return "";
}
if (TARGET_PA_20)
{
pa_output_arg_descriptor (insn);
- return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
+ output_asm_insn ("bl .+8,%%r2\n\t"
+ "ldo 24(%%r2),%%r2\n\t"
+ "stw %%r2,-24(%%sp)\n\t"
+ "extru,<> %%r22,30,1,%%r0\n\t"
+ "bve,n (%%r22)\n\t"
+ "ldw -2(%%r22),%%r21\n\t"
+ "bve (%%r21)\n\t"
+ "ldw 2(%%r22),%%r19", xoperands);
+ return "";
}
- output_asm_insn ("bl .+8,%%r2\n\t"
- "ldo 16(%%r2),%%r2\n\t"
- "ldsid (%%r22),%%r1\n\t"
- "mtsp %%r1,%%sr0", xoperands);
- pa_output_arg_descriptor (insn);
- return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
}
-
+
/* We need a long PIC call to $$dyncall. */
xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
xoperands[1] = gen_rtx_REG (Pmode, 2);
HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
tree function)
{
+ const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
static unsigned int current_thunk_number;
int val_14 = VAL_14_BITS_P (delta);
unsigned int old_last_address = last_address, nbytes = 0;
xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
xoperands[2] = GEN_INT (delta);
+ assemble_start_function (thunk_fndecl, fnname);
final_start_function (emit_barrier (), file, 1);
/* Output the thunk. We know that the function is in the same
if (old_last_address > last_address)
last_address = UINT_MAX;
update_total_code_bytes (nbytes);
+ assemble_end_function (thunk_fndecl, fnname);
}
/* Only direct calls to static functions are allowed to be sibling (tail)
static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
- if (TARGET_PORTABLE_RUNTIME)
- return false;
-
/* Sibcalls are not ok because the arg pointer register is not a fixed
register. This prevents the sibcall optimization from occurring. In
addition, there are problems with stub placement using GNU ld. This
if (TARGET_64BIT)
return false;
+ if (TARGET_PORTABLE_RUNTIME)
+ return false;
+
/* Sibcalls are only ok within a translation unit. */
- return (decl && !TREE_PUBLIC (decl));
+ return decl && targetm.binds_local_p (decl);
}
/* ??? Addition is not commutative on the PA due to the weird implicit
return false;
}
-/* Update the data in CUM to advance over an argument
- of mode MODE and data type TYPE.
- (TYPE is null for libcalls where that information may not be available.) */
+/* Update the data in CUM to advance over argument ARG. */
static void
-pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+pa_function_arg_advance (cumulative_args_t cum_v,
+ const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
- int arg_size = FUNCTION_ARG_SIZE (mode, type);
+ int arg_size = pa_function_arg_size (arg.mode, arg.type);
cum->nargs_prototype--;
cum->words += (arg_size
+ ((cum->words & 01)
- && type != NULL_TREE
+ && arg.type != NULL_TREE
&& arg_size > 1));
}
??? We might want to restructure this so that it looks more like other
ports. */
static rtx
-pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
- const_tree type, bool named ATTRIBUTE_UNUSED)
+pa_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ tree type = arg.type;
+ machine_mode mode = arg.mode;
int max_arg_words = (TARGET_64BIT ? 8 : 4);
int alignment = 0;
int arg_size;
int gpr_reg_base;
rtx retval;
- if (mode == VOIDmode)
+ if (arg.end_marker_p ())
return NULL_RTX;
- arg_size = FUNCTION_ARG_SIZE (mode, type);
+ arg_size = pa_function_arg_size (mode, type);
/* If this arg would be passed partially or totally on the stack, then
this routine should return zero. pa_arg_partial_bytes will
then this routine should return zero. */
static int
-pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
- tree type, bool named ATTRIBUTE_UNUSED)
+pa_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
unsigned int max_arg_words = 8;
if (!TARGET_64BIT)
return 0;
- if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
+ if (pa_function_arg_size (arg.mode, arg.type) > 1 && (cum->words & 1))
offset = 1;
- if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
+ if (cum->words + offset + pa_function_arg_size (arg.mode, arg.type)
+ <= max_arg_words)
/* Arg fits fully into registers. */
return 0;
else if (cum->words + offset >= max_arg_words)
output_section_asm_op (data);
}
-/* Implement TARGET_ASM_INITIALIZE_SECTIONS */
+/* Implement TARGET_ASM_INIT_SECTIONS. */
static void
pa_som_asm_init_sections (void)
= get_unnamed_section (0, output_section_asm_op,
"\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
- /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
- which reference data within the $TEXT$ space (for example constant
+ /* HPUX ld generates incorrect GOT entries for "T" fixups which
+ reference data within the $TEXT$ space (for example constant
strings in the $LIT$ subspace).
The assemblers (GAS and HP as) both have problems with handling
- the difference of two symbols which is the other correct way to
+ the difference of two symbols. This is the other correct way to
reference constant data during PIC code generation.
- So, there's no way to reference constant data which is in the
- $TEXT$ space during PIC generation. Instead place all constant
- data into the $PRIVATE$ subspace (this reduces sharing, but it
- works correctly). */
- readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
+ Thus, we can't put constant data needing relocation in the $TEXT$
+ space during PIC generation.
+
+ Previously, we placed all constant data into the $DATA$ subspace
+ when generating PIC code. This reduces sharing, but it works
+ correctly. Now we rely on pa_reloc_rw_mask() for section selection.
+ This puts constant data not needing relocation into the $TEXT$ space. */
+ readonly_data_section = som_readonly_data_section;
/* We must not have a reference to an external symbol defined in a
shared library in a readonly section, else the SOM linker will
/* On hpux10, the linker will give an error if we have a reference
in the read-only data section to a symbol defined in a shared
- library. Therefore, expressions that might require a reloc can
- not be placed in the read-only data section. */
+ library. Therefore, expressions that might require a reloc
+ cannot be placed in the read-only data section. */
static section *
pa_select_section (tree exp, int reloc,
&& DECL_INITIAL (exp)
&& (DECL_INITIAL (exp) == error_mark_node
|| TREE_CONSTANT (DECL_INITIAL (exp)))
- && !reloc)
+ && !(reloc & pa_reloc_rw_mask ()))
{
if (TARGET_SOM
&& DECL_ONE_ONLY (exp)
else
return readonly_data_section;
}
- else if (CONSTANT_CLASS_P (exp) && !reloc)
+ else if (CONSTANT_CLASS_P (exp)
+ && !(reloc & pa_reloc_rw_mask ()))
return readonly_data_section;
else if (TARGET_SOM
&& TREE_CODE (exp) == VAR_DECL
static int
pa_reloc_rw_mask (void)
{
- /* We force (const (plus (symbol) (const_int))) to memory when the
- const_int doesn't fit in a 14-bit integer. The SOM linker can't
- handle this construct in read-only memory and we want to avoid
- this for ELF. So, we always force an RTX needing relocation to
- the data section. */
- return 3;
+ if (flag_pic || (TARGET_SOM && !TARGET_HPUX_11))
+ return 3;
+
+ /* HP linker does not support global relocs in readonly memory. */
+ return TARGET_SOM ? 2 : 0;
}
static void
extern_symbol p = {decl, name};
vec_safe_push (extern_symbols, p);
}
+#endif
/* Output text required at the end of an assembler file.
This includes deferred plabels and .import directives for
all external symbols that were actually referenced. */
static void
-pa_hpux_file_end (void)
+pa_file_end (void)
{
+#ifdef ASM_OUTPUT_EXTERNAL_REAL
unsigned int i;
extern_symbol *p;
if (!NO_DEFERRED_PROFILE_COUNTERS)
output_deferred_profile_counters ();
+#endif
output_deferred_plabels ();
+#ifdef ASM_OUTPUT_EXTERNAL_REAL
for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
{
tree decl = p->decl;
}
vec_free (extern_symbols);
-}
#endif
-/* Return true if a change from mode FROM to mode TO for a register
- in register class RCLASS is invalid. */
+ if (NEED_INDICATE_EXEC_STACK)
+ file_end_indicate_exec_stack ();
+}
-bool
-pa_cannot_change_mode_class (machine_mode from, machine_mode to,
- enum reg_class rclass)
+/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
+
+static bool
+pa_can_change_mode_class (machine_mode from, machine_mode to,
+ reg_class_t rclass)
{
if (from == to)
- return false;
+ return true;
if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
- return false;
+ return true;
/* Reject changes to/from modes with zero size. */
if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
- return true;
+ return false;
/* Reject changes to/from complex and vector modes. */
if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
|| COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
- return true;
+ return false;
/* There is no way to load QImode or HImode values directly from memory
to a FP register. SImode loads to the FP registers are not zero
extended. On the 64-bit target, this conflicts with the definition
- of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
- different sizes in the floating-point registers. */
+ of LOAD_EXTEND_OP. Thus, we reject all mode changes in the FP registers
+ except for DImode to SImode on the 64-bit target. It is handled by
+ register renaming in pa_print_operand. */
if (MAYBE_FP_REG_CLASS_P (rclass))
- return true;
+ return TARGET_64BIT && from == DImode && to == SImode;
- /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
+ /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
in specific sets of registers. Thus, we cannot allow changing
to a larger mode when it's larger than a word. */
if (GET_MODE_SIZE (to) > UNITS_PER_WORD
&& GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
- return true;
+ return false;
- return false;
+ return true;
}
-/* Returns TRUE if it is a good idea to tie two pseudo registers
- when one has mode MODE1 and one has mode MODE2.
- If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
- for any hard reg, then this must be FALSE for correct output.
+/* Implement TARGET_MODES_TIEABLE_P.
We should return FALSE for QImode and HImode because these modes
are not ok in the floating-point registers. However, this prevents
tieing these modes to SImode and DImode in the general registers.
- So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
- CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
+ So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
+ TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
in the floating-point registers. */
-bool
+static bool
pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
/* Don't tie modes in different classes. */
\f
/* Length in units of the trampoline instruction code. */
-#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
+#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 36 : 48))
/* Output assembler code for a block containing the constant parts
{
if (!TARGET_64BIT)
{
- fputs ("\tldw 36(%r22),%r21\n", f);
- fputs ("\tbb,>=,n %r21,30,.+16\n", f);
- if (ASSEMBLER_DIALECT == 0)
- fputs ("\tdepi 0,31,2,%r21\n", f);
- else
- fputs ("\tdepwi 0,31,2,%r21\n", f);
- fputs ("\tldw 4(%r21),%r19\n", f);
- fputs ("\tldw 0(%r21),%r21\n", f);
if (TARGET_PA_20)
{
- fputs ("\tbve (%r21)\n", f);
- fputs ("\tldw 40(%r22),%r29\n", f);
+ fputs ("\tmfia %r20\n", f);
+ fputs ("\tldw 48(%r20),%r22\n", f);
+ fputs ("\tcopy %r22,%r21\n", f);
+ fputs ("\tbb,>=,n %r22,30,.+16\n", f);
+ fputs ("\tdepwi 0,31,2,%r22\n", f);
+ fputs ("\tldw 0(%r22),%r21\n", f);
+ fputs ("\tldw 4(%r22),%r19\n", f);
+ fputs ("\tbve (%r21)\n", f);
+ fputs ("\tldw 52(%r1),%r29\n", f);
+ fputs ("\t.word 0\n", f);
fputs ("\t.word 0\n", f);
fputs ("\t.word 0\n", f);
}
else
{
+ if (ASSEMBLER_DIALECT == 0)
+ {
+ fputs ("\tbl .+8,%r20\n", f);
+ fputs ("\tdepi 0,31,2,%r20\n", f);
+ }
+ else
+ {
+ fputs ("\tb,l .+8,%r20\n", f);
+ fputs ("\tdepwi 0,31,2,%r20\n", f);
+ }
+ fputs ("\tldw 40(%r20),%r22\n", f);
+ fputs ("\tcopy %r22,%r21\n", f);
+ fputs ("\tbb,>=,n %r22,30,.+16\n", f);
+ if (ASSEMBLER_DIALECT == 0)
+ fputs ("\tdepi 0,31,2,%r22\n", f);
+ else
+ fputs ("\tdepwi 0,31,2,%r22\n", f);
+ fputs ("\tldw 0(%r22),%r21\n", f);
+ fputs ("\tldw 4(%r22),%r19\n", f);
fputs ("\tldsid (%r21),%r1\n", f);
fputs ("\tmtsp %r1,%sr0\n", f);
- fputs ("\tbe 0(%sr0,%r21)\n", f);
- fputs ("\tldw 40(%r22),%r29\n", f);
+ fputs ("\tbe 0(%sr0,%r21)\n", f);
+ fputs ("\tldw 44(%r20),%r29\n", f);
}
fputs ("\t.word 0\n", f);
fputs ("\t.word 0\n", f);
fputs ("\t.dword 0\n", f);
fputs ("\t.dword 0\n", f);
fputs ("\tmfia %r31\n", f);
- fputs ("\tldd 24(%r31),%r1\n", f);
- fputs ("\tldd 24(%r1),%r27\n", f);
- fputs ("\tldd 16(%r1),%r1\n", f);
- fputs ("\tbve (%r1)\n", f);
+ fputs ("\tldd 24(%r31),%r27\n", f);
fputs ("\tldd 32(%r31),%r31\n", f);
+ fputs ("\tldd 16(%r27),%r1\n", f);
+ fputs ("\tbve (%r1)\n", f);
+ fputs ("\tldd 24(%r27),%r27\n", f);
fputs ("\t.dword 0 ; fptr\n", f);
fputs ("\t.dword 0 ; static link\n", f);
}
FNADDR is an RTX for the address of the function's pure code.
CXT is an RTX for the static chain value for the function.
- Move the function address to the trampoline template at offset 36.
- Move the static chain value to trampoline template at offset 40.
- Move the trampoline address to trampoline template at offset 44.
- Move r19 to trampoline template at offset 48. The latter two
+ Move the function address to the trampoline template at offset 48.
+ Move the static chain value to trampoline template at offset 52.
+ Move the trampoline address to trampoline template at offset 56.
+ Move r19 to trampoline template at offset 60. The latter two
words create a plabel for the indirect call to the trampoline.
A similar sequence is used for the 64-bit port but the plabel is
if (!TARGET_64BIT)
{
- tmp = adjust_address (m_tramp, Pmode, 36);
+ tmp = adjust_address (m_tramp, Pmode, 48);
emit_move_insn (tmp, fnaddr);
- tmp = adjust_address (m_tramp, Pmode, 40);
+ tmp = adjust_address (m_tramp, Pmode, 52);
emit_move_insn (tmp, chain_value);
/* Create a fat pointer for the trampoline. */
- tmp = adjust_address (m_tramp, Pmode, 44);
+ tmp = adjust_address (m_tramp, Pmode, 56);
emit_move_insn (tmp, r_tramp);
- tmp = adjust_address (m_tramp, Pmode, 48);
+ tmp = adjust_address (m_tramp, Pmode, 60);
emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
/* fdc and fic only use registers for the address to flush,
}
#ifdef HAVE_ENABLE_EXECUTE_STACK
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
- LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
+ LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
}
/* Perform any machine-specific adjustment in the address of the trampoline.
ADDR contains the address that was passed to pa_trampoline_init.
- Adjust the trampoline address to point to the plabel at offset 44. */
+ Adjust the trampoline address to point to the plabel at offset 56. */
static rtx
pa_trampoline_adjust_address (rtx addr)
{
if (!TARGET_64BIT)
- addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
+ addr = memory_address (Pmode, plus_constant (Pmode, addr, 58));
return addr;
}
if (!TARGET_DISABLE_INDEXING
&& GET_CODE (index) == MULT
- && MODE_OK_FOR_SCALED_INDEXING_P (mode)
+ /* Only accept base operands with the REG_POINTER flag prior to
+ reload on targets with non-equivalent space registers. */
+ && (TARGET_NO_SPACE_REGS
+ || (base == XEXP (x, 1)
+ && (reload_completed
+ || (reload_in_progress && HARD_REGISTER_P (base))
+ || REG_POINTER (base))))
&& REG_P (XEXP (index, 0))
&& GET_MODE (XEXP (index, 0)) == Pmode
+ && MODE_OK_FOR_SCALED_INDEXING_P (mode)
&& (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
: REG_OK_FOR_INDEX_P (XEXP (index, 0)))
&& GET_CODE (XEXP (index, 1)) == CONST_INT
{
int idx, vlen = XVECLEN (body, 0);
+ if (!TARGET_SOM)
+ fputs ("\t.align 4\n", asm_out_file);
targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
if (TARGET_GAS)
fputs ("\t.begin_brtab\n", asm_out_file);
/* Mark this jump predicted not taken. */
emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
- GET_MODE (success), 1, label, 0);
+ GET_MODE (success), 1, label,
+ profile_probability::guessed_never ());
return true;
}
in the 64-bit HP runtime. */
static bool
-pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED,
- bool named ATTRIBUTE_UNUSED)
+pa_callee_copies (cumulative_args_t, const function_arg_info &)
{
return !TARGET_CALLER_COPIES;
}
+/* Implement TARGET_HARD_REGNO_NREGS. */
+
+static unsigned int
+pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
+{
+ return PA_HARD_REGNO_NREGS (regno, mode);
+}
+
+/* Implement TARGET_HARD_REGNO_MODE_OK. */
+
+static bool
+pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+{
+ return PA_HARD_REGNO_MODE_OK (regno, mode);
+}
+
+/* Implement TARGET_STARTING_FRAME_OFFSET.
+
+ On the 32-bit ports, we reserve one slot for the previous frame
+ pointer and one fill slot. The fill slot is for compatibility
+ with HP compiled programs. On the 64-bit ports, we reserve one
+ slot for the previous frame pointer. */
+
+static HOST_WIDE_INT
+pa_starting_frame_offset (void)
+{
+ return 8;
+}
+
+/* Figure out the size in words of the function argument. The size
+ returned by this function should always be greater than zero because
+ we pass variable and zero sized objects by reference. */
+
+HOST_WIDE_INT
+pa_function_arg_size (machine_mode mode, const_tree type)
+{
+ HOST_WIDE_INT size;
+
+ size = mode != BLKmode ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
+ return CEIL (size, UNITS_PER_WORD);
+}
+
#include "gt-pa.h"