/* Output routines for GCC for ARM.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com).
#include "except.h"
#include "tm_p.h"
#include "target.h"
+#include "sched-int.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include "params.h"
#include "opts.h"
#include "dumpfile.h"
+#include "gimple-expr.h"
+#include "builtins.h"
/* Forward definitions of types. */
typedef struct minipool_node Mnode;
};
/* Forward function declarations. */
+static bool arm_const_not_ok_for_debug_p (rtx);
static bool arm_lra_p (void);
static bool arm_needs_doubleword_align (enum machine_mode, const_tree);
static int arm_compute_static_chain_stack_bytes (void);
static reg_class_t arm_preferred_reload_class (rtx, reg_class_t);
static rtx thumb_legitimize_address (rtx, rtx, enum machine_mode);
inline static int thumb1_index_register_rtx_p (rtx, int);
-static bool arm_legitimate_address_p (enum machine_mode, rtx, bool);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned arm_size_return_regs (void);
static bool arm_assemble_integer (rtx, unsigned int, int);
+static void arm_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update);
static void arm_print_operand (FILE *, rtx, int);
static void arm_print_operand_address (FILE *, rtx);
static bool arm_print_operand_punct_valid_p (unsigned char code);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
-static HOST_WIDE_INT get_jump_table_size (rtx);
+static HOST_WIDE_INT get_jump_table_size (rtx_jump_table_data *);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
-static void dump_minipool (rtx);
+static void dump_minipool (rtx_insn *);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
-static void push_minipool_barrier (rtx, HOST_WIDE_INT);
-static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
- rtx);
+static void push_minipool_barrier (rtx_insn *, HOST_WIDE_INT);
+static void push_minipool_fix (rtx_insn *, HOST_WIDE_INT, rtx *,
+ enum machine_mode, rtx);
static void arm_reorg (void);
-static void note_invalid_constants (rtx, HOST_WIDE_INT, int);
+static void note_invalid_constants (rtx_insn *, HOST_WIDE_INT, int);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (const_tree, const_tree);
static void arm_set_default_type_attributes (tree);
-static int arm_adjust_cost (rtx, rtx, rtx, int);
-static int arm_sched_reorder (FILE *, int, rtx *, int *, int);
+static int arm_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
+static int arm_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static int optimal_immediate_sequence (enum rtx_code code,
unsigned HOST_WIDE_INT val,
struct four_ints *return_sequence);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree arm_builtin_decl (unsigned, bool);
static void emit_constant_insn (rtx cond, rtx pattern);
-static rtx emit_set_insn (rtx, rtx);
-static rtx emit_multi_reg_push (unsigned long);
+static rtx_insn *emit_set_insn (rtx, rtx);
+static rtx emit_multi_reg_push (unsigned long, unsigned long);
static int arm_arg_partial_bytes (cumulative_args_t, enum machine_mode,
tree, bool);
static rtx arm_function_arg (cumulative_args_t, enum machine_mode,
static bool arm_must_pass_in_stack (enum machine_mode, const_tree);
static bool arm_return_in_memory (const_tree, const_tree);
#if ARM_UNWIND_INFO
-static void arm_unwind_emit (FILE *, rtx);
+static void arm_unwind_emit (FILE *, rtx_insn *);
static bool arm_output_ttype (rtx);
static void arm_asm_emit_except_personality (rtx);
static void arm_asm_init_sections (void);
static tree arm_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static void arm_option_override (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
-static bool arm_cannot_copy_insn_p (rtx);
-static bool arm_tls_symbol_p (rtx x);
+static bool arm_cannot_copy_insn_p (rtx_insn *);
static int arm_issue_rate (void);
static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static bool arm_output_addr_const_extra (FILE *, rtx);
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE arm_mangle_type
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV arm_atomic_assign_expand_fenv
+
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST arm_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
+#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
+#define TARGET_CONST_NOT_OK_FOR_DEBUG_P arm_const_not_ok_for_debug_p
+
+#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Obstack for minipool constant handling. */
#define FL_FOR_ARCH6M (FL_FOR_ARCH6 & ~FL_NOTM)
#define FL_FOR_ARCH7 ((FL_FOR_ARCH6T2 & ~FL_NOTM) | FL_ARCH7)
#define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM | FL_ARCH6K)
+#define FL_FOR_ARCH7VE (FL_FOR_ARCH7A | FL_THUMB_DIV | FL_ARM_DIV)
#define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_THUMB_DIV)
#define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_THUMB_DIV)
#define FL_FOR_ARCH7EM (FL_FOR_ARCH7M | FL_ARCH7EM)
-#define FL_FOR_ARCH8A (FL_FOR_ARCH7 | FL_ARCH6K | FL_ARCH8 | FL_THUMB_DIV \
- | FL_ARM_DIV | FL_NOTM)
+#define FL_FOR_ARCH8A (FL_FOR_ARCH7VE | FL_ARCH8)
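+/* ARMv8-A builds on ARMv7VE, which already carries FL_THUMB_DIV,
+   FL_ARM_DIV, FL_ARCH6K and FL_NOTM. */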
/* The bits in this mask specify which
instructions we are allowed to generate. */
/* The register number to be used for the PIC offset register. */
unsigned arm_pic_register = INVALID_REGNUM;
-/* Set to 1 after arm_reorg has started. Reset to start at the start of
- the next function. */
-static int after_arm_reorg = 0;
-
enum arm_pcs arm_pcs_default;
/* For an explanation of these variables, see final_prescan_insn below. */
{
/* ALU */
{
- 0, /* Arith. */
- 0, /* Logical. */
- 0, /* Shift. */
- COSTS_N_INSNS (1), /* Shift_reg. */
- COSTS_N_INSNS (1), /* Arith_shift. */
- COSTS_N_INSNS (2), /* Arith_shift_reg. */
- 0, /* Log_shift. */
- COSTS_N_INSNS (1), /* Log_shift_reg. */
- COSTS_N_INSNS (1), /* Extend. */
- COSTS_N_INSNS (2), /* Extend_arith. */
- COSTS_N_INSNS (1), /* Bfi. */
- COSTS_N_INSNS (1), /* Bfx. */
- 0, /* Clz. */
+ 0, /* arith. */
+ 0, /* logical. */
+ 0, /* shift. */
+ COSTS_N_INSNS (1), /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ COSTS_N_INSNS (2), /* arith_shift_reg. */
+ 0, /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (2), /* extend_arith. */
+ COSTS_N_INSNS (1), /* bfi. */
+ COSTS_N_INSNS (1), /* bfx. */
+ 0, /* clz. */
+ 0, /* rev. */
0, /* non_exec. */
true /* non_exec_costs_exec. */
},
{
/* MULT SImode */
{
- COSTS_N_INSNS (3), /* Simple. */
- COSTS_N_INSNS (3), /* Flag_setting. */
- COSTS_N_INSNS (2), /* Extend. */
- COSTS_N_INSNS (3), /* Add. */
- COSTS_N_INSNS (2), /* Extend_add. */
- COSTS_N_INSNS (30) /* Idiv. No HW div on Cortex A9. */
+ COSTS_N_INSNS (3), /* simple. */
+ COSTS_N_INSNS (3), /* flag_setting. */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (3), /* add. */
+ COSTS_N_INSNS (2), /* extend_add. */
+ COSTS_N_INSNS (30) /* idiv. No HW div on Cortex A9. */
},
/* MULT DImode */
{
- 0, /* Simple (N/A). */
- 0, /* Flag_setting (N/A). */
- COSTS_N_INSNS (4), /* Extend. */
- 0, /* Add (N/A). */
- COSTS_N_INSNS (4), /* Extend_add. */
- 0 /* Idiv (N/A). */
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (4), /* extend. */
+ 0, /* add (N/A). */
+ COSTS_N_INSNS (4), /* extend_add. */
+ 0 /* idiv (N/A). */
}
},
/* LD/ST */
{
- COSTS_N_INSNS (2), /* Load. */
- COSTS_N_INSNS (2), /* Load_sign_extend. */
- COSTS_N_INSNS (2), /* Ldrd. */
- COSTS_N_INSNS (2), /* Ldm_1st. */
- 1, /* Ldm_regs_per_insn_1st. */
- 2, /* Ldm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (5), /* Loadf. */
- COSTS_N_INSNS (5), /* Loadd. */
- COSTS_N_INSNS (1), /* Load_unaligned. */
- COSTS_N_INSNS (2), /* Store. */
- COSTS_N_INSNS (2), /* Strd. */
- COSTS_N_INSNS (2), /* Stm_1st. */
- 1, /* Stm_regs_per_insn_1st. */
- 2, /* Stm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (1), /* Storef. */
- COSTS_N_INSNS (1), /* Stored. */
- COSTS_N_INSNS (1) /* Store_unaligned. */
+ COSTS_N_INSNS (2), /* load. */
+ COSTS_N_INSNS (2), /* load_sign_extend. */
+ COSTS_N_INSNS (2), /* ldrd. */
+ COSTS_N_INSNS (2), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (5), /* loadf. */
+ COSTS_N_INSNS (5), /* loadd. */
+ COSTS_N_INSNS (1), /* load_unaligned. */
+ COSTS_N_INSNS (2), /* store. */
+ COSTS_N_INSNS (2), /* strd. */
+ COSTS_N_INSNS (2), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (1), /* storef. */
+ COSTS_N_INSNS (1), /* stored. */
+ COSTS_N_INSNS (1) /* store_unaligned. */
},
{
/* FP SFmode */
{
- COSTS_N_INSNS (14), /* Div. */
- COSTS_N_INSNS (4), /* Mult. */
- COSTS_N_INSNS (7), /* Mult_addsub. */
- COSTS_N_INSNS (30), /* Fma. */
- COSTS_N_INSNS (3), /* Addsub. */
- COSTS_N_INSNS (1), /* Fpconst. */
- COSTS_N_INSNS (1), /* Neg. */
- COSTS_N_INSNS (3), /* Compare. */
- COSTS_N_INSNS (3), /* Widen. */
- COSTS_N_INSNS (3), /* Narrow. */
- COSTS_N_INSNS (3), /* Toint. */
- COSTS_N_INSNS (3), /* Fromint. */
- COSTS_N_INSNS (3) /* Roundint. */
+ COSTS_N_INSNS (14), /* div. */
+ COSTS_N_INSNS (4), /* mult. */
+ COSTS_N_INSNS (7), /* mult_addsub. */
+ COSTS_N_INSNS (30), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (1), /* fpconst. */
+ COSTS_N_INSNS (1), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
},
/* FP DFmode */
{
- COSTS_N_INSNS (24), /* Div. */
- COSTS_N_INSNS (5), /* Mult. */
- COSTS_N_INSNS (8), /* Mult_addsub. */
- COSTS_N_INSNS (30), /* Fma. */
- COSTS_N_INSNS (3), /* Addsub. */
- COSTS_N_INSNS (1), /* Fpconst. */
- COSTS_N_INSNS (1), /* Neg. */
- COSTS_N_INSNS (3), /* Compare. */
- COSTS_N_INSNS (3), /* Widen. */
- COSTS_N_INSNS (3), /* Narrow. */
- COSTS_N_INSNS (3), /* Toint. */
- COSTS_N_INSNS (3), /* Fromint. */
- COSTS_N_INSNS (3) /* Roundint. */
+ COSTS_N_INSNS (24), /* div. */
+ COSTS_N_INSNS (5), /* mult. */
+ COSTS_N_INSNS (8), /* mult_addsub. */
+ COSTS_N_INSNS (30), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (1), /* fpconst. */
+ COSTS_N_INSNS (1), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
}
},
/* Vector */
{
- COSTS_N_INSNS (1) /* Alu. */
+ COSTS_N_INSNS (1) /* alu. */
+ }
+};
+
+const struct cpu_cost_table cortexa8_extra_costs =
+{
+ /* ALU */
+ {
+ 0, /* arith. */
+ 0, /* logical. */
+ COSTS_N_INSNS (1), /* shift. */
+ 0, /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ 0, /* arith_shift_reg. */
+ COSTS_N_INSNS (1), /* log_shift. */
+ 0, /* log_shift_reg. */
+ 0, /* extend. */
+ 0, /* extend_arith. */
+ 0, /* bfi. */
+ 0, /* bfx. */
+ 0, /* clz. */
+ 0, /* rev. */
+ 0, /* non_exec. */
+ true /* non_exec_costs_exec. */
+ },
+ {
+ /* MULT SImode */
+ {
+ COSTS_N_INSNS (1), /* simple. */
+ COSTS_N_INSNS (1), /* flag_setting. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (1), /* add. */
+ COSTS_N_INSNS (1), /* extend_add. */
+ COSTS_N_INSNS (30) /* idiv. No HW div on Cortex A8. */
+ },
+ /* MULT DImode */
+ {
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (2), /* extend. */
+ 0, /* add (N/A). */
+ COSTS_N_INSNS (2), /* extend_add. */
+ 0 /* idiv (N/A). */
+ }
+ },
+ /* LD/ST */
+ {
+ COSTS_N_INSNS (1), /* load. */
+ COSTS_N_INSNS (1), /* load_sign_extend. */
+ COSTS_N_INSNS (1), /* ldrd. */
+ COSTS_N_INSNS (1), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (1), /* loadf. */
+ COSTS_N_INSNS (1), /* loadd. */
+ COSTS_N_INSNS (1), /* load_unaligned. */
+ COSTS_N_INSNS (1), /* store. */
+ COSTS_N_INSNS (1), /* strd. */
+ COSTS_N_INSNS (1), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (1), /* storef. */
+ COSTS_N_INSNS (1), /* stored. */
+ COSTS_N_INSNS (1) /* store_unaligned. */
+ },
+ {
+ /* FP SFmode */
+ {
+ COSTS_N_INSNS (36), /* div. */
+ COSTS_N_INSNS (11), /* mult. */
+ COSTS_N_INSNS (20), /* mult_addsub. */
+ COSTS_N_INSNS (30), /* fma. */
+ COSTS_N_INSNS (9), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (6), /* compare. */
+ COSTS_N_INSNS (4), /* widen. */
+ COSTS_N_INSNS (4), /* narrow. */
+ COSTS_N_INSNS (8), /* toint. */
+ COSTS_N_INSNS (8), /* fromint. */
+ COSTS_N_INSNS (8) /* roundint. */
+ },
+ /* FP DFmode */
+ {
+ COSTS_N_INSNS (64), /* div. */
+ COSTS_N_INSNS (16), /* mult. */
+ COSTS_N_INSNS (25), /* mult_addsub. */
+ COSTS_N_INSNS (30), /* fma. */
+ COSTS_N_INSNS (9), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (6), /* compare. */
+ COSTS_N_INSNS (6), /* widen. */
+ COSTS_N_INSNS (6), /* narrow. */
+ COSTS_N_INSNS (8), /* toint. */
+ COSTS_N_INSNS (8), /* fromint. */
+ COSTS_N_INSNS (8) /* roundint. */
+ }
+ },
+ /* Vector */
+ {
+ COSTS_N_INSNS (1) /* alu. */
+ }
+};
+
+const struct cpu_cost_table cortexa5_extra_costs =
+{
+ /* ALU */
+ {
+ 0, /* arith. */
+ 0, /* logical. */
+ COSTS_N_INSNS (1), /* shift. */
+ COSTS_N_INSNS (1), /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ COSTS_N_INSNS (1), /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ COSTS_N_INSNS (1), /* bfi. */
+ COSTS_N_INSNS (1), /* bfx. */
+ COSTS_N_INSNS (1), /* clz. */
+ COSTS_N_INSNS (1), /* rev. */
+ 0, /* non_exec. */
+ true /* non_exec_costs_exec. */
+ },
+
+ {
+ /* MULT SImode */
+ {
+ 0, /* simple. */
+ COSTS_N_INSNS (1), /* flag_setting. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (1), /* add. */
+ COSTS_N_INSNS (1), /* extend_add. */
+ COSTS_N_INSNS (7) /* idiv. */
+ },
+ /* MULT DImode */
+ {
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (1), /* extend. */
+ 0, /* add. */
+ COSTS_N_INSNS (2), /* extend_add. */
+ 0 /* idiv (N/A). */
+ }
+ },
+ /* LD/ST */
+ {
+ COSTS_N_INSNS (1), /* load. */
+ COSTS_N_INSNS (1), /* load_sign_extend. */
+ COSTS_N_INSNS (6), /* ldrd. */
+ COSTS_N_INSNS (1), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* loadf. */
+ COSTS_N_INSNS (4), /* loadd. */
+ COSTS_N_INSNS (1), /* load_unaligned. */
+ COSTS_N_INSNS (1), /* store. */
+ COSTS_N_INSNS (3), /* strd. */
+ COSTS_N_INSNS (1), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* storef. */
+ COSTS_N_INSNS (2), /* stored. */
+ COSTS_N_INSNS (1) /* store_unaligned. */
+ },
+ {
+ /* FP SFmode */
+ {
+ COSTS_N_INSNS (15), /* div. */
+ COSTS_N_INSNS (3), /* mult. */
+ COSTS_N_INSNS (7), /* mult_addsub. */
+ COSTS_N_INSNS (7), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
+ },
+ /* FP DFmode */
+ {
+ COSTS_N_INSNS (30), /* div. */
+ COSTS_N_INSNS (6), /* mult. */
+ COSTS_N_INSNS (10), /* mult_addsub. */
+ COSTS_N_INSNS (7), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
+ }
+ },
+ /* Vector */
+ {
+ COSTS_N_INSNS (1) /* alu. */
}
};
{
/* ALU */
{
- 0, /* Arith. */
- 0, /* Logical. */
- COSTS_N_INSNS (1), /* Shift. */
- COSTS_N_INSNS (1), /* Shift_reg. */
- COSTS_N_INSNS (1), /* Arith_shift. */
- COSTS_N_INSNS (1), /* Arith_shift_reg. */
- COSTS_N_INSNS (1), /* Log_shift. */
- COSTS_N_INSNS (1), /* Log_shift_reg. */
- COSTS_N_INSNS (1), /* Extend. */
- COSTS_N_INSNS (1), /* Extend_arith. */
- COSTS_N_INSNS (1), /* Bfi. */
- COSTS_N_INSNS (1), /* Bfx. */
- COSTS_N_INSNS (1), /* Clz. */
+ 0, /* arith. */
+ 0, /* logical. */
+ COSTS_N_INSNS (1), /* shift. */
+ COSTS_N_INSNS (1), /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ COSTS_N_INSNS (1), /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ COSTS_N_INSNS (1), /* bfi. */
+ COSTS_N_INSNS (1), /* bfx. */
+ COSTS_N_INSNS (1), /* clz. */
+ COSTS_N_INSNS (1), /* rev. */
0, /* non_exec. */
true /* non_exec_costs_exec. */
},
{
/* MULT SImode */
{
- 0, /* Simple. */
- COSTS_N_INSNS (1), /* Flag_setting. */
- COSTS_N_INSNS (1), /* Extend. */
- COSTS_N_INSNS (1), /* Add. */
- COSTS_N_INSNS (1), /* Extend_add. */
- COSTS_N_INSNS (7) /* Idiv. */
+ 0, /* simple. */
+ COSTS_N_INSNS (1), /* flag_setting. */
+ COSTS_N_INSNS (1), /* extend. */
+ COSTS_N_INSNS (1), /* add. */
+ COSTS_N_INSNS (1), /* extend_add. */
+ COSTS_N_INSNS (7) /* idiv. */
},
/* MULT DImode */
{
- 0, /* Simple (N/A). */
- 0, /* Flag_setting (N/A). */
- COSTS_N_INSNS (1), /* Extend. */
- 0, /* Add. */
- COSTS_N_INSNS (2), /* Extend_add. */
- 0 /* Idiv (N/A). */
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (1), /* extend. */
+ 0, /* add. */
+ COSTS_N_INSNS (2), /* extend_add. */
+ 0 /* idiv (N/A). */
}
},
/* LD/ST */
{
- COSTS_N_INSNS (1), /* Load. */
- COSTS_N_INSNS (1), /* Load_sign_extend. */
- COSTS_N_INSNS (3), /* Ldrd. */
- COSTS_N_INSNS (1), /* Ldm_1st. */
- 1, /* Ldm_regs_per_insn_1st. */
- 2, /* Ldm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (2), /* Loadf. */
- COSTS_N_INSNS (2), /* Loadd. */
- COSTS_N_INSNS (1), /* Load_unaligned. */
- COSTS_N_INSNS (1), /* Store. */
- COSTS_N_INSNS (3), /* Strd. */
- COSTS_N_INSNS (1), /* Stm_1st. */
- 1, /* Stm_regs_per_insn_1st. */
- 2, /* Stm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (2), /* Storef. */
- COSTS_N_INSNS (2), /* Stored. */
- COSTS_N_INSNS (1) /* Store_unaligned. */
+ COSTS_N_INSNS (1), /* load. */
+ COSTS_N_INSNS (1), /* load_sign_extend. */
+ COSTS_N_INSNS (3), /* ldrd. */
+ COSTS_N_INSNS (1), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* loadf. */
+ COSTS_N_INSNS (2), /* loadd. */
+ COSTS_N_INSNS (1), /* load_unaligned. */
+ COSTS_N_INSNS (1), /* store. */
+ COSTS_N_INSNS (3), /* strd. */
+ COSTS_N_INSNS (1), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* storef. */
+ COSTS_N_INSNS (2), /* stored. */
+ COSTS_N_INSNS (1) /* store_unaligned. */
},
{
/* FP SFmode */
{
- COSTS_N_INSNS (15), /* Div. */
- COSTS_N_INSNS (3), /* Mult. */
- COSTS_N_INSNS (7), /* Mult_addsub. */
- COSTS_N_INSNS (7), /* Fma. */
- COSTS_N_INSNS (3), /* Addsub. */
- COSTS_N_INSNS (3), /* Fpconst. */
- COSTS_N_INSNS (3), /* Neg. */
- COSTS_N_INSNS (3), /* Compare. */
- COSTS_N_INSNS (3), /* Widen. */
- COSTS_N_INSNS (3), /* Narrow. */
- COSTS_N_INSNS (3), /* Toint. */
- COSTS_N_INSNS (3), /* Fromint. */
- COSTS_N_INSNS (3) /* Roundint. */
+ COSTS_N_INSNS (15), /* div. */
+ COSTS_N_INSNS (3), /* mult. */
+ COSTS_N_INSNS (7), /* mult_addsub. */
+ COSTS_N_INSNS (7), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
},
/* FP DFmode */
{
- COSTS_N_INSNS (30), /* Div. */
- COSTS_N_INSNS (6), /* Mult. */
- COSTS_N_INSNS (10), /* Mult_addsub. */
- COSTS_N_INSNS (7), /* Fma. */
- COSTS_N_INSNS (3), /* Addsub. */
- COSTS_N_INSNS (3), /* Fpconst. */
- COSTS_N_INSNS (3), /* Neg. */
- COSTS_N_INSNS (3), /* Compare. */
- COSTS_N_INSNS (3), /* Widen. */
- COSTS_N_INSNS (3), /* Narrow. */
- COSTS_N_INSNS (3), /* Toint. */
- COSTS_N_INSNS (3), /* Fromint. */
- COSTS_N_INSNS (3) /* Roundint. */
+ COSTS_N_INSNS (30), /* div. */
+ COSTS_N_INSNS (6), /* mult. */
+ COSTS_N_INSNS (10), /* mult_addsub. */
+ COSTS_N_INSNS (7), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ COSTS_N_INSNS (3), /* fpconst. */
+ COSTS_N_INSNS (3), /* neg. */
+ COSTS_N_INSNS (3), /* compare. */
+ COSTS_N_INSNS (3), /* widen. */
+ COSTS_N_INSNS (3), /* narrow. */
+ COSTS_N_INSNS (3), /* toint. */
+ COSTS_N_INSNS (3), /* fromint. */
+ COSTS_N_INSNS (3) /* roundint. */
}
},
/* Vector */
{
- COSTS_N_INSNS (1) /* Alu. */
+ COSTS_N_INSNS (1) /* alu. */
}
};
{
/* ALU */
{
- 0, /* Arith. */
- 0, /* Logical. */
- 0, /* Shift. */
- COSTS_N_INSNS (1), /* Shift_reg. */
- COSTS_N_INSNS (1), /* Arith_shift. */
- COSTS_N_INSNS (1), /* Arith_shift_reg. */
- COSTS_N_INSNS (1), /* Log_shift. */
- COSTS_N_INSNS (1), /* Log_shift_reg. */
- 0, /* Extend. */
- COSTS_N_INSNS (1), /* Extend_arith. */
- 0, /* Bfi. */
- COSTS_N_INSNS (1), /* Bfx. */
- COSTS_N_INSNS (1), /* Clz. */
+ 0, /* arith. */
+ 0, /* logical. */
+ 0, /* shift. */
+ COSTS_N_INSNS (1), /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ COSTS_N_INSNS (1), /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ 0, /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ 0, /* bfi. */
+ COSTS_N_INSNS (1), /* bfx. */
+ COSTS_N_INSNS (1), /* clz. */
+ COSTS_N_INSNS (1), /* rev. */
0, /* non_exec. */
true /* non_exec_costs_exec. */
},
/* MULT SImode */
{
{
- COSTS_N_INSNS (2), /* Simple. */
- COSTS_N_INSNS (3), /* Flag_setting. */
- COSTS_N_INSNS (2), /* Extend. */
- COSTS_N_INSNS (3), /* Add. */
- COSTS_N_INSNS (2), /* Extend_add. */
- COSTS_N_INSNS (18) /* Idiv. */
+ COSTS_N_INSNS (2), /* simple. */
+ COSTS_N_INSNS (3), /* flag_setting. */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (3), /* add. */
+ COSTS_N_INSNS (2), /* extend_add. */
+ COSTS_N_INSNS (18) /* idiv. */
},
/* MULT DImode */
{
- 0, /* Simple (N/A). */
- 0, /* Flag_setting (N/A). */
- COSTS_N_INSNS (3), /* Extend. */
- 0, /* Add (N/A). */
- COSTS_N_INSNS (3), /* Extend_add. */
- 0 /* Idiv (N/A). */
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (3), /* extend. */
+ 0, /* add (N/A). */
+ COSTS_N_INSNS (3), /* extend_add. */
+ 0 /* idiv (N/A). */
}
},
/* LD/ST */
{
- COSTS_N_INSNS (3), /* Load. */
- COSTS_N_INSNS (3), /* Load_sign_extend. */
- COSTS_N_INSNS (3), /* Ldrd. */
- COSTS_N_INSNS (3), /* Ldm_1st. */
- 1, /* Ldm_regs_per_insn_1st. */
- 2, /* Ldm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (3), /* Loadf. */
- COSTS_N_INSNS (3), /* Loadd. */
- 0, /* Load_unaligned. */
- 0, /* Store. */
- 0, /* Strd. */
- 0, /* Stm_1st. */
- 1, /* Stm_regs_per_insn_1st. */
- 2, /* Stm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (2), /* Storef. */
- COSTS_N_INSNS (2), /* Stored. */
- 0 /* Store_unaligned. */
+ COSTS_N_INSNS (3), /* load. */
+ COSTS_N_INSNS (3), /* load_sign_extend. */
+ COSTS_N_INSNS (3), /* ldrd. */
+ COSTS_N_INSNS (3), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (3), /* loadf. */
+ COSTS_N_INSNS (3), /* loadd. */
+ 0, /* load_unaligned. */
+ 0, /* store. */
+ 0, /* strd. */
+ 0, /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* storef. */
+ COSTS_N_INSNS (2), /* stored. */
+ 0 /* store_unaligned. */
},
{
/* FP SFmode */
{
- COSTS_N_INSNS (17), /* Div. */
- COSTS_N_INSNS (4), /* Mult. */
- COSTS_N_INSNS (8), /* Mult_addsub. */
- COSTS_N_INSNS (8), /* Fma. */
- COSTS_N_INSNS (4), /* Addsub. */
- COSTS_N_INSNS (2), /* Fpconst. */
- COSTS_N_INSNS (2), /* Neg. */
- COSTS_N_INSNS (2), /* Compare. */
- COSTS_N_INSNS (4), /* Widen. */
- COSTS_N_INSNS (4), /* Narrow. */
- COSTS_N_INSNS (4), /* Toint. */
- COSTS_N_INSNS (4), /* Fromint. */
- COSTS_N_INSNS (4) /* Roundint. */
+ COSTS_N_INSNS (17), /* div. */
+ COSTS_N_INSNS (4), /* mult. */
+ COSTS_N_INSNS (8), /* mult_addsub. */
+ COSTS_N_INSNS (8), /* fma. */
+ COSTS_N_INSNS (4), /* addsub. */
+ COSTS_N_INSNS (2), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (2), /* compare. */
+ COSTS_N_INSNS (4), /* widen. */
+ COSTS_N_INSNS (4), /* narrow. */
+ COSTS_N_INSNS (4), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (4) /* roundint. */
},
/* FP DFmode */
{
- COSTS_N_INSNS (31), /* Div. */
- COSTS_N_INSNS (4), /* Mult. */
- COSTS_N_INSNS (8), /* Mult_addsub. */
- COSTS_N_INSNS (8), /* Fma. */
- COSTS_N_INSNS (4), /* Addsub. */
- COSTS_N_INSNS (2), /* Fpconst. */
- COSTS_N_INSNS (2), /* Neg. */
- COSTS_N_INSNS (2), /* Compare. */
- COSTS_N_INSNS (4), /* Widen. */
- COSTS_N_INSNS (4), /* Narrow. */
- COSTS_N_INSNS (4), /* Toint. */
- COSTS_N_INSNS (4), /* Fromint. */
- COSTS_N_INSNS (4) /* Roundint. */
+ COSTS_N_INSNS (31), /* div. */
+ COSTS_N_INSNS (4), /* mult. */
+ COSTS_N_INSNS (8), /* mult_addsub. */
+ COSTS_N_INSNS (8), /* fma. */
+ COSTS_N_INSNS (4), /* addsub. */
+ COSTS_N_INSNS (2), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (2), /* compare. */
+ COSTS_N_INSNS (4), /* widen. */
+ COSTS_N_INSNS (4), /* narrow. */
+ COSTS_N_INSNS (4), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (4) /* roundint. */
}
},
/* Vector */
{
- COSTS_N_INSNS (1) /* Alu. */
+ COSTS_N_INSNS (1) /* alu. */
}
};
{
/* ALU */
{
- 0, /* Arith. */
- 0, /* Logical. */
- 0, /* Shift. */
- 0, /* Shift_reg. */
- COSTS_N_INSNS (1), /* Arith_shift. */
- COSTS_N_INSNS (1), /* Arith_shift_reg. */
- COSTS_N_INSNS (1), /* Log_shift. */
- COSTS_N_INSNS (1), /* Log_shift_reg. */
- 0, /* Extend. */
- COSTS_N_INSNS (1), /* Extend_arith. */
- COSTS_N_INSNS (1), /* Bfi. */
- 0, /* Bfx. */
- 0, /* Clz. */
+ 0, /* arith. */
+ 0, /* logical. */
+ 0, /* shift. */
+ 0, /* shift_reg. */
+ COSTS_N_INSNS (1), /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ COSTS_N_INSNS (1), /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ 0, /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ COSTS_N_INSNS (1), /* bfi. */
+ 0, /* bfx. */
+ 0, /* clz. */
+ 0, /* rev. */
0, /* non_exec. */
true /* non_exec_costs_exec. */
},
/* MULT SImode */
{
{
- COSTS_N_INSNS (2), /* Simple. */
- COSTS_N_INSNS (3), /* Flag_setting. */
- COSTS_N_INSNS (2), /* Extend. */
- COSTS_N_INSNS (2), /* Add. */
- COSTS_N_INSNS (2), /* Extend_add. */
- COSTS_N_INSNS (18) /* Idiv. */
+ COSTS_N_INSNS (2), /* simple. */
+ COSTS_N_INSNS (3), /* flag_setting. */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (2), /* add. */
+ COSTS_N_INSNS (2), /* extend_add. */
+ COSTS_N_INSNS (18) /* idiv. */
},
/* MULT DImode */
{
- 0, /* Simple (N/A). */
- 0, /* Flag_setting (N/A). */
- COSTS_N_INSNS (3), /* Extend. */
- 0, /* Add (N/A). */
- COSTS_N_INSNS (3), /* Extend_add. */
- 0 /* Idiv (N/A). */
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (3), /* extend. */
+ 0, /* add (N/A). */
+ COSTS_N_INSNS (3), /* extend_add. */
+ 0 /* idiv (N/A). */
}
},
/* LD/ST */
{
- COSTS_N_INSNS (3), /* Load. */
- COSTS_N_INSNS (3), /* Load_sign_extend. */
- COSTS_N_INSNS (3), /* Ldrd. */
- COSTS_N_INSNS (4), /* Ldm_1st. */
- 1, /* Ldm_regs_per_insn_1st. */
- 2, /* Ldm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (4), /* Loadf. */
- COSTS_N_INSNS (4), /* Loadd. */
- 0, /* Load_unaligned. */
- 0, /* Store. */
- 0, /* Strd. */
- COSTS_N_INSNS (1), /* Stm_1st. */
- 1, /* Stm_regs_per_insn_1st. */
- 2, /* Stm_regs_per_insn_subsequent. */
- 0, /* Storef. */
- 0, /* Stored. */
- 0 /* Store_unaligned. */
+ COSTS_N_INSNS (3), /* load. */
+ COSTS_N_INSNS (3), /* load_sign_extend. */
+ COSTS_N_INSNS (3), /* ldrd. */
+ COSTS_N_INSNS (4), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 2, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (4), /* loadf. */
+ COSTS_N_INSNS (4), /* loadd. */
+ 0, /* load_unaligned. */
+ 0, /* store. */
+ 0, /* strd. */
+ COSTS_N_INSNS (1), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 2, /* stm_regs_per_insn_subsequent. */
+ 0, /* storef. */
+ 0, /* stored. */
+ 0 /* store_unaligned. */
},
{
/* FP SFmode */
{
- COSTS_N_INSNS (17), /* Div. */
- COSTS_N_INSNS (4), /* Mult. */
- COSTS_N_INSNS (8), /* Mult_addsub. */
- COSTS_N_INSNS (8), /* Fma. */
- COSTS_N_INSNS (4), /* Addsub. */
- COSTS_N_INSNS (2), /* Fpconst. */
- COSTS_N_INSNS (2), /* Neg. */
- COSTS_N_INSNS (5), /* Compare. */
- COSTS_N_INSNS (4), /* Widen. */
- COSTS_N_INSNS (4), /* Narrow. */
- COSTS_N_INSNS (4), /* Toint. */
- COSTS_N_INSNS (4), /* Fromint. */
- COSTS_N_INSNS (4) /* Roundint. */
+ COSTS_N_INSNS (17), /* div. */
+ COSTS_N_INSNS (4), /* mult. */
+ COSTS_N_INSNS (8), /* mult_addsub. */
+ COSTS_N_INSNS (8), /* fma. */
+ COSTS_N_INSNS (4), /* addsub. */
+ COSTS_N_INSNS (2), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (5), /* compare. */
+ COSTS_N_INSNS (4), /* widen. */
+ COSTS_N_INSNS (4), /* narrow. */
+ COSTS_N_INSNS (4), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (4) /* roundint. */
},
/* FP DFmode */
{
- COSTS_N_INSNS (31), /* Div. */
- COSTS_N_INSNS (4), /* Mult. */
- COSTS_N_INSNS (8), /* Mult_addsub. */
- COSTS_N_INSNS (8), /* Fma. */
- COSTS_N_INSNS (4), /* Addsub. */
- COSTS_N_INSNS (2), /* Fpconst. */
- COSTS_N_INSNS (2), /* Neg. */
- COSTS_N_INSNS (2), /* Compare. */
- COSTS_N_INSNS (4), /* Widen. */
- COSTS_N_INSNS (4), /* Narrow. */
- COSTS_N_INSNS (4), /* Toint. */
- COSTS_N_INSNS (4), /* Fromint. */
- COSTS_N_INSNS (4) /* Roundint. */
+ COSTS_N_INSNS (31), /* div. */
+ COSTS_N_INSNS (4), /* mult. */
+ COSTS_N_INSNS (8), /* mult_addsub. */
+ COSTS_N_INSNS (8), /* fma. */
+ COSTS_N_INSNS (4), /* addsub. */
+ COSTS_N_INSNS (2), /* fpconst. */
+ COSTS_N_INSNS (2), /* neg. */
+ COSTS_N_INSNS (2), /* compare. */
+ COSTS_N_INSNS (4), /* widen. */
+ COSTS_N_INSNS (4), /* narrow. */
+ COSTS_N_INSNS (4), /* toint. */
+ COSTS_N_INSNS (4), /* fromint. */
+ COSTS_N_INSNS (4) /* roundint. */
}
},
/* Vector */
{
- COSTS_N_INSNS (1) /* Alu. */
+ COSTS_N_INSNS (1) /* alu. */
}
};
{
/* ALU */
{
- 0, /* Arith. */
- 0, /* Logical. */
- 0, /* Shift. */
- 0, /* Shift_reg. */
- 0, /* Arith_shift. */
- COSTS_N_INSNS (1), /* Arith_shift_reg. */
- 0, /* Log_shift. */
- COSTS_N_INSNS (1), /* Log_shift_reg. */
- 0, /* Extend. */
- COSTS_N_INSNS (1), /* Extend_arith. */
- 0, /* Bfi. */
- 0, /* Bfx. */
- 0, /* Clz. */
+ 0, /* arith. */
+ 0, /* logical. */
+ 0, /* shift. */
+ 0, /* shift_reg. */
+ 0, /* arith_shift. */
+ COSTS_N_INSNS (1), /* arith_shift_reg. */
+ 0, /* log_shift. */
+ COSTS_N_INSNS (1), /* log_shift_reg. */
+ 0, /* extend. */
+ COSTS_N_INSNS (1), /* extend_arith. */
+ 0, /* bfi. */
+ 0, /* bfx. */
+ 0, /* clz. */
+ 0, /* rev. */
COSTS_N_INSNS (1), /* non_exec. */
false /* non_exec_costs_exec. */
},
{
/* MULT SImode */
{
- COSTS_N_INSNS (1), /* Simple. */
- COSTS_N_INSNS (1), /* Flag_setting. */
- COSTS_N_INSNS (2), /* Extend. */
- COSTS_N_INSNS (1), /* Add. */
- COSTS_N_INSNS (3), /* Extend_add. */
- COSTS_N_INSNS (8) /* Idiv. */
+ COSTS_N_INSNS (1), /* simple. */
+ COSTS_N_INSNS (1), /* flag_setting. */
+ COSTS_N_INSNS (2), /* extend. */
+ COSTS_N_INSNS (1), /* add. */
+ COSTS_N_INSNS (3), /* extend_add. */
+ COSTS_N_INSNS (8) /* idiv. */
},
/* MULT DImode */
{
- 0, /* Simple (N/A). */
- 0, /* Flag_setting (N/A). */
- COSTS_N_INSNS (2), /* Extend. */
- 0, /* Add (N/A). */
- COSTS_N_INSNS (3), /* Extend_add. */
- 0 /* Idiv (N/A). */
+ 0, /* simple (N/A). */
+ 0, /* flag_setting (N/A). */
+ COSTS_N_INSNS (2), /* extend. */
+ 0, /* add (N/A). */
+ COSTS_N_INSNS (3), /* extend_add. */
+ 0 /* idiv (N/A). */
}
},
/* LD/ST */
{
- COSTS_N_INSNS (2), /* Load. */
- 0, /* Load_sign_extend. */
- COSTS_N_INSNS (3), /* Ldrd. */
- COSTS_N_INSNS (2), /* Ldm_1st. */
- 1, /* Ldm_regs_per_insn_1st. */
- 1, /* Ldm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (2), /* Loadf. */
- COSTS_N_INSNS (3), /* Loadd. */
- COSTS_N_INSNS (1), /* Load_unaligned. */
- COSTS_N_INSNS (2), /* Store. */
- COSTS_N_INSNS (3), /* Strd. */
- COSTS_N_INSNS (2), /* Stm_1st. */
- 1, /* Stm_regs_per_insn_1st. */
- 1, /* Stm_regs_per_insn_subsequent. */
- COSTS_N_INSNS (2), /* Storef. */
- COSTS_N_INSNS (3), /* Stored. */
- COSTS_N_INSNS (1) /* Store_unaligned. */
+ COSTS_N_INSNS (2), /* load. */
+ 0, /* load_sign_extend. */
+ COSTS_N_INSNS (3), /* ldrd. */
+ COSTS_N_INSNS (2), /* ldm_1st. */
+ 1, /* ldm_regs_per_insn_1st. */
+ 1, /* ldm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* loadf. */
+ COSTS_N_INSNS (3), /* loadd. */
+ COSTS_N_INSNS (1), /* load_unaligned. */
+ COSTS_N_INSNS (2), /* store. */
+ COSTS_N_INSNS (3), /* strd. */
+ COSTS_N_INSNS (2), /* stm_1st. */
+ 1, /* stm_regs_per_insn_1st. */
+ 1, /* stm_regs_per_insn_subsequent. */
+ COSTS_N_INSNS (2), /* storef. */
+ COSTS_N_INSNS (3), /* stored. */
+ COSTS_N_INSNS (1) /* store_unaligned. */
},
{
/* FP SFmode */
{
- COSTS_N_INSNS (7), /* Div. */
- COSTS_N_INSNS (2), /* Mult. */
- COSTS_N_INSNS (5), /* Mult_addsub. */
- COSTS_N_INSNS (3), /* Fma. */
- COSTS_N_INSNS (1), /* Addsub. */
- 0, /* Fpconst. */
- 0, /* Neg. */
- 0, /* Compare. */
- 0, /* Widen. */
- 0, /* Narrow. */
- 0, /* Toint. */
- 0, /* Fromint. */
- 0 /* Roundint. */
+ COSTS_N_INSNS (7), /* div. */
+ COSTS_N_INSNS (2), /* mult. */
+ COSTS_N_INSNS (5), /* mult_addsub. */
+ COSTS_N_INSNS (3), /* fma. */
+ COSTS_N_INSNS (1), /* addsub. */
+ 0, /* fpconst. */
+ 0, /* neg. */
+ 0, /* compare. */
+ 0, /* widen. */
+ 0, /* narrow. */
+ 0, /* toint. */
+ 0, /* fromint. */
+ 0 /* roundint. */
},
/* FP DFmode */
{
- COSTS_N_INSNS (15), /* Div. */
- COSTS_N_INSNS (5), /* Mult. */
- COSTS_N_INSNS (7), /* Mult_addsub. */
- COSTS_N_INSNS (7), /* Fma. */
- COSTS_N_INSNS (3), /* Addsub. */
- 0, /* Fpconst. */
- 0, /* Neg. */
- 0, /* Compare. */
- 0, /* Widen. */
- 0, /* Narrow. */
- 0, /* Toint. */
- 0, /* Fromint. */
- 0 /* Roundint. */
+ COSTS_N_INSNS (15), /* div. */
+ COSTS_N_INSNS (5), /* mult. */
+ COSTS_N_INSNS (7), /* mult_addsub. */
+ COSTS_N_INSNS (7), /* fma. */
+ COSTS_N_INSNS (3), /* addsub. */
+ 0, /* fpconst. */
+ 0, /* neg. */
+ 0, /* compare. */
+ 0, /* widen. */
+ 0, /* narrow. */
+ 0, /* toint. */
+ 0, /* fromint. */
+ 0 /* roundint. */
}
},
/* Vector */
{
- COSTS_N_INSNS (1) /* Alu. */
+ COSTS_N_INSNS (1) /* alu. */
}
};
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_fastmul_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
/* StrongARM has early execution of branches, so a sequence that is worth
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_xscale_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_9e_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_v6t2_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
/* Generic Cortex tuning. Use more specific tunings if appropriate. */
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
+};
+
+const struct tune_params arm_cortex_a8_tune =
+{
+ arm_9e_rtx_costs,
+ &cortexa8_extra_costs,
+ NULL, /* Sched adj cost. */
+ 1, /* Constant limit. */
+ 5, /* Max cond insns. */
+ ARM_PREFETCH_NOT_BENEFICIAL,
+ false, /* Prefer constant pool. */
+ arm_default_branch_cost,
+ false, /* Prefer LDRD/STRD. */
+ {true, true}, /* Prefer non short circuit. */
+ &arm_default_vec_cost, /* Vectorizer costs. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_cortex_a7_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_cortex_a15_tune =
true, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ true, true /* Prefer 32-bit encodings. */
};
const struct tune_params arm_cortex_a53_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
+};
+
+const struct tune_params arm_cortex_a57_tune =
+{
+ arm_9e_rtx_costs,
+ &cortexa57_extra_costs,
+ NULL, /* Sched adj cost. */
+ 1, /* Constant limit. */
+ 2, /* Max cond insns. */
+ ARM_PREFETCH_NOT_BENEFICIAL,
+ false, /* Prefer constant pool. */
+ arm_default_branch_cost,
+ true, /* Prefer LDRD/STRD. */
+ {true, true}, /* Prefer non short circuit. */
+ &arm_default_vec_cost, /* Vectorizer costs. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ true, true /* Prefer 32-bit encodings. */
};
/* Branches can be dual-issued on Cortex-A5, so conditional execution is
const struct tune_params arm_cortex_a5_tune =
{
arm_9e_rtx_costs,
- NULL,
+ &cortexa5_extra_costs,
NULL, /* Sched adj cost. */
1, /* Constant limit. */
1, /* Max cond insns. */
false, /* Prefer LDRD/STRD. */
{false, false}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_cortex_a9_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_cortex_a12_tune =
true, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
/* armv7m tuning. On Cortex-M4 cores for example, MOVW/MOVT take a single
&v7m_extra_costs,
NULL, /* Sched adj cost. */
1, /* Constant limit. */
- 5, /* Max cond insns. */
+ 2, /* Max cond insns. */
ARM_PREFETCH_NOT_BENEFICIAL,
true, /* Prefer constant pool. */
arm_cortex_m_branch_cost,
false, /* Prefer LDRD/STRD. */
{false, false}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
/* The arm_v6m_tune is duplicated from arm_cortex_tune, rather than
false, /* Prefer LDRD/STRD. */
{false, false}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
const struct tune_params arm_fa726te_tune =
false, /* Prefer LDRD/STRD. */
{true, true}, /* Prefer non short circuit. */
&arm_default_vec_cost, /* Vectorizer costs. */
- false /* Prefer Neon for 64-bits bitops. */
+ false, /* Prefer Neon for 64-bit bitops. */
+ false, false /* Prefer 32-bit encodings. */
};
/* Emit an insn that's a simple single-set. Both the operands must be known
to be valid. */
-inline static rtx
+inline static rtx_insn *
emit_set_insn (rtx x, rtx y)
{
return emit_insn (gen_rtx_SET (VOIDmode, x, y));
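  /* emit_insn already returns an rtx_insn *, so the narrower return
     type just propagates it and spares callers a cast. */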
if (TARGET_APCS_FLOAT)
warning (0, "passing floating point arguments in fp regs not yet supported");
- if (TARGET_LITTLE_WORDS)
- warning (OPT_Wdeprecated, "%<mwords-little-endian%> is deprecated and "
- "will be removed in a future release");
-
/* Initialize boolean versions of the flags, for use in the arm.md file. */
arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
arm_arch4 = (insn_flags & FL_ARCH4) != 0;
/* If optimizing for size, bump the number of instructions that we
are prepared to conditionally execute (even on a StrongARM). */
max_insns_skipped = 6;
+
+ /* For THUMB2, we limit the conditional sequence to one IT block. */
+ if (TARGET_THUMB2)
+ max_insns_skipped = MAX_INSN_PER_IT_BLOCK;
}
else
max_insns_skipped = current_tune->max_insns_skipped;
prefer_neon_for_64bits = true;
/* Use the alternative scheduling-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
+ maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
global_options.x_param_values,
global_options_set.x_param_values);
Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
*/
- if (!after_arm_reorg
+ if (!cfun->machine->after_arm_reorg
&& !cond
&& (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1, 0)
return hash_rtx (p1, VOIDmode, NULL, NULL, FALSE);
}
-typedef hash_table <libcall_hasher> libcall_table_type;
+typedef hash_table<libcall_hasher> libcall_table_type;
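+/* The table is now used through a pointer: created with
+   "new libcall_table_type (31)" and probed via find_slot () and
+   find (), following the C++ hash_table API. */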
static void
-add_libcall (libcall_table_type htab, rtx libcall)
+add_libcall (libcall_table_type *htab, rtx libcall)
{
- *htab.find_slot (libcall, INSERT) = libcall;
+ *htab->find_slot (libcall, INSERT) = libcall;
}
static bool
arm_libcall_uses_aapcs_base (const_rtx libcall)
{
static bool init_done = false;
- static libcall_table_type libcall_htab;
+ static libcall_table_type *libcall_htab = NULL;
if (!init_done)
{
init_done = true;
- libcall_htab.create (31);
+ libcall_htab = new libcall_table_type (31);
add_libcall (libcall_htab,
convert_optab_libfunc (sfloat_optab, SFmode, SImode));
add_libcall (libcall_htab,
DFmode));
}
- return libcall && libcall_htab.find (libcall) != NULL;
+ return libcall && libcall_htab->find (libcall) != NULL;
}
static rtx
so we are free to use whatever conventions are
appropriate. */
/* FIXME: remove CONST_CAST_TREE when cgraph is constified. */
- struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
+ cgraph_local_info *i = cgraph_node::local_info (CONST_CAST_TREE(decl));
if (i && i->local)
return ARM_PCS_AAPCS_LOCAL;
}
int count;
tree index = TYPE_DOMAIN (type);
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types or sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types or sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types or sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
if (!DECL_SECTION_NAME (decl))
{
/* Make sure that we will not create a unique section for DECL. */
- if (flag_function_sections || DECL_ONE_ONLY (decl))
+ if (flag_function_sections || DECL_COMDAT_GROUP (decl))
return false;
}
if (TARGET_VXWORKS_RTP && flag_pic && !targetm.binds_local_p (decl))
return false;
- /* Cannot tail-call to long calls, since these are out of range of
- a branch instruction. */
- if (decl && arm_is_long_call_p (decl))
- return false;
-
/* If we are interworking and the function is not declared static
then we can't tail-call it unless we know that it exists in this
compilation unit (since it might be a Thumb routine). */
}
else
{
- rtx seq, insn;
+ rtx_insn *seq, *insn;
if (!cfun->machine->pic_reg)
cfun->machine->pic_reg = gen_reg_rtx (Pmode);
rtx
arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
{
+ if (arm_tls_referenced_p (x))
+ {
+ rtx addend = NULL;
+
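+ /* X is either a bare TLS SYMBOL_REF or a (const (plus (symbol_ref)
+ (const_int))) built for "tls_var + offset": split off any addend,
+ expand the TLS reference itself, then re-apply the addend. */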
+ if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (x, 0), 1);
+ x = XEXP (XEXP (x, 0), 0);
+ }
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return x;
+
+ gcc_assert (SYMBOL_REF_TLS_MODEL (x) != 0);
+
+ x = legitimize_tls_address (x, NULL_RTX);
+
+ if (addend)
+ {
+ x = gen_rtx_PLUS (SImode, x, addend);
+ orig_x = x;
+ }
+ else
+ return x;
+ }
+
if (!TARGET_ARM)
{
/* TODO: legitimize_address for Thumb2. */
return thumb_legitimize_address (x, orig_x, mode);
}
- if (arm_tls_symbol_p (x))
- return legitimize_tls_address (x, NULL_RTX);
-
if (GET_CODE (x) == PLUS)
{
rtx xop0 = XEXP (x, 0);
rtx
thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
{
- if (arm_tls_symbol_p (x))
- return legitimize_tls_address (x, NULL_RTX);
-
if (GET_CODE (x) == PLUS
&& CONST_INT_P (XEXP (x, 1))
&& (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
/* Test for various thread-local symbols. */
-/* Return TRUE if X is a thread-local symbol. */
-
-static bool
-arm_tls_symbol_p (rtx x)
-{
- if (! TARGET_HAVE_TLS)
- return false;
-
- if (GET_CODE (x) != SYMBOL_REF)
- return false;
-
- return SYMBOL_REF_TLS_MODEL (x) != 0;
-}
-
/* Helper for arm_tls_referenced_p. */
static int
{
case SET:
*cost = 0;
+ /* SET RTXs don't have a mode, so we get it from the destination. */
+ mode = GET_MODE (SET_DEST (x));
+
if (REG_P (SET_SRC (x))
&& REG_P (SET_DEST (x)))
{
in 16 bits in Thumb mode. */
if (!speed_p && TARGET_THUMB && outer_code == COND_EXEC)
*cost >>= 1;
+
+ return true;
}
if (CONST_INT_P (SET_SRC (x)))
/* Handle CONST_INT here, since the value doesn't have a mode
and we would otherwise be unable to work out the true cost. */
*cost = rtx_cost (SET_DEST (x), SET, 0, speed_p);
- mode = GET_MODE (SET_DEST (x));
outer_code = SET;
/* Slightly lower the cost of setting a core reg to a constant.
This helps break up chains and allows for better scheduling. */
*cost = LIBCALL_COST (2);
return false;
+ case BSWAP:
+ if (arm_arch6)
+ {
+ if (mode == SImode)
+ {
+ *cost = COSTS_N_INSNS (1);
+ if (speed_p)
+ *cost += extra_cost->alu.rev;
+
+ return false;
+ }
+ }
+ else
+ {
+ /* No rev instruction available. Look at arm_legacy_rev
+ and thumb_legacy_rev for the form of RTL used then. */
+ if (TARGET_THUMB)
+ {
+ *cost = COSTS_N_INSNS (10);
+
+ if (speed_p)
+ {
+ *cost += 6 * extra_cost->alu.shift;
+ *cost += 3 * extra_cost->alu.logical;
+ }
+ }
+ else
+ {
+ *cost = COSTS_N_INSNS (5);
+
+ if (speed_p)
+ {
+ *cost += 2 * extra_cost->alu.shift;
+ *cost += extra_cost->alu.arith_shift;
+ *cost += 2 * extra_cost->alu.logical;
+ }
+ }
+ return true;
+ }
+ return false;
+
case MINUS:
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
{
/* UXTA[BH] or SXTA[BH]. */
if (speed_p)
- *cost += extra_cost->alu.extnd_arith;
+ *cost += extra_cost->alu.extend_arith;
*cost += (rtx_cost (XEXP (XEXP (x, 0), 0), ZERO_EXTEND, 0,
speed_p)
+ rtx_cost (XEXP (x, 1), PLUS, 0, speed_p));
/* Vector mode? */
*cost = LIBCALL_COST (2);
return false;
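+ /* aarch_rev16_p recognizes the REV16 idiom, e.g.
+ (ior (and (ashift x 8) (const_int 0xff00ff00))
+ (and (lshiftrt x 8) (const_int 0x00ff00ff))),
+ costed below as a single rev-class insn. */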
- case AND: case XOR: case IOR:
+ case IOR:
+ if (mode == SImode && arm_arch6 && aarch_rev16_p (x))
+ {
+ *cost = COSTS_N_INSNS (1);
+ if (speed_p)
+ *cost += extra_cost->alu.rev;
+ return true;
+ }
+ /* Fall through. */
+ case AND: case XOR:
if (mode == SImode)
{
enum rtx_code subcode = GET_CODE (XEXP (x, 0));
*cost = COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), code, 0, speed_p);
if (speed_p)
- *cost += extra_cost->alu.extnd;
+ *cost += extra_cost->alu.extend;
}
else if (GET_MODE (XEXP (x, 0)) != SImode)
{
*cost = COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), code, 0, speed_p);
if (speed_p)
- *cost += extra_cost->alu.extnd;
+ *cost += extra_cost->alu.extend;
}
else if (GET_MODE (XEXP (x, 0)) != SImode)
{
const_int_cost:
if (mode == SImode)
{
- *cost += 0;
*cost += COSTS_N_INSNS (arm_gen_constant (outer_code, SImode, NULL,
INTVAL (x), NULL, NULL,
0, 0));
*cost = LIBCALL_COST (1);
return false;
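+ /* With TARGET_FMA, a fused multiply-add and its negated variants each
+ map to one instruction (vfma/vfms/vfnma/vfnms), so the NEGs below are
+ stripped before costing the operands. */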
+ case FMA:
+ if (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA)
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ rtx op2 = XEXP (x, 2);
+
+ *cost = COSTS_N_INSNS (1);
+
+ /* vfms or vfnma. */
+ if (GET_CODE (op0) == NEG)
+ op0 = XEXP (op0, 0);
+
+ /* vfnms or vfnma. */
+ if (GET_CODE (op2) == NEG)
+ op2 = XEXP (op2, 0);
+
+ *cost += rtx_cost (op0, FMA, 0, speed_p);
+ *cost += rtx_cost (op1, FMA, 1, speed_p);
+ *cost += rtx_cost (op2, FMA, 2, speed_p);
+
+ if (speed_p)
+ *cost += extra_cost->fp[mode == DFmode].fma;
+
+ return true;
+ }
+
+ *cost = LIBCALL_COST (3);
+ return false;
+
case FIX:
case UNSIGNED_FIX:
if (TARGET_HARD_FLOAT)
return true;
case ASM_OPERANDS:
- /* Just a guess. Cost one insn per input. */
- *cost = COSTS_N_INSNS (ASM_OPERANDS_INPUT_LENGTH (x));
- return true;
+ {
+ /* Just a guess: the number of instructions in the asm template plus
+ one insn per input, with a minimum of COSTS_N_INSNS (1) (see
+ PR60663). */
+ int asm_length = MAX (1, asm_str_count (ASM_OPERANDS_TEMPLATE (x)));
+ int num_operands = ASM_OPERANDS_INPUT_LENGTH (x);
+ *cost = COSTS_N_INSNS (asm_length + num_operands);
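+ /* e.g. asm ("rev %0, %1; rbit %0, %0" : "=r" (y) : "r" (x)) has a
+ two-insn template and one input, so it is costed as
+ COSTS_N_INSNS (3). */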
+ return true;
+ }
default:
if (mode != VOIDmode)
*cost = COSTS_N_INSNS (ARM_NUM_REGS (mode));
that overlaps with SHIFTED_OPERAND, then we have to increase the
cost of this dependency. */
extract_insn (dep);
- preprocess_constraints ();
+ preprocess_constraints (dep);
for (opno = 0; opno < recog_data.n_operands; opno++)
{
/* We can ignore strict inputs. */
switch (get_attr_type (insn))
{
- case TYPE_ALU_REG:
- case TYPE_ALUS_REG:
+ case TYPE_ALU_DSP_REG:
+ case TYPE_ALU_SREG:
+ case TYPE_ALUS_SREG:
case TYPE_LOGIC_REG:
case TYPE_LOGICS_REG:
case TYPE_ADC_REG:
instructions. This heuristic may affect dual issue opportunities
in the current cycle. */
static void
-cortexa7_sched_reorder (FILE *file, int verbose, rtx *ready, int *n_readyp,
- int clock)
+cortexa7_sched_reorder (FILE *file, int verbose, rtx_insn **ready,
+ int *n_readyp, int clock)
{
int i;
int first_older_only = -1, first_younger = -1;
older. */
for (i = *n_readyp - 1; i >= 0; i--)
{
- rtx insn = ready[i];
+ rtx_insn *insn = ready[i];
if (cortexa7_older_only (insn))
{
first_older_only = i;
fprintf (file, ";; cortexa7_sched_reorder insn %d before %d\n",
INSN_UID(ready [first_older_only]),
INSN_UID(ready [first_younger]));
- rtx first_older_only_insn = ready [first_older_only];
+ rtx_insn *first_older_only_insn = ready [first_older_only];
for (i = first_older_only; i < first_younger; i++)
{
ready[i] = ready[i+1];
/* Implement TARGET_SCHED_REORDER. */
static int
-arm_sched_reorder (FILE *file, int verbose, rtx *ready, int *n_readyp,
+arm_sched_reorder (FILE *file, int verbose, rtx_insn **ready, int *n_readyp,
int clock)
{
switch (arm_tune)
adjust_cost function. Only put bits of code into arm_adjust_cost that
are common across all cores. */
static int
-arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
+arm_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
rtx i_pat, d_pat;
int sign, exponent;
unsigned HOST_WIDE_INT mantissa, mant_hi;
unsigned HOST_WIDE_INT mask;
- HOST_WIDE_INT m1, m2;
int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
+ bool fail;
if (!TARGET_VFP3 || !CONST_DOUBLE_P (x))
return -1;
WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
bits for the mantissa, this may fail (low bits would be lost). */
real_ldexp (&m, &r, point_pos - exponent);
- REAL_VALUE_TO_INT (&m1, &m2, m);
- mantissa = m1;
- mant_hi = m2;
+ wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
+ mantissa = w.elt (0);
+ mant_hi = w.elt (1);
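+ /* real_to_integer here yields a wide_int of 2*HOST_BITS_PER_WIDE_INT
+ bits; elt (0) is the low half of the mantissa and elt (1) the high
+ half. */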
/* If there are bits set in the low part of the mantissa, we can't
represent this value. */
rtx ind;
/* Reject eliminable registers. */
- if (! (reload_in_progress || reload_completed)
+ if (! (reload_in_progress || reload_completed || lra_in_progress)
&& ( reg_mentioned_p (frame_pointer_rtx, op)
|| reg_mentioned_p (arg_pointer_rtx, op)
|| reg_mentioned_p (virtual_incoming_args_rtx, op)
|| (type == 0 && GET_CODE (ind) == PRE_DEC))
return arm_address_register_rtx_p (XEXP (ind, 0), 0);
- /* FIXME: vld1 allows register post-modify. */
+ /* Allow post-increment by register for VLDn. */
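+ /* e.g. (mem (post_modify (reg r0) (plus (reg r0) (reg r1)))), as
+ generated for "vld1.32 {d0}, [r0], r1". */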
+ if (type == 2 && GET_CODE (ind) == POST_MODIFY
+ && GET_CODE (XEXP (ind, 1)) == PLUS
+ && REG_P (XEXP (XEXP (ind, 1), 1)))
+ return true;
/* Match:
(plus (reg)
}
static bool
-arm_cannot_copy_insn_p (rtx insn)
+arm_cannot_copy_insn_p (rtx_insn *insn)
{
/* The tls call insn cannot be copied, as it is paired with a data
word. */
struct minipool_fixup
{
Mfix * next;
- rtx insn;
+ rtx_insn * insn;
HOST_WIDE_INT address;
rtx * loc;
enum machine_mode mode;
#endif
static HOST_WIDE_INT
-get_jump_table_size (rtx insn)
+get_jump_table_size (rtx_jump_table_data *insn)
{
/* ADDR_VECs only take room if read-only data goes into the text
section. */
/* Output the literal table */
static void
-dump_minipool (rtx scan)
+dump_minipool (rtx_insn *scan)
{
Mnode * mp;
Mnode * nmp;
create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
{
HOST_WIDE_INT count = 0;
- rtx barrier;
- rtx from = fix->insn;
+ rtx_barrier *barrier;
+ rtx_insn *from = fix->insn;
/* The instruction after which we will insert the jump. */
- rtx selected = NULL;
+ rtx_insn *selected = NULL;
int selected_cost;
/* The address at which the jump instruction will be placed. */
HOST_WIDE_INT selected_address;
Mfix * new_fix;
HOST_WIDE_INT max_count = max_address - fix->address;
- rtx label = gen_label_rtx ();
+ rtx_code_label *label = gen_label_rtx ();
selected_cost = arm_barrier_cost (from);
selected_address = fix->address;
while (from && count < max_count)
{
- rtx tmp;
+ rtx_jump_table_data *tmp;
int new_cost;
/* This code shouldn't have been called if there was a natural barrier
CALL_ARG_LOCATION note. */
if (CALL_P (selected))
{
- rtx next = NEXT_INSN (selected);
+ rtx_insn *next = NEXT_INSN (selected);
if (next && NOTE_P (next)
&& NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
selected = next;
/* Record that there is a natural barrier in the insn stream at
ADDRESS. */
static void
-push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
+push_minipool_barrier (rtx_insn *insn, HOST_WIDE_INT address)
{
Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
fixing; VALUE is the constant that must be loaded, which is of type
MODE. */
static void
-push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
+push_minipool_fix (rtx_insn *insn, HOST_WIDE_INT address, rtx *loc,
enum machine_mode mode, rtx value)
{
Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
If DO_PUSHES is false we do not actually push any of the fixups
needed. */
static void
-note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
+note_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address, int do_pushes)
{
int opno;
/* Fill in recog_op_alt with information about the constraints of
this insn. */
- preprocess_constraints ();
+ preprocess_constraints (insn);
+ const operand_alternative *op_alt = which_op_alt ();
for (opno = 0; opno < recog_data.n_operands; opno++)
{
/* Things we need to fix can only occur in inputs. */
of constants in this alternative is really to fool reload
into allowing us to accept one there. We need to fix them up
now so that we output the right code. */
- if (recog_op_alt[opno][which_alternative].memory_ok)
+ if (op_alt[opno].memory_ok)
{
rtx op = recog_data.operand[opno];
{
rtx dest, src;
rtx pat, op0, set = NULL;
- rtx prev, insn = BB_END (bb);
+ rtx_insn *prev, *insn = BB_END (bb);
bool insn_clobbered = false;
- while (insn != BB_HEAD (bb) && DEBUG_INSN_P (insn))
+ while (insn != BB_HEAD (bb) && !NONDEBUG_INSN_P (insn))
insn = PREV_INSN (insn);
/* Find the last cbranchsi4_insn in basic block BB. */
- if (INSN_CODE (insn) != CODE_FOR_cbranchsi4_insn)
+ if (insn == BB_HEAD (bb)
+ || INSN_CODE (insn) != CODE_FOR_cbranchsi4_insn)
continue;
/* Get the register with which we are comparing. */
compute_bb_for_insn ();
df_analyze ();
+ enum Convert_Action {SKIP, CONV, SWAP_CONV};
+
FOR_EACH_BB_FN (bb, cfun)
{
- rtx insn;
+ if (current_tune->disparage_flag_setting_t16_encodings
+ && optimize_bb_for_speed_p (bb))
+ continue;
+
+ rtx_insn *insn;
+ Convert_Action action = SKIP;
+ Convert_Action action_for_partial_flag_setting
+ = (current_tune->disparage_partial_flag_setting_t16_encodings
+ && optimize_bb_for_speed_p (bb))
+ ? SKIP : CONV;
COPY_REG_SET (&live, DF_LR_OUT (bb));
df_simulate_initialize_backwards (bb, &live);
&& !REGNO_REG_SET_P (&live, CC_REGNUM)
&& GET_CODE (PATTERN (insn)) == SET)
{
- enum {SKIP, CONV, SWAP_CONV} action = SKIP;
+ action = SKIP;
rtx pat = PATTERN (insn);
rtx dst = XEXP (pat, 0);
rtx src = XEXP (pat, 1);
/* ANDS <Rdn>,<Rm> */
if (rtx_equal_p (dst, op0)
&& low_register_operand (op1, SImode))
- action = CONV;
+ action = action_for_partial_flag_setting;
else if (rtx_equal_p (dst, op1)
&& low_register_operand (op0, SImode))
- action = SWAP_CONV;
+ action = action_for_partial_flag_setting == SKIP
+ ? SKIP : SWAP_CONV;
break;
case ASHIFTRT:
/* LSLS <Rdn>,<Rm> */
if (rtx_equal_p (dst, op0)
&& low_register_operand (op1, SImode))
- action = CONV;
+ action = action_for_partial_flag_setting;
/* ASRS <Rd>,<Rm>,#<imm5> */
/* LSRS <Rd>,<Rm>,#<imm5> */
/* LSLS <Rd>,<Rm>,#<imm5> */
else if (low_register_operand (op0, SImode)
&& CONST_INT_P (op1)
&& IN_RANGE (INTVAL (op1), 0, 31))
- action = CONV;
+ action = action_for_partial_flag_setting;
break;
case ROTATERT:
/* RORS <Rdn>,<Rm> */
if (rtx_equal_p (dst, op0)
&& low_register_operand (op1, SImode))
- action = CONV;
+ action = action_for_partial_flag_setting;
break;
case NOT:
- case NEG:
/* MVNS <Rd>,<Rm> */
+ if (low_register_operand (op0, SImode))
+ action = action_for_partial_flag_setting;
+ break;
+
+ case NEG:
/* NEGS <Rd>,<Rm> (a.k.a RSBS) */
if (low_register_operand (op0, SImode))
action = CONV;
/* MOVS <Rd>,#<imm8> */
if (CONST_INT_P (src)
&& IN_RANGE (INTVAL (src), 0, 255))
- action = CONV;
+ action = action_for_partial_flag_setting;
break;
case REG:
static void
arm_reorg (void)
{
- rtx insn;
+ rtx_insn *insn;
HOST_WIDE_INT address = 0;
Mfix * fix;
push_minipool_barrier (insn, address);
else if (INSN_P (insn))
{
- rtx table;
+ rtx_jump_table_data *table;
note_invalid_constants (insn, address, true);
address += get_attr_length (insn);
/* From now on we must synthesize any constants that we can't handle
directly. This can happen if the RTL gets split during final
instruction generation. */
- after_arm_reorg = 1;
+ cfun->machine->after_arm_reorg = 1;
/* Free the minipool memory. */
obstack_free (&minipool_obstack, minipool_startobj);
the call target. */
void
-arm_emit_call_insn (rtx pat, rtx addr)
+arm_emit_call_insn (rtx pat, rtx addr, bool sibcall)
{
rtx insn;
to the instruction's CALL_INSN_FUNCTION_USAGE. */
if (TARGET_VXWORKS_RTP
&& flag_pic
+ && !sibcall
&& GET_CODE (addr) == SYMBOL_REF
&& (SYMBOL_REF_DECL (addr)
? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
require_pic_register ();
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
}
+
+ if (TARGET_AAPCS_BASED)
+ {
+ /* For AAPCS, IP and CC can be clobbered by veneers inserted by the
+ linker. We need to add an IP clobber to allow setting
+ TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS to true. A CC clobber
+ is not needed since it's a fixed register. */
+ rtx *fusage = &CALL_INSN_FUNCTION_USAGE (insn);
+ clobber_reg (fusage, gen_rtx_REG (word_mode, IP_REGNUM));
+ }
}
/* Output a 'call' insn. */
|| (cfun->machine->return_used_this_function != 0)
|| offsets->saved_regs == offsets->outgoing_args
|| frame_pointer_needed);
-
- /* Reset the ARM-specific per-function variables. */
- after_arm_reorg = 0;
}
}
/* Generate and emit an insn that we will recognize as a push_multi.
Unfortunately, since this insn does not reflect very well the actual
semantics of the operation, we need to annotate the insn for the benefit
- of DWARF2 frame unwind information. */
+ of DWARF2 frame unwind information. DWARF_REGS_MASK is a subset of
+ MASK for registers that should be annotated for DWARF2 frame unwind
+ information. */
static rtx
-emit_multi_reg_push (unsigned long mask)
+emit_multi_reg_push (unsigned long mask, unsigned long dwarf_regs_mask)
{
int num_regs = 0;
- int num_dwarf_regs;
+ int num_dwarf_regs = 0;
int i, j;
rtx par;
rtx dwarf;
int dwarf_par_index;
rtx tmp, reg;
+ /* We don't record the PC in the dwarf frame information. */
+ dwarf_regs_mask &= ~(1 << PC_REGNUM);
+
for (i = 0; i <= LAST_ARM_REGNUM; i++)
- if (mask & (1 << i))
- num_regs++;
+ {
+ if (mask & (1 << i))
+ num_regs++;
+ if (dwarf_regs_mask & (1 << i))
+ num_dwarf_regs++;
+ }
gcc_assert (num_regs && num_regs <= 16);
-
- /* We don't record the PC in the dwarf frame information. */
- num_dwarf_regs = num_regs;
- if (mask & (1 << PC_REGNUM))
- num_dwarf_regs--;
+ gcc_assert ((dwarf_regs_mask & ~mask) == 0);
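+ /* For example, a MASK of {r4, r5, pc} pushes three words while a
+ DWARF_REGS_MASK of {r4, r5} annotates only the two restorable
+ registers; the PC slot is then visible to the unwinder only as a
+ stack adjustment. */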
/* For the body of the insn we are going to generate an UNSPEC in
parallel with several USEs. This allows the insn to be recognized
gen_rtvec (1, reg),
UNSPEC_PUSH_MULT));
- if (i != PC_REGNUM)
+ if (dwarf_regs_mask & (1 << i))
{
tmp = gen_rtx_SET (VOIDmode,
gen_frame_mem (SImode, stack_pointer_rtx),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
- XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
- dwarf_par_index++;
+ XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
}
break;
XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
- if (i != PC_REGNUM)
+ if (dwarf_regs_mask & (1 << i))
{
tmp
= gen_rtx_SET (VOIDmode,
par = emit_insn (par);
REG_NOTES (par) = dwarf;
- arm_add_cfa_adjust_cfa_note (par, 2 * UNITS_PER_WORD * num_regs,
- base_reg, base_reg);
+ /* Make sure the CFA doesn't end up based on IP_REGNUM, to allow unwinding from FP. */
+ if (TARGET_VFP && REGNO (base_reg) == IP_REGNUM)
+ {
+ RTX_FRAME_RELATED_P (par) = 1;
+ add_reg_note (par, REG_CFA_DEF_CFA, hard_frame_pointer_rtx);
+ }
+ else
+ arm_add_cfa_adjust_cfa_note (par, 2 * UNITS_PER_WORD * num_regs,
+ base_reg, base_reg);
}
/* Generate and emit a pattern that will be recognized as LDRD pattern. If even
offsets->saved_args = crtl->args.pretend_args_size;
/* In Thumb mode this is incorrect, but never used. */
- offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0) +
- arm_compute_static_chain_stack_bytes();
+ offsets->frame
+ = (offsets->saved_args
+ + arm_compute_static_chain_stack_bytes ()
+ + (frame_pointer_needed ? 4 : 0));
if (TARGET_32BIT)
{
}
/* Saved registers include the stack frame. */
- offsets->saved_regs = offsets->saved_args + saved +
- arm_compute_static_chain_stack_bytes();
+ offsets->saved_regs
+ = offsets->saved_args + arm_compute_static_chain_stack_bytes () + saved;
offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
+
/* A leaf function does not need any stack alignment if it has nothing
on the stack. */
if (leaf && frame_size == 0
{
int reg = -1;
+ /* Register r3 is caller-saved. Normally it does not need to be
+ saved on entry by the prologue. However if we choose to save
+ it for padding then we may confuse the compiler into thinking
+ a prologue sequence is required when in fact it is not. This
+ will occur when shrink-wrapping if r3 is used as a scratch
+ register and there are no other callee-saved writes.
+
+ This situation can be avoided when other callee-saved registers
+ are available: r3 is not mandatory, so we can choose a callee-saved
+ register for the padding instead. */
+ bool prefer_callee_reg_p = false;
+
/* If it is safe to use r3, then do so. This sometimes
generates better code on Thumb-2 by avoiding the need to
use 32-bit push/pop instructions. */
if (! any_sibcall_could_use_r3 ()
&& arm_size_return_regs () <= 12
&& (offsets->saved_regs_mask & (1 << 3)) == 0
- && (TARGET_THUMB2
+ && (TARGET_THUMB2
|| !(TARGET_LDRD && current_tune->prefer_ldrd_strd)))
{
reg = 3;
+ if (!TARGET_THUMB2)
+ prefer_callee_reg_p = true;
+ }
+ if (reg == -1
+ || prefer_callee_reg_p)
+ {
+ for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
+ {
+ /* Avoid fixed registers; they may be changed at
+ arbitrary times so it's unsafe to restore them
+ during the epilogue. */
+ if (!fixed_regs[i]
+ && (offsets->saved_regs_mask & (1 << i)) == 0)
+ {
+ reg = i;
+ break;
+ }
+ }
}
- else
- for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
- {
- /* Avoid fixed registers; they may be changed at
- arbitrary times so it's unsafe to restore them
- during the epilogue. */
- if (!fixed_regs[i]
- && (offsets->saved_regs_mask & (1 << i)) == 0)
- {
- reg = i;
- break;
- }
- }
if (reg != -1)
{
/* Interrupt functions must not corrupt any registers.
Creating a frame pointer however, corrupts the IP
register, so we must push it first. */
- emit_multi_reg_push (1 << IP_REGNUM);
+ emit_multi_reg_push (1 << IP_REGNUM, 1 << IP_REGNUM);
/* Do not set RTX_FRAME_RELATED_P on this insn.
The dwarf stack unwinding code only wants to see one
if (cfun->machine->uses_anonymous_args)
{
insn
- = emit_multi_reg_push ((0xf0 >> (args_to_push / 4)) & 0xf);
+ = emit_multi_reg_push ((0xf0 >> (args_to_push / 4)) & 0xf,
+ (0xf0 >> (args_to_push / 4)) & 0xf);
emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
saved_pretend_args = 1;
}
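+ /* Worked example: args_to_push == 8 means two anonymous-argument
+ registers are pushed, and (0xf0 >> 2) & 0xf == 0xc selects {r2, r3}. */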
/* Push the argument registers, or reserve space for them. */
if (cfun->machine->uses_anonymous_args)
insn = emit_multi_reg_push
- ((0xf0 >> (args_to_push / 4)) & 0xf);
+ ((0xf0 >> (args_to_push / 4)) & 0xf,
+ (0xf0 >> (args_to_push / 4)) & 0xf);
else
insn = emit_insn
(gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
if (live_regs_mask)
{
+ unsigned long dwarf_regs_mask = live_regs_mask;
+
saved_regs += bit_count (live_regs_mask) * 4;
if (optimize_size && !frame_pointer_needed
&& saved_regs == offsets->saved_regs - offsets->saved_args)
&& current_tune->prefer_ldrd_strd
&& !optimize_function_for_size_p (cfun))
{
+ gcc_checking_assert (live_regs_mask == dwarf_regs_mask);
if (TARGET_THUMB2)
- {
- thumb2_emit_strd_push (live_regs_mask);
- }
+ thumb2_emit_strd_push (live_regs_mask);
else if (TARGET_ARM
&& !TARGET_APCS_FRAME
&& !IS_INTERRUPT (func_type))
- {
- arm_emit_strd_push (live_regs_mask);
- }
+ arm_emit_strd_push (live_regs_mask);
else
{
- insn = emit_multi_reg_push (live_regs_mask);
+ insn = emit_multi_reg_push (live_regs_mask, live_regs_mask);
RTX_FRAME_RELATED_P (insn) = 1;
}
}
else
{
- insn = emit_multi_reg_push (live_regs_mask);
+ insn = emit_multi_reg_push (live_regs_mask, dwarf_regs_mask);
RTX_FRAME_RELATED_P (insn) = 1;
}
}
{
/* This add can produce multiple insns for a large constant, so we
need to get tricky. */
- rtx last = get_last_insn ();
+ rtx_insn *last = get_last_insn ();
amount = GEN_INT (offsets->saved_args + saved_regs
- offsets->outgoing_args);
}
-/* If CODE is 'd', then the X is a condition operand and the instruction
+/* Globally reserved letters: acln
+ Punctuation letters currently used: @_|?().!#
+ Lower case letters currently used: bcdefhimpqtvwxyz
+ Upper case letters currently used: ABCDFGHJKLMNOPQRSTU
+ Letters previously used, but now deprecated/obsolete: sVWXYZ.
+
+ Note that the global reservation for 'c' is only for CONSTANT_ADDRESS_P.
+
+ If CODE is 'd', then the X is a condition operand and the instruction
should only be executed if the condition is true.
if CODE is 'D', then the X is a condition operand and the instruction
should only be executed if the condition is false: however, if the mode
}
return;
+ case 'b':
+ /* Print the log2 of a CONST_INT. */
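+ /* For example, (const_int 16) is printed as "#4". */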
+ {
+ HOST_WIDE_INT val;
+
+ if (!CONST_INT_P (x)
+ || (val = exact_log2 (INTVAL (x) & 0xffffffff)) < 0)
+ output_operand_lossage ("Unsupported operand for code '%c'", code);
+ else
+ fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
+ }
+ return;
+
case 'L':
/* The low 16 bits of an immediate constant. */
fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL(x) & 0xffff);
register. */
case 'p':
{
- int mode = GET_MODE (x);
+ enum machine_mode mode = GET_MODE (x);
int regno;
if (GET_MODE_SIZE (mode) != 8 || !REG_P (x))
case 'P':
case 'q':
{
- int mode = GET_MODE (x);
+ enum machine_mode mode = GET_MODE (x);
int is_quad = (code == 'q');
int regno;
case 'e':
case 'f':
{
- int mode = GET_MODE (x);
+ enum machine_mode mode = GET_MODE (x);
int regno;
if ((GET_MODE_SIZE (mode) != 16
{
rtx addr;
bool postinc = FALSE;
+ rtx postinc_reg = NULL;
unsigned align, memsize, align_bits;
gcc_assert (MEM_P (x));
postinc = 1;
addr = XEXP (addr, 0);
}
+ if (GET_CODE (addr) == POST_MODIFY)
+ {
+ postinc_reg = XEXP (XEXP (addr, 1), 1);
+ addr = XEXP (addr, 0);
+ }
asm_fprintf (stream, "[%r", REGNO (addr));
/* We know the alignment of this access, so we can emit a hint in the
if (postinc)
fputs("!", stream);
+ if (postinc_reg)
+ asm_fprintf (stream, ", %r", REGNO (postinc_reg));
}
return;
/* Translate an S register number into a D register number and element index. */
case 'y':
{
- int mode = GET_MODE (x);
+ enum machine_mode mode = GET_MODE (x);
int regno;
if (GET_MODE_SIZE (mode) != 4 || !REG_P (x))
number into a D register number and element index. */
case 'z':
{
- int mode = GET_MODE (x);
+ enum machine_mode mode = GET_MODE (x);
int regno;
if (GET_MODE_SIZE (mode) != 2 || !REG_P (x))
/* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
instructions. */
void
-thumb2_final_prescan_insn (rtx insn)
+thumb2_final_prescan_insn (rtx_insn *insn)
{
- rtx first_insn = insn;
+ rtx_insn *first_insn = insn;
rtx body = PATTERN (insn);
rtx predicate;
enum arm_cond_code code;
int mask;
int max;
- /* Maximum number of conditionally executed instructions in a block
- is minimum of the two max values: maximum allowed in an IT block
- and maximum that is beneficial according to the cost model and tune. */
- max = (max_insns_skipped < MAX_INSN_PER_IT_BLOCK) ?
- max_insns_skipped : MAX_INSN_PER_IT_BLOCK;
+ /* max_insns_skipped in the tune was already taken into account in the
+ cost model of the ifcvt pass when generating COND_EXEC insns. At this
+ stage just emit IT blocks as large as we can; it does not make sense
+ to split them. */
+ max = MAX_INSN_PER_IT_BLOCK;
/* Remove the previous insn from the count of insns to be output. */
if (arm_condexec_count)
}
void
-arm_final_prescan_insn (rtx insn)
+arm_final_prescan_insn (rtx_insn *insn)
{
/* BODY will hold the body of INSN. */
rtx body = PATTERN (insn);
/* START_INSN will hold the insn from where we start looking. This is the
first insn after the following code_label if REVERSE is true. */
- rtx start_insn = insn;
+ rtx_insn *start_insn = insn;
/* If in state 4, check if the target branch is reached, in order to
change back to state 0. */
int fail = FALSE, succeed = FALSE;
/* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
int then_not_else = TRUE;
- rtx this_insn = start_insn, label = 0;
+ rtx_insn *this_insn = start_insn;
+ rtx label = 0;
/* Register the insn jumped to. */
if (reverse)
|| (TARGET_HARD_FLOAT && TARGET_VFP
&& regno == VFPCC_REGNUM));
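+ /* Only modes of class MODE_CC may live in the condition-code
+ register. */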
+ if (regno == CC_REGNUM && GET_MODE_CLASS (mode) != MODE_CC)
+ return false;
+
if (TARGET_THUMB1)
/* For the Thumb we only allow values bigger than SImode in
registers 0 - 6, so that there is always a second low
}
/* We allow almost any value to be stored in the general registers.
- Restrict doubleword quantities to even register pairs so that we can
- use ldrd. Do not allow very large Neon structure opaque modes in
- general registers; they would use too many. */
+ Restrict doubleword quantities to even register pairs in ARM state
+ so that we can use ldrd. Do not allow very large Neon structure
+ opaque modes in general registers; they would use too many. */
if (regno <= LAST_ARM_REGNUM)
- return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0)
- && ARM_NUM_REGS (mode) <= 4;
+ {
+ if (ARM_NUM_REGS (mode) > 4)
+ return FALSE;
+
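+ /* ARM-state ldrd/strd require an even/odd register pair (e.g.
+ r4/r5); Thumb-2 ldrd/strd accept two arbitrary registers, so no
+ pairing restriction applies there. */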
+ if (TARGET_THUMB2)
+ return TRUE;
+
+ return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
+ }
if (regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
enum reg_class
arm_regno_class (int regno)
{
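+ /* The program counter is never available for allocation, so it
+ belongs to no register class. */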
+ if (regno == PC_REGNUM)
+ return NO_REGS;
+
if (TARGET_THUMB1)
{
if (regno == STACK_POINTER_REGNUM)
int
arm_debugger_arg_offset (int value, rtx addr)
{
- rtx insn;
+ rtx_insn *insn;
/* We are only interested if dbxout_parms() failed to compute the offset. */
if (value != 0)
NEON_BINOP,
NEON_TERNOP,
NEON_UNOP,
+ NEON_BSWAP,
NEON_GETLANE,
NEON_SETLANE,
NEON_CREATE,
NEON_FLOAT_NARROW,
NEON_FIXCONV,
NEON_SELECT,
- NEON_RESULTPAIR,
NEON_REINTERP,
NEON_VTBL,
NEON_VTBX,
ARM_BUILTIN_CRC32CH,
ARM_BUILTIN_CRC32CW,
+ ARM_BUILTIN_GET_FPSCR,
+ ARM_BUILTIN_SET_FPSCR,
+
#undef CRYPTO1
#undef CRYPTO2
#undef CRYPTO3
tree V8QI_type_node;
tree V4HI_type_node;
+ tree V4UHI_type_node;
tree V4HF_type_node;
tree V2SI_type_node;
+ tree V2USI_type_node;
tree V2SF_type_node;
tree V16QI_type_node;
tree V8HI_type_node;
+ tree V8UHI_type_node;
tree V4SI_type_node;
+ tree V4USI_type_node;
tree V4SF_type_node;
tree V2DI_type_node;
+ tree V2UDI_type_node;
tree intUQI_type_node;
tree intUHI_type_node;
tree intCI_type_node;
tree intXI_type_node;
- tree V8QI_pointer_node;
- tree V4HI_pointer_node;
- tree V2SI_pointer_node;
- tree V2SF_pointer_node;
- tree V16QI_pointer_node;
- tree V8HI_pointer_node;
- tree V4SI_pointer_node;
- tree V4SF_pointer_node;
- tree V2DI_pointer_node;
-
- tree void_ftype_pv8qi_v8qi_v8qi;
- tree void_ftype_pv4hi_v4hi_v4hi;
- tree void_ftype_pv2si_v2si_v2si;
- tree void_ftype_pv2sf_v2sf_v2sf;
- tree void_ftype_pdi_di_di;
- tree void_ftype_pv16qi_v16qi_v16qi;
- tree void_ftype_pv8hi_v8hi_v8hi;
- tree void_ftype_pv4si_v4si_v4si;
- tree void_ftype_pv4sf_v4sf_v4sf;
- tree void_ftype_pv2di_v2di_v2di;
-
tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
const_intDI_pointer_node = build_pointer_type (const_intDI_node);
const_float_pointer_node = build_pointer_type (const_float_node);
+ /* Unsigned integer types for various mode sizes. */
+ intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
+ intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
+ intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
+ intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+ neon_intUTI_type_node = make_unsigned_type (GET_MODE_PRECISION (TImode));
/* Now create vector types based on our NEON element types. */
/* 64-bit vectors. */
V8QI_type_node =
build_vector_type_for_mode (neon_intQI_type_node, V8QImode);
V4HI_type_node =
build_vector_type_for_mode (neon_intHI_type_node, V4HImode);
+ V4UHI_type_node =
+ build_vector_type_for_mode (intUHI_type_node, V4HImode);
V4HF_type_node =
build_vector_type_for_mode (neon_floatHF_type_node, V4HFmode);
V2SI_type_node =
build_vector_type_for_mode (neon_intSI_type_node, V2SImode);
+ V2USI_type_node =
+ build_vector_type_for_mode (intUSI_type_node, V2SImode);
V2SF_type_node =
build_vector_type_for_mode (neon_float_type_node, V2SFmode);
/* 128-bit vectors. */
build_vector_type_for_mode (neon_intQI_type_node, V16QImode);
V8HI_type_node =
build_vector_type_for_mode (neon_intHI_type_node, V8HImode);
+ V8UHI_type_node =
+ build_vector_type_for_mode (intUHI_type_node, V8HImode);
V4SI_type_node =
build_vector_type_for_mode (neon_intSI_type_node, V4SImode);
+ V4USI_type_node =
+ build_vector_type_for_mode (intUSI_type_node, V4SImode);
V4SF_type_node =
build_vector_type_for_mode (neon_float_type_node, V4SFmode);
V2DI_type_node =
build_vector_type_for_mode (neon_intDI_type_node, V2DImode);
-
- /* Unsigned integer types for various mode sizes. */
- intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
- intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
- intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
- intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
- neon_intUTI_type_node = make_unsigned_type (GET_MODE_PRECISION (TImode));
+ V2UDI_type_node =
+ build_vector_type_for_mode (intUDI_type_node, V2DImode);
(*lang_hooks.types.register_builtin_type) (intUQI_type_node,
(*lang_hooks.types.register_builtin_type) (intXI_type_node,
"__builtin_neon_xi");
- /* Pointers to vector types. */
- V8QI_pointer_node = build_pointer_type (V8QI_type_node);
- V4HI_pointer_node = build_pointer_type (V4HI_type_node);
- V2SI_pointer_node = build_pointer_type (V2SI_type_node);
- V2SF_pointer_node = build_pointer_type (V2SF_type_node);
- V16QI_pointer_node = build_pointer_type (V16QI_type_node);
- V8HI_pointer_node = build_pointer_type (V8HI_type_node);
- V4SI_pointer_node = build_pointer_type (V4SI_type_node);
- V4SF_pointer_node = build_pointer_type (V4SF_type_node);
- V2DI_pointer_node = build_pointer_type (V2DI_type_node);
-
- /* Operations which return results as pairs. */
- void_ftype_pv8qi_v8qi_v8qi =
- build_function_type_list (void_type_node, V8QI_pointer_node, V8QI_type_node,
- V8QI_type_node, NULL);
- void_ftype_pv4hi_v4hi_v4hi =
- build_function_type_list (void_type_node, V4HI_pointer_node, V4HI_type_node,
- V4HI_type_node, NULL);
- void_ftype_pv2si_v2si_v2si =
- build_function_type_list (void_type_node, V2SI_pointer_node, V2SI_type_node,
- V2SI_type_node, NULL);
- void_ftype_pv2sf_v2sf_v2sf =
- build_function_type_list (void_type_node, V2SF_pointer_node, V2SF_type_node,
- V2SF_type_node, NULL);
- void_ftype_pdi_di_di =
- build_function_type_list (void_type_node, intDI_pointer_node,
- neon_intDI_type_node, neon_intDI_type_node, NULL);
- void_ftype_pv16qi_v16qi_v16qi =
- build_function_type_list (void_type_node, V16QI_pointer_node,
- V16QI_type_node, V16QI_type_node, NULL);
- void_ftype_pv8hi_v8hi_v8hi =
- build_function_type_list (void_type_node, V8HI_pointer_node, V8HI_type_node,
- V8HI_type_node, NULL);
- void_ftype_pv4si_v4si_v4si =
- build_function_type_list (void_type_node, V4SI_pointer_node, V4SI_type_node,
- V4SI_type_node, NULL);
- void_ftype_pv4sf_v4sf_v4sf =
- build_function_type_list (void_type_node, V4SF_pointer_node, V4SF_type_node,
- V4SF_type_node, NULL);
- void_ftype_pv2di_v2di_v2di =
- build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node,
- V2DI_type_node, NULL);
-
if (TARGET_CRYPTO && TARGET_HARD_FLOAT)
{
- tree V4USI_type_node =
- build_vector_type_for_mode (intUSI_type_node, V4SImode);
tree V16UQI_type_node =
build_vector_type_for_mode (intUQI_type_node, V16QImode);
}
break;
- case NEON_RESULTPAIR:
- {
- switch (insn_data[d->code].operand[1].mode)
- {
- case V8QImode: ftype = void_ftype_pv8qi_v8qi_v8qi; break;
- case V4HImode: ftype = void_ftype_pv4hi_v4hi_v4hi; break;
- case V2SImode: ftype = void_ftype_pv2si_v2si_v2si; break;
- case V2SFmode: ftype = void_ftype_pv2sf_v2sf_v2sf; break;
- case DImode: ftype = void_ftype_pdi_di_di; break;
- case V16QImode: ftype = void_ftype_pv16qi_v16qi_v16qi; break;
- case V8HImode: ftype = void_ftype_pv8hi_v8hi_v8hi; break;
- case V4SImode: ftype = void_ftype_pv4si_v4si_v4si; break;
- case V4SFmode: ftype = void_ftype_pv4sf_v4sf_v4sf; break;
- case V2DImode: ftype = void_ftype_pv2di_v2di_v2di; break;
- default: gcc_unreachable ();
- }
- }
- break;
-
case NEON_REINTERP:
{
/* We iterate over NUM_DREG_TYPES doubleword types,
ftype = build_function_type_list (return_type, eltype, NULL);
break;
}
+ case NEON_BSWAP:
+ {
+ tree eltype = NULL_TREE;
+ switch (insn_data[d->code].operand[1].mode)
+ {
+ case V4HImode:
+ eltype = V4UHI_type_node;
+ break;
+ case V8HImode:
+ eltype = V8UHI_type_node;
+ break;
+ case V2SImode:
+ eltype = V2USI_type_node;
+ break;
+ case V4SImode:
+ eltype = V4USI_type_node;
+ break;
+ case V2DImode:
+ eltype = V2UDI_type_node;
+ break;
+ default: gcc_unreachable ();
+ }
+ ftype = build_function_type_list (eltype, eltype, NULL);
+ break;
+ }
default:
gcc_unreachable ();
}
IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
+
+#define FP_BUILTIN(L, U) \
+ {0, CODE_FOR_##L, "__builtin_arm_"#L, ARM_BUILTIN_##U, \
+ UNKNOWN, 0},
+
+ FP_BUILTIN (get_fpscr, GET_FPSCR)
+ FP_BUILTIN (set_fpscr, SET_FPSCR)
+#undef FP_BUILTIN
+
#define CRC32_BUILTIN(L, U) \
{0, CODE_FOR_##L, "__builtin_arm_"#L, ARM_BUILTIN_##U, \
UNKNOWN, 0},
enum machine_mode mode;
tree type;
- if (d->name == 0)
+ if (d->name == 0 || !(d->mask == FL_IWMMXT || d->mask == FL_IWMMXT2))
continue;
mode = insn_data[d->icode].operand[1].mode;
if (TARGET_CRC32)
arm_init_crc32_builtins ();
+
+ if (TARGET_VFP && TARGET_HARD_FLOAT)
+ {
+ tree ftype_set_fpscr
+ = build_function_type_list (void_type_node, unsigned_type_node, NULL);
+ tree ftype_get_fpscr
+ = build_function_type_list (unsigned_type_node, NULL);
+
+ arm_builtin_decls[ARM_BUILTIN_GET_FPSCR]
+ = add_builtin_function ("__builtin_arm_ldfscr", ftype_get_fpscr,
+ ARM_BUILTIN_GET_FPSCR, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_SET_FPSCR]
+ = add_builtin_function ("__builtin_arm_stfscr", ftype_set_fpscr,
+ ARM_BUILTIN_SET_FPSCR, BUILT_IN_MD, NULL, NULL_TREE);
+ }
}
/* Return the ARM builtin for CODE. */
type_mode);
}
- op[argc] = expand_normal (arg[argc]);
+ /* Use EXPAND_MEMORY for NEON_ARG_MEMORY to ensure that a MEM_P
+ is returned. */
+ op[argc] = expand_expr (arg[argc], NULL_RTX, VOIDmode,
+ (thisarg == NEON_ARG_MEMORY
+ ? EXPAND_MEMORY : EXPAND_NORMAL));
switch (thisarg)
{
break;
case NEON_ARG_MEMORY:
+ /* Check if expand failed. */
+ if (op[argc] == const0_rtx)
+ return 0;
gcc_assert (MEM_P (op[argc]));
PUT_MODE (op[argc], mode[argc]);
/* ??? arm_neon.h uses the same built-in functions for signed
case NEON_SPLIT:
case NEON_FLOAT_WIDEN:
case NEON_FLOAT_NARROW:
+ case NEON_BSWAP:
case NEON_REINTERP:
return arm_expand_neon_args (target, icode, 1, type_mode, exp, fcode,
NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
return arm_expand_neon_args (target, icode, 1, type_mode, exp, fcode,
NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
- case NEON_RESULTPAIR:
- return arm_expand_neon_args (target, icode, 0, type_mode, exp, fcode,
- NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
- NEON_ARG_STOP);
-
case NEON_LANEMUL:
case NEON_LANEMULL:
case NEON_LANEMULH:
emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
}
-/* Emit code to place a Neon pair result in memory locations (with equal
- registers). */
-void
-neon_emit_pair_result_insn (enum machine_mode mode,
- rtx (*intfn) (rtx, rtx, rtx, rtx), rtx destaddr,
- rtx op1, rtx op2)
-{
- rtx mem = gen_rtx_MEM (mode, destaddr);
- rtx tmp1 = gen_reg_rtx (mode);
- rtx tmp2 = gen_reg_rtx (mode);
-
- emit_insn (intfn (tmp1, op1, op2, tmp2));
-
- emit_move_insn (mem, tmp1);
- mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
- emit_move_insn (mem, tmp2);
-}
-
/* Set up OPERANDS for a register copy from SRC to DEST, taking care
not to early-clobber SRC registers in the process.
switch (fcode)
{
+ case ARM_BUILTIN_GET_FPSCR:
+ case ARM_BUILTIN_SET_FPSCR:
+ if (fcode == ARM_BUILTIN_GET_FPSCR)
+ {
+ icode = CODE_FOR_get_fpscr;
+ target = gen_reg_rtx (SImode);
+ pat = GEN_FCN (icode) (target);
+ }
+ else
+ {
+ target = NULL_RTX;
+ icode = CODE_FOR_set_fpscr;
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ pat = GEN_FCN (icode) (op0);
+ }
+ emit_insn (pat);
+ return target;
+
case ARM_BUILTIN_TEXTRMSB:
case ARM_BUILTIN_TEXTRMUB:
case ARM_BUILTIN_TEXTRMSH:
to be saved; REAL_REGS is the set of registers to be described as
saved. If REAL_REGS is 0, only describe the stack adjustment. */
-static rtx
+static rtx_insn *
thumb1_emit_multi_reg_push (unsigned long mask, unsigned long real_regs)
{
unsigned long regno;
- rtx par[10], tmp, reg, insn;
+ rtx par[10], tmp, reg;
+ rtx_insn *insn;
int i, j;
/* Build the parallel of the registers actually being stored. */
int pops_needed;
unsigned available;
unsigned required;
- int mode;
+ enum machine_mode mode;
int size;
int restore_a4 = FALSE;
For Thumb-1, we track the status of the condition codes; this
information is used in the cbranchsi4_insn pattern. */
void
-thumb1_final_prescan_insn (rtx insn)
+thumb1_final_prescan_insn (rtx_insn *insn)
{
if (flag_print_asm_name)
asm_fprintf (asm_out_file, "%@ 0x%04x\n",
static int
thumb_far_jump_used_p (void)
{
- rtx insn;
+ rtx_insn *insn;
bool far_jump = false;
unsigned int func_size = 0;
return 0;
}
+ /* We should not change far_jump_used during or after reload, as there is
+ no longer any chance to change the stack frame layout. */
+ if (reload_in_progress || reload_completed)
+ return 0;
+
/* Check to see if the function contains a branch
insn with the far jump attribute set. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
arm_init_machine_status (void)
{
struct machine_function *machine;
- machine = ggc_alloc_cleared_machine_function ();
+ machine = ggc_cleared_alloc<machine_function> ();
#if ARM_FT_UNKNOWN != 0
machine->func_type = ARM_FT_UNKNOWN;
void
thumb1_expand_prologue (void)
{
- rtx insn;
+ rtx_insn *insn;
HOST_WIDE_INT amount;
arm_stack_offsets *offsets;
saved_regs_mask = offsets->saved_regs_mask;
/* Find the offset of the floating-point save area in the frame. */
- floats_from_frame = offsets->saved_args - offsets->frame;
+ floats_from_frame
+ = (offsets->saved_args
+ + arm_compute_static_chain_stack_bytes ()
+ - offsets->frame);
/* Compute how many core registers saved and how far away the floats are. */
for (i = 0; i <= LAST_ARM_REGNUM; i++)
if (TARGET_HARD_FLOAT && TARGET_VFP)
{
int start_reg;
+ rtx ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
/* The offset is from IP_REGNUM. */
int saved_size = arm_get_vfp_saved_size ();
if (saved_size > 0)
{
+ rtx_insn *insn;
floats_from_frame += saved_size;
- emit_insn (gen_addsi3 (gen_rtx_REG (SImode, IP_REGNUM),
- hard_frame_pointer_rtx,
- GEN_INT (-floats_from_frame)));
+ insn = emit_insn (gen_addsi3 (ip_rtx,
+ hard_frame_pointer_rtx,
+ GEN_INT (-floats_from_frame)));
+ arm_add_cfa_adjust_cfa_note (insn, -floats_from_frame,
+ ip_rtx, hard_frame_pointer_rtx);
}
/* Generate VFP register multi-pop. */
{
/* The frame pointer is guaranteed to be non-double-word aligned, as
it is set to double-word-aligned old_stack_pointer - 4. */
- rtx insn;
+ rtx_insn *insn;
int lrm_count = (num_regs % 2) ? (num_regs + 2) : (num_regs + 1);
for (i = LAST_IWMMXT_REGNUM; i >= FIRST_IWMMXT_REGNUM; i--)
num_regs = bit_count (saved_regs_mask);
if ((offsets->outgoing_args != (1 + num_regs)) || cfun->calls_alloca)
{
+ rtx_insn *insn;
emit_insn (gen_blockage ());
/* Unwind the stack to just below the saved registers. */
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- hard_frame_pointer_rtx,
- GEN_INT (- 4 * num_regs)));
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ GEN_INT (- 4 * num_regs)));
+
+ arm_add_cfa_adjust_cfa_note (insn, - 4 * num_regs,
+ stack_pointer_rtx, hard_frame_pointer_rtx);
}
arm_emit_multi_reg_pop (saved_regs_mask);
{
/* Interrupt handlers will have pushed the
IP onto the stack, so restore it now. */
- rtx insn;
+ rtx_insn *insn;
rtx addr = gen_rtx_MEM (SImode,
gen_rtx_POST_INC (SImode,
stack_pointer_rtx));
if (frame_pointer_needed)
{
- rtx insn;
+ rtx_insn *insn;
/* Restore stack pointer if necessary. */
if (TARGET_ARM)
{
amount = offsets->outgoing_args - offsets->saved_regs;
if (amount)
{
- rtx tmp;
+ rtx_insn *tmp;
/* Force out any pending memory operations that reference stacked data
before stack de-allocation occurs. */
emit_insn (gen_blockage ());
for (i = FIRST_IWMMXT_REGNUM; i <= LAST_IWMMXT_REGNUM; i++)
if (df_regs_ever_live_p (i) && !call_used_regs[i])
{
- rtx insn;
+ rtx_insn *insn;
rtx addr = gen_rtx_MEM (V2SImode,
gen_rtx_POST_INC (SImode,
stack_pointer_rtx));
{
int i, j;
rtx dwarf = NULL_RTX;
- rtx tmp = emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (crtl->args.pretend_args_size)));
+ rtx_insn *tmp =
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (crtl->args.pretend_args_size)));
RTX_FRAME_RELATED_P (tmp) = 1;
const char *fpu_name;
if (arm_selected_arch)
{
- const char* pos = strchr (arm_selected_arch->name, '+');
- if (pos)
+ /* armv7ve doesn't support any extensions. */
+ if (strcmp (arm_selected_arch->name, "armv7ve") == 0)
{
- char buf[15];
- gcc_assert (strlen (arm_selected_arch->name)
- <= sizeof (buf) / sizeof (*pos));
- strncpy (buf, arm_selected_arch->name,
- (pos - arm_selected_arch->name) * sizeof (*pos));
- buf[pos - arm_selected_arch->name] = '\0';
- asm_fprintf (asm_out_file, "\t.arch %s\n", buf);
- asm_fprintf (asm_out_file, "\t.arch_extension %s\n", pos + 1);
+ /* Keep backward compatibility for assemblers
+ which don't support armv7ve. */
+ asm_fprintf (asm_out_file, "\t.arch armv7-a\n");
+ asm_fprintf (asm_out_file, "\t.arch_extension virt\n");
+ asm_fprintf (asm_out_file, "\t.arch_extension idiv\n");
+ asm_fprintf (asm_out_file, "\t.arch_extension sec\n");
+ asm_fprintf (asm_out_file, "\t.arch_extension mp\n");
}
else
- asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ {
+ const char* pos = strchr (arm_selected_arch->name, '+');
+ if (pos)
+ {
+ char buf[15];
+ gcc_assert (strlen (arm_selected_arch->name)
+ <= sizeof (buf) / sizeof (*pos));
+ strncpy (buf, arm_selected_arch->name,
+ (pos - arm_selected_arch->name) * sizeof (*pos));
+ buf[pos - arm_selected_arch->name] = '\0';
+ asm_fprintf (asm_out_file, "\t.arch %s\n", buf);
+ asm_fprintf (asm_out_file, "\t.arch_extension %s\n", pos + 1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ }
}
else if (strncmp (arm_selected_cpu->name, "generic", 7) == 0)
asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_cpu->name + 8);
fputs (":\n", file);
if (flag_pic)
{
- /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
+ /* Output ".word .LTHUNKn-[3,7]-.LTHUNKPCn". */
rtx tem = XEXP (DECL_RTL (function), 0);
- tem = plus_constant (GET_MODE (tem), tem, -7);
+ /* For TARGET_THUMB1_ONLY the thunk is in Thumb mode, so the PC
+ pipeline offset is four rather than eight. Adjust the offset
+ accordingly. */
+ tem = plus_constant (GET_MODE (tem), tem,
+ TARGET_THUMB1_ONLY ? -3 : -7);
tem = gen_rtx_MINUS (GET_MODE (tem),
tem,
gen_rtx_SYMBOL_REF (Pmode,
{
enum machine_mode mode;
unsigned regno;
- rtx parts[8];
+ rtx parts[16];
int nregs;
int i;
/* Emit unwind directives for a store-multiple instruction or stack pointer
push during alignment.
These should only ever be generated by the function prologue code, so
- expect them to have a particular form. */
+ expect them to have a particular form.
+ The store-multiple instruction sometimes pushes pc as the last register,
+ although it should not be tracked in the unwind information; for -Os it
+ sometimes pushes dummy registers before the first register that needs
+ to be tracked in the unwind information. Such dummy registers are there
+ just to avoid a separate stack adjustment, and will not be restored in
+ the epilogue. */
static void
arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
int reg_size;
unsigned reg;
unsigned lastreg;
+ unsigned padfirst = 0, padlast = 0;
rtx e;
e = XVECEXP (p, 0, 0);
- if (GET_CODE (e) != SET)
- abort ();
+ gcc_assert (GET_CODE (e) == SET);
/* First insn will adjust the stack pointer. */
- if (GET_CODE (e) != SET
- || !REG_P (XEXP (e, 0))
- || REGNO (XEXP (e, 0)) != SP_REGNUM
- || GET_CODE (XEXP (e, 1)) != PLUS)
- abort ();
+ gcc_assert (GET_CODE (e) == SET
+ && REG_P (SET_DEST (e))
+ && REGNO (SET_DEST (e)) == SP_REGNUM
+ && GET_CODE (SET_SRC (e)) == PLUS);
- offset = -INTVAL (XEXP (XEXP (e, 1), 1));
+ offset = -INTVAL (XEXP (SET_SRC (e), 1));
nregs = XVECLEN (p, 0) - 1;
+ gcc_assert (nregs);
- reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
+ reg = REGNO (SET_SRC (XVECEXP (p, 0, 1)));
if (reg < 16)
{
+ /* For -Os, dummy registers can be pushed at the beginning to
+ avoid a separate stack pointer adjustment. */
+ e = XVECEXP (p, 0, 1);
+ e = XEXP (SET_DEST (e), 0);
+ if (GET_CODE (e) == PLUS)
+ padfirst = INTVAL (XEXP (e, 1));
+ gcc_assert (padfirst == 0 || optimize_size);
/* The function prologue may also push pc, but not annotate it as it is
never restored. We turn this into a stack pointer adjustment. */
- if (nregs * 4 == offset - 4)
- {
- fprintf (asm_out_file, "\t.pad #4\n");
- offset -= 4;
- }
+ e = XVECEXP (p, 0, nregs);
+ e = XEXP (SET_DEST (e), 0);
+ if (GET_CODE (e) == PLUS)
+ padlast = offset - INTVAL (XEXP (e, 1)) - 4;
+ else
+ padlast = offset - 4;
+ gcc_assert (padlast == 0 || padlast == 4);
+ if (padlast == 4)
+ fprintf (asm_out_file, "\t.pad #4\n");
reg_size = 4;
fprintf (asm_out_file, "\t.save {");
}
}
else
/* Unknown register type. */
- abort ();
+ gcc_unreachable ();
/* If the stack increment doesn't match the size of the saved registers,
something has gone horribly wrong. */
- if (offset != nregs * reg_size)
- abort ();
+ gcc_assert (offset == padfirst + nregs * reg_size + padlast);
- offset = 0;
+ offset = padfirst;
lastreg = 0;
/* The remaining insns will describe the stores. */
for (i = 1; i <= nregs; i++)
/* Expect (set (mem <addr>) (reg)).
Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
e = XVECEXP (p, 0, i);
- if (GET_CODE (e) != SET
- || !MEM_P (XEXP (e, 0))
- || !REG_P (XEXP (e, 1)))
- abort ();
+ gcc_assert (GET_CODE (e) == SET
+ && MEM_P (SET_DEST (e))
+ && REG_P (SET_SRC (e)));
- reg = REGNO (XEXP (e, 1));
- if (reg < lastreg)
- abort ();
+ reg = REGNO (SET_SRC (e));
+ gcc_assert (reg >= lastreg);
if (i != 1)
fprintf (asm_out_file, ", ");
#ifdef ENABLE_CHECKING
/* Check that the addresses are consecutive. */
- e = XEXP (XEXP (e, 0), 0);
+ e = XEXP (SET_DEST (e), 0);
if (GET_CODE (e) == PLUS)
- {
- offset += reg_size;
- if (!REG_P (XEXP (e, 0))
- || REGNO (XEXP (e, 0)) != SP_REGNUM
- || !CONST_INT_P (XEXP (e, 1))
- || offset != INTVAL (XEXP (e, 1)))
- abort ();
- }
- else if (i != 1
- || !REG_P (e)
- || REGNO (e) != SP_REGNUM)
- abort ();
+ gcc_assert (REG_P (XEXP (e, 0))
+ && REGNO (XEXP (e, 0)) == SP_REGNUM
+ && CONST_INT_P (XEXP (e, 1))
+ && offset == INTVAL (XEXP (e, 1)));
+ else
+ gcc_assert (i == 1
+ && REG_P (e)
+ && REGNO (e) == SP_REGNUM);
+ offset += reg_size;
#endif
}
fprintf (asm_out_file, "}\n");
+ if (padfirst)
+ fprintf (asm_out_file, "\t.pad #%d\n", padfirst);
}
/* Emit unwind directives for a SET. */
/* Emit unwind directives for the given insn. */
static void
-arm_unwind_emit (FILE * asm_out_file, rtx insn)
+arm_unwind_emit (FILE * asm_out_file, rtx_insn *insn)
{
rtx note, pat;
bool handled_one = false;
emit unwind information for it because these are used either for
pretend arguments or notes to adjust sp and restore registers from
stack. */
+ case REG_CFA_DEF_CFA:
case REG_CFA_ADJUST_CFA:
case REG_CFA_RESTORE:
return;
- case REG_CFA_DEF_CFA:
case REG_CFA_EXPRESSION:
case REG_CFA_OFFSET:
/* ??? Only handling here what we actually emit. */
const char *
thumb1_output_casesi (rtx *operands)
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[0]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[0])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
const char *
thumb2_output_casesi (rtx *operands)
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
switch (arm_tune)
{
case cortexa15:
+ case cortexa57:
return 3;
case cortexr4:
int in_n, out_n;
if (TREE_CODE (type_out) != VECTOR_TYPE
- || TREE_CODE (type_in) != VECTOR_TYPE
- || !(TARGET_NEON && TARGET_FPU_ARMV8 && flag_unsafe_math_optimizations))
+ || TREE_CODE (type_in) != VECTOR_TYPE)
return NULL_TREE;
out_mode = TYPE_MODE (TREE_TYPE (type_out));
decl of the vectorized builtin for the appropriate vector mode.
NULL_TREE is returned if no such builtin is available. */
#undef ARM_CHECK_BUILTIN_MODE
-#define ARM_CHECK_BUILTIN_MODE(C) \
+#define ARM_CHECK_BUILTIN_MODE(C) \
+ (TARGET_NEON && TARGET_FPU_ARMV8 \
+ && flag_unsafe_math_optimizations \
+ && ARM_CHECK_BUILTIN_MODE_1 (C))
+
+#undef ARM_CHECK_BUILTIN_MODE_1
+#define ARM_CHECK_BUILTIN_MODE_1(C) \
(out_mode == SFmode && out_n == C \
&& in_mode == SFmode && in_n == C)
return ARM_FIND_VRINT_VARIANT (vrintz);
case BUILT_IN_ROUNDF:
return ARM_FIND_VRINT_VARIANT (vrinta);
+#undef ARM_CHECK_BUILTIN_MODE
+#define ARM_CHECK_BUILTIN_MODE(C, N) \
+ (out_mode == N##Imode && out_n == C \
+ && in_mode == N##Imode && in_n == C)
+ case BUILT_IN_BSWAP16:
+ if (ARM_CHECK_BUILTIN_MODE (4, H))
+ return arm_builtin_decl (ARM_BUILTIN_NEON_bswapv4hi, false);
+ else if (ARM_CHECK_BUILTIN_MODE (8, H))
+ return arm_builtin_decl (ARM_BUILTIN_NEON_bswapv8hi, false);
+ else
+ return NULL_TREE;
+ case BUILT_IN_BSWAP32:
+ if (ARM_CHECK_BUILTIN_MODE (2, S))
+ return arm_builtin_decl (ARM_BUILTIN_NEON_bswapv2si, false);
+ else if (ARM_CHECK_BUILTIN_MODE (4, S))
+ return arm_builtin_decl (ARM_BUILTIN_NEON_bswapv4si, false);
+ else
+ return NULL_TREE;
+ case BUILT_IN_BSWAP64:
+ if (ARM_CHECK_BUILTIN_MODE (2, D))
+ return arm_builtin_decl (ARM_BUILTIN_NEON_bswapv2di, false);
+ else
+ return NULL_TREE;
+
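+ /* With these entries a scalar loop of e.g. __builtin_bswap32 over a
+ word array can be vectorized using the NEON byte-reverse insns
+ (vrev16.8 / vrev32.8 / vrev64.8 for the three widths; illustrative
+ mapping). */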
default:
return NULL_TREE;
}
{
/* Vectors which aren't in packed structures will not be less aligned than
the natural alignment of their element type, so this is safe. */
- if (TARGET_NEON && !BYTES_BIG_ENDIAN)
+ if (TARGET_NEON && !BYTES_BIG_ENDIAN && unaligned_access)
return !is_packed;
return default_builtin_vector_alignment_reachable (type, is_packed);
const_tree type, int misalignment,
bool is_packed)
{
- if (TARGET_NEON && !BYTES_BIG_ENDIAN)
+ if (TARGET_NEON && !BYTES_BIG_ENDIAN && unaligned_access)
{
HOST_WIDE_INT align = TYPE_ALIGN_UNIT (type);
enum machine_mode mode;
enum memmodel mod_s, mod_f;
bool is_weak;
- rtx label1, label2, x, cond;
+ rtx_code_label *label1, *label2;
+ rtx x, cond;
rval = operands[0];
mem = operands[1];
if (!(use_acquire || use_release))
arm_pre_atomic_barrier (mod_s);
- label1 = NULL_RTX;
+ label1 = NULL;
if (!is_weak)
{
label1 = gen_label_rtx ();
enum memmodel model = (enum memmodel) INTVAL (model_rtx);
enum machine_mode mode = GET_MODE (mem);
enum machine_mode wmode = (mode == DImode ? DImode : SImode);
- rtx label, x;
+ rtx_code_label *label;
+ rtx x;
bool use_acquire = TARGET_HAVE_LDACQ
&& !(model == MEMMODEL_RELAXED
ORR (SHIFT (ASHIFT, in_up, scratch1), out_down)));
if (code == ASHIFTRT)
{
- rtx done_label = gen_label_rtx ();
+ rtx_code_label *done_label = gen_label_rtx ();
emit_jump_insn (BRANCH (LT, done_label));
emit_insn (SET (out_down, ORR (SHIFT (ASHIFTRT, in_up, scratch2),
out_down)));
if (code == ASHIFTRT)
{
- rtx done_label = gen_label_rtx ();
+ rtx_code_label *done_label = gen_label_rtx ();
emit_jump_insn (BRANCH (LT, done_label));
emit_insn (SET (scratch2, SHIFT (ASHIFTRT, in_up, scratch2)));
emit_insn (SET (out_down, ORR (out_down, scratch2)));
return (unsigned HOST_WIDE_INT) 1 << 29;
}
+
+/* This is a temporary fix for PR60655. Ideally we need
+ to handle most of these cases in the generic part, but
+ currently we reject minus (..) (sym_ref). We try to
+ ameliorate the case of minus (sym_ref1) (sym_ref2)
+ where both symbols are in the same section. */
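+/* For example (illustrative), (minus (symbol_ref "a") (symbol_ref "b"))
+ has a link-time-constant value only when "a" and "b" end up in the
+ same section, so only that case is allowed into the debug info. */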
+
+static bool
+arm_const_not_ok_for_debug_p (rtx p)
+{
+ tree decl_op0 = NULL;
+ tree decl_op1 = NULL;
+
+ if (GET_CODE (p) == MINUS)
+ {
+ if (GET_CODE (XEXP (p, 1)) == SYMBOL_REF)
+ {
+ decl_op1 = SYMBOL_REF_DECL (XEXP (p, 1));
+ if (decl_op1
+ && GET_CODE (XEXP (p, 0)) == SYMBOL_REF
+ && (decl_op0 = SYMBOL_REF_DECL (XEXP (p, 0))))
+ {
+ if ((TREE_CODE (decl_op1) == VAR_DECL
+ || TREE_CODE (decl_op1) == CONST_DECL)
+ && (TREE_CODE (decl_op0) == VAR_DECL
+ || TREE_CODE (decl_op0) == CONST_DECL))
+ return (get_variable_section (decl_op1, false)
+ != get_variable_section (decl_op0, false));
+
+ if (TREE_CODE (decl_op1) == LABEL_DECL
+ && TREE_CODE (decl_op0) == LABEL_DECL)
+ return (DECL_CONTEXT (decl_op1)
+ != DECL_CONTEXT (decl_op0));
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void
+arm_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+ const unsigned ARM_FE_INVALID = 1;
+ const unsigned ARM_FE_DIVBYZERO = 2;
+ const unsigned ARM_FE_OVERFLOW = 4;
+ const unsigned ARM_FE_UNDERFLOW = 8;
+ const unsigned ARM_FE_INEXACT = 16;
+ const unsigned HOST_WIDE_INT ARM_FE_ALL_EXCEPT = (ARM_FE_INVALID
+ | ARM_FE_DIVBYZERO
+ | ARM_FE_OVERFLOW
+ | ARM_FE_UNDERFLOW
+ | ARM_FE_INEXACT);
+ const unsigned HOST_WIDE_INT ARM_FE_EXCEPT_SHIFT = 8;
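+ /* In the FPSCR the cumulative exception flags occupy bits 0-4 and
+ the corresponding trap-enable bits occupy bits 8-12, hence the
+ shift of 8 between the two groups. */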
+ tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
+ tree new_fenv_var, reload_fenv, restore_fnenv;
+ tree update_call, atomic_feraiseexcept, hold_fnclex;
+
+ if (!TARGET_VFP || !TARGET_HARD_FLOAT)
+ return;
+
+ /* Generate the equivalent of:
+ unsigned int fenv_var;
+ fenv_var = __builtin_arm_get_fpscr ();
+
+ unsigned int masked_fenv;
+ masked_fenv = fenv_var & mask;
+
+ __builtin_arm_set_fpscr (masked_fenv); */
+
+ fenv_var = create_tmp_var (unsigned_type_node, NULL);
+ get_fpscr = arm_builtin_decls[ARM_BUILTIN_GET_FPSCR];
+ set_fpscr = arm_builtin_decls[ARM_BUILTIN_SET_FPSCR];
+ mask = build_int_cst (unsigned_type_node,
+ ~((ARM_FE_ALL_EXCEPT << ARM_FE_EXCEPT_SHIFT)
+ | ARM_FE_ALL_EXCEPT));
+ ld_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
+ fenv_var, build_call_expr (get_fpscr, 0));
+ masked_fenv = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var, mask);
+ hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
+ *hold = build2 (COMPOUND_EXPR, void_type_node,
+ build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
+ hold_fnclex);
+
+ /* Store the value of masked_fenv to clear the exceptions:
+ __builtin_arm_set_fpscr (masked_fenv); */
+
+ *clear = build_call_expr (set_fpscr, 1, masked_fenv);
+
+ /* Generate the equivalent of:
+ unsigned int new_fenv_var;
+ new_fenv_var = __builtin_arm_get_fpscr ();
+
+ __builtin_arm_set_fpscr (fenv_var);
+
+ __atomic_feraiseexcept (new_fenv_var); */
+
+ new_fenv_var = create_tmp_var (unsigned_type_node, NULL);
+ reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node, new_fenv_var,
+ build_call_expr (get_fpscr, 0));
+ restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
+ atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+ update_call = build_call_expr (atomic_feraiseexcept, 1,
+ fold_convert (integer_type_node, new_fenv_var));
+ *update = build2 (COMPOUND_EXPR, void_type_node,
+ build2 (COMPOUND_EXPR, void_type_node,
+ reload_fenv, restore_fnenv), update_call);
+}
+
+/* Return TRUE if X is a reference to a value in a constant pool.  */
+extern bool
+arm_is_constant_pool_ref (rtx x)
+{
+ return (MEM_P (x)
+ && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
+}
+
#include "gt-arm.h"