#include "system.h"
#include "rtl.h"
#include "tree.h"
-#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
-#include "obstack.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
+#include "tm_p.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
ASM_OUTPUT_LABELREF. */
/* String used with the -mfixed-range= option. */
const char *ia64_fixed_range_string;
+/* Determines whether we use adds (14-bit immediate), addl (22-bit
+   immediate), or movl (64-bit immediate) to generate our
+   TLS immediate offsets.  */
+int ia64_tls_size = 22;
+
+/* String used with the -mtls-size= option. */
+const char *ia64_tls_size_string;
+
/* Determines whether we run our final scheduling pass or not. We always
avoid the normal second scheduling pass. */
static int ia64_flag_schedule_insns2;
unsigned int ia64_section_threshold;
\f
+static rtx gen_tls_get_addr PARAMS ((void));
+static rtx gen_thread_pointer PARAMS ((void));
static int find_gr_spill PARAMS ((int));
static int next_scratch_gr_reg PARAMS ((void));
static void mark_reg_gr_used_mask PARAMS ((rtx, void *));
static enum machine_mode hfa_element_mode PARAMS ((tree, int));
static void fix_range PARAMS ((const char *));
-static void ia64_add_gc_roots PARAMS ((void));
-static void ia64_init_machine_status PARAMS ((struct function *));
-static void ia64_mark_machine_status PARAMS ((struct function *));
-static void ia64_free_machine_status PARAMS ((struct function *));
+static struct machine_function * ia64_init_machine_status PARAMS ((void));
static void emit_insn_group_barriers PARAMS ((FILE *, rtx));
static void emit_all_insn_group_barriers PARAMS ((FILE *, rtx));
static void emit_predicate_relation_info PARAMS ((void));
+static bool ia64_in_small_data_p PARAMS ((tree));
+static void ia64_encode_section_info PARAMS ((tree, int));
+static const char *ia64_strip_name_encoding PARAMS ((const char *));
static void process_epilogue PARAMS ((void));
static int process_set PARAMS ((FILE *, rtx));
static rtx ia64_expand_lock_test_and_set PARAMS ((enum machine_mode,
tree, rtx));
static rtx ia64_expand_lock_release PARAMS ((enum machine_mode, tree, rtx));
-const struct attribute_spec ia64_attribute_table[];
static bool ia64_assemble_integer PARAMS ((rtx, unsigned int, int));
static void ia64_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
static void ia64_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
static int ia64_sched_reorder PARAMS ((FILE *, int, rtx *, int *, int));
static int ia64_sched_reorder2 PARAMS ((FILE *, int, rtx *, int *, int));
static int ia64_variable_issue PARAMS ((FILE *, int, rtx, int));
-static rtx ia64_cycle_display PARAMS ((int, rtx));
+static void ia64_select_rtx_section PARAMS ((enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT));
+static void ia64_aix_select_section PARAMS ((tree, int,
+ unsigned HOST_WIDE_INT))
+ ATTRIBUTE_UNUSED;
+static void ia64_aix_unique_section PARAMS ((tree, int))
+ ATTRIBUTE_UNUSED;
+static void ia64_aix_select_rtx_section PARAMS ((enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT))
+ ATTRIBUTE_UNUSED;
\f
+/* Table of valid machine attributes. */
+static const struct attribute_spec ia64_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "syscall_linkage", 0, 0, false, true, true, NULL },
+ { NULL, 0, 0, false, false, false, NULL }
+};
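+
+/* "syscall_linkage" applies to function types; a hypothetical use:
+
+     extern long sys_call (long) __attribute__ ((syscall_linkage));
+
+   It modifies the calling convention by marking all input registers as
+   live at all function exits, which makes it possible to restart a
+   system call after an interrupt without saving and restoring the
+   input registers.  */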
+
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
+#undef TARGET_STRIP_NAME_ENCODING
+#define TARGET_STRIP_NAME_ENCODING ia64_strip_name_encoding
+
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2
-#undef TARGET_SCHED_CYCLE_DISPLAY
-#define TARGET_SCHED_CYCLE_DISPLAY ia64_cycle_display
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS true
+#endif
struct gcc_target targetm = TARGET_INITIALIZER;
\f
if (CONSTANT_POOL_ADDRESS_P (op))
return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
else
- return XSTR (op, 0)[0] == SDATA_NAME_FLAG_CHAR;
+ {
+ const char *str = XSTR (op, 0);
+ return (str[0] == ENCODE_SECTION_INFO_CHAR && str[1] == 's');
+ }
default:
break;
return 0;
}
+/* Return the TLS model if OP refers to a TLS symbol, zero otherwise.  */
+
+int
+tls_symbolic_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ const char *str;
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+ str = XSTR (op, 0);
+ if (str[0] != ENCODE_SECTION_INFO_CHAR)
+ return 0;
+ switch (str[1])
+ {
+ case 'G':
+ return TLS_MODEL_GLOBAL_DYNAMIC;
+ case 'L':
+ return TLS_MODEL_LOCAL_DYNAMIC;
+ case 'i':
+ return TLS_MODEL_INITIAL_EXEC;
+ case 'l':
+ return TLS_MODEL_LOCAL_EXEC;
+ }
+ return 0;
+}
+
+
/* Return 1 if OP refers to a function. */
int
return 0;
return fr_reg_or_fp01_operand (op, mode);
}
+
+/* Return 1 if OP is valid as a base register in a reg + offset address. */
+
+int
+basereg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
+ checks from pa.c basereg_operand as well? Seems to be OK without them
+ in test runs. */
+
+  return (register_operand (op, mode)
+	  && REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
+}
\f
/* Return 1 if the operands of a move are ok. */
else
temp = dest;
+ if (tls_symbolic_operand (src, Pmode))
+ abort ();
+
if (TARGET_AUTO_PIC)
emit_insn (gen_load_gprel64 (temp, src));
else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FLAG (src))
emit_move_insn (dest, temp);
}
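+
+/* Return a (cached) SYMBOL_REF for the __tls_get_addr libcall.  */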
+static GTY(()) rtx gen_tls_tga;
+static rtx
+gen_tls_get_addr ()
+{
+ if (!gen_tls_tga)
+ {
+ gen_tls_tga = init_one_libfunc ("__tls_get_addr");
+ }
+ return gen_tls_tga;
+}
+
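+/* Return a REG for the thread pointer, which the IA-64 software
+   conventions fix at r13.  */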
+static GTY(()) rtx thread_pointer_rtx;
+static rtx
+gen_thread_pointer ()
+{
+ if (!thread_pointer_rtx)
+ {
+ thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
+      RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
+ }
+  return thread_pointer_rtx;
+}
+
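+/* Expand a move from OP1 into OP0, giving TLS and symbolic Pmode
+   addresses their special handling.  Return NULL_RTX if all necessary
+   insns have been emitted here, or the (possibly modified) source
+   operand for the caller to move into OP0 itself.  */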
+rtx
+ia64_expand_move (op0, op1)
+ rtx op0, op1;
+{
+ enum machine_mode mode = GET_MODE (op0);
+
+ if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
+ op1 = force_reg (mode, op1);
+
+ if (mode == Pmode)
+ {
+ enum tls_model tls_kind;
+ if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
+ {
+ rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
+
+ switch (tls_kind)
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ start_sequence ();
+
+ tga_op1 = gen_reg_rtx (Pmode);
+ emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
+ tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
+ RTX_UNCHANGING_P (tga_op1) = 1;
+
+ tga_op2 = gen_reg_rtx (Pmode);
+ emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
+ tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
+ RTX_UNCHANGING_P (tga_op2) = 1;
+
+ tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
+ LCT_CONST, Pmode, 2, tga_op1,
+ Pmode, tga_op2, Pmode);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, op0, tga_ret, op1);
+ return NULL_RTX;
+
+ case TLS_MODEL_LOCAL_DYNAMIC:
+	  /* ??? This isn't the completely proper way to do local-dynamic.
+ If the call to __tls_get_addr is used only by a single symbol,
+ then we should (somehow) move the dtprel to the second arg
+ to avoid the extra add. */
+ start_sequence ();
+
+ tga_op1 = gen_reg_rtx (Pmode);
+ emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
+ tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
+ RTX_UNCHANGING_P (tga_op1) = 1;
+
+ tga_op2 = const0_rtx;
+
+ tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
+ LCT_CONST, Pmode, 2, tga_op1,
+ Pmode, tga_op2, Pmode);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_LD_BASE);
+ tmp = gen_reg_rtx (Pmode);
+ emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
+
+ if (register_operand (op0, Pmode))
+ tga_ret = op0;
+ else
+ tga_ret = gen_reg_rtx (Pmode);
+ if (TARGET_TLS64)
+ {
+ emit_insn (gen_load_dtprel (tga_ret, op1));
+ emit_insn (gen_adddi3 (tga_ret, tmp, tga_ret));
+ }
+ else
+ emit_insn (gen_add_dtprel (tga_ret, tmp, op1));
+ if (tga_ret == op0)
+ return NULL_RTX;
+ op1 = tga_ret;
+ break;
+
+ case TLS_MODEL_INITIAL_EXEC:
+ tmp = gen_reg_rtx (Pmode);
+ emit_insn (gen_load_ltoff_tprel (tmp, op1));
+ tmp = gen_rtx_MEM (Pmode, tmp);
+ RTX_UNCHANGING_P (tmp) = 1;
+ tmp = force_reg (Pmode, tmp);
+
+ if (register_operand (op0, Pmode))
+ op1 = op0;
+ else
+ op1 = gen_reg_rtx (Pmode);
+ emit_insn (gen_adddi3 (op1, tmp, gen_thread_pointer ()));
+ if (op1 == op0)
+ return NULL_RTX;
+ break;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ if (register_operand (op0, Pmode))
+ tmp = op0;
+ else
+ tmp = gen_reg_rtx (Pmode);
+ if (TARGET_TLS64)
+ {
+ emit_insn (gen_load_tprel (tmp, op1));
+ emit_insn (gen_adddi3 (tmp, gen_thread_pointer (), tmp));
+ }
+ else
+ emit_insn (gen_add_tprel (tmp, gen_thread_pointer (), op1));
+ if (tmp == op0)
+ return NULL_RTX;
+ op1 = tmp;
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ else if (!TARGET_NO_PIC && symbolic_operand (op1, DImode))
+ {
+ /* Before optimization starts, delay committing to any particular
+ type of PIC address load. If this function gets deferred, we
+ may acquire information that changes the value of the
+ sdata_symbolic_operand predicate.
+
+	     But don't delay for function pointers.  Loading a function address
+	     actually loads the address of the descriptor, not the function.
+	     If we represent these as SYMBOL_REFs, then they get cse'd with
+	     calls, and we end up with calls to the descriptor address instead
+	     of calls to the function address.  Functions are not candidates
+	     for sdata anyway.
+
+	     Don't delay for LABEL_REF because the splitter loses REG_LABEL
+	     notes.  Don't delay for pool addresses on general principles;
+	     they'll never become non-local behind our back.  */
+
+ if (rtx_equal_function_value_matters
+ && GET_CODE (op1) != LABEL_REF
+ && ! (GET_CODE (op1) == SYMBOL_REF
+ && (SYMBOL_REF_FLAG (op1)
+ || CONSTANT_POOL_ADDRESS_P (op1)
+ || STRING_POOL_ADDRESS_P (op1))))
+ emit_insn (gen_movdi_symbolic (op0, op1));
+ else
+ ia64_expand_load_address (op0, op1, NULL_RTX);
+ return NULL_RTX;
+ }
+ }
+
+ return op1;
+}
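+
+/* A sketch of the intended use from a mov expander in ia64.md
+   (assumed shape, not necessarily the exact pattern):
+
+     operands[1] = ia64_expand_move (operands[0], operands[1]);
+     if (operands[1] == NULL_RTX)
+       DONE;  */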
+
rtx
ia64_gp_save_reg (setjmp_p)
int setjmp_p;
rtx nextarg;
int sibcall_p;
{
- rtx insn, b0, pfs, gp_save, narg_rtx;
+ rtx insn, b0, pfs, gp_save, narg_rtx, dest;
+ bool indirect_p;
int narg;
addr = XEXP (addr, 0);
return;
}
- if (sibcall_p)
+ indirect_p = ! symbolic_operand (addr, VOIDmode);
+
+ if (sibcall_p || (TARGET_CONST_GP && !indirect_p))
gp_save = NULL_RTX;
else
gp_save = ia64_gp_save_reg (setjmp_operand (addr, VOIDmode));
+ if (gp_save)
+ emit_move_insn (gp_save, pic_offset_table_rtx);
+
/* If this is an indirect call, then we have the address of a descriptor. */
- if (! symbolic_operand (addr, VOIDmode))
+ if (indirect_p)
{
- rtx dest;
-
- if (! sibcall_p)
- emit_move_insn (gp_save, pic_offset_table_rtx);
-
dest = force_reg (DImode, gen_rtx_MEM (DImode, addr));
emit_move_insn (pic_offset_table_rtx,
gen_rtx_MEM (DImode, plus_constant (addr, 8)));
-
- if (sibcall_p)
- insn = gen_sibcall_pic (dest, narg_rtx, b0, pfs);
- else if (! retval)
- insn = gen_call_pic (dest, narg_rtx, b0);
- else
- insn = gen_call_value_pic (retval, dest, narg_rtx, b0);
- emit_call_insn (insn);
-
- if (! sibcall_p)
- emit_move_insn (pic_offset_table_rtx, gp_save);
- }
- else if (TARGET_CONST_GP)
- {
- if (sibcall_p)
- insn = gen_sibcall_nopic (addr, narg_rtx, b0, pfs);
- else if (! retval)
- insn = gen_call_nopic (addr, narg_rtx, b0);
- else
- insn = gen_call_value_nopic (retval, addr, narg_rtx, b0);
- emit_call_insn (insn);
}
else
- {
- if (sibcall_p)
- emit_call_insn (gen_sibcall_pic (addr, narg_rtx, b0, pfs));
- else
- {
- emit_move_insn (gp_save, pic_offset_table_rtx);
+ dest = addr;
- if (! retval)
- insn = gen_call_pic (addr, narg_rtx, b0);
- else
- insn = gen_call_value_pic (retval, addr, narg_rtx, b0);
- emit_call_insn (insn);
+ if (sibcall_p)
+ insn = gen_sibcall_pic (dest, narg_rtx, b0, pfs);
+ else if (! retval)
+ insn = gen_call_pic (dest, narg_rtx, b0);
+ else
+ insn = gen_call_value_pic (retval, dest, narg_rtx, b0);
+ emit_call_insn (insn);
- emit_move_insn (pic_offset_table_rtx, gp_save);
- }
- }
+ if (gp_save)
+ emit_move_insn (pic_offset_table_rtx, gp_save);
}
\f
/* Begin the assembly file. */
/* We don't need an alloc instruction if we've used no outputs or locals. */
if (current_frame_info.n_local_regs == 0
&& current_frame_info.n_output_regs == 0
- && current_frame_info.n_input_regs <= current_function_args_info.words)
+ && current_frame_info.n_input_regs <= current_function_args_info.int_regs)
{
/* If there is no alloc, but there are input registers used, then we
need a .regstk directive. */
return VOIDmode;
case ARRAY_TYPE:
- return TYPE_MODE (TREE_TYPE (type));
+ return hfa_element_mode (TREE_TYPE (type), 1);
case RECORD_TYPE:
case UNION_TYPE:
FR registers, then FP values must also go in general registers. This can
happen when we have a SFmode HFA. */
else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
- return;
+ cum->int_regs = cum->words;
/* If there is a prototype, then FP values go in a FR register when
   named, and in a GR register when unnamed. */
else if (cum->prototype)
{
if (! named)
- return;
+ cum->int_regs = cum->words;
else
/* ??? Complex types should not reach here. */
cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
/* If there is no prototype, then FP values go in both FR and GR
registers. */
else
- /* ??? Complex types should not reach here. */
- cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
+ {
+ /* ??? Complex types should not reach here. */
+ cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
+ cum->int_regs = cum->words;
+ }
+}
- return;
+/* Variable sized types are passed by reference. */
+/* ??? At present this is a GCC extension to the IA-64 ABI. */
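+/* The typical case is a C99 variable length array, whose TYPE_SIZE is
+   not an INTEGER_CST.  */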
+
+int
+ia64_function_arg_pass_by_reference (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
}
\f
/* Implement va_start. */
{
tree t;
+ /* Variable sized types are passed by reference. */
+ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
+ return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
+ }
+
/* Arguments with alignment larger than 8 bytes start at the next even
boundary. */
if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
}
}
-/* Called to register all of our global variables with the garbage
- collector. */
-
-static void
-ia64_add_gc_roots ()
-{
- ggc_add_rtx_root (&ia64_compare_op0, 1);
- ggc_add_rtx_root (&ia64_compare_op1, 1);
-}
-
-static void
-ia64_init_machine_status (p)
- struct function *p;
-{
- p->machine =
- (struct machine_function *) xcalloc (1, sizeof (struct machine_function));
-}
-
-static void
-ia64_mark_machine_status (p)
- struct function *p;
-{
- struct machine_function *machine = p->machine;
-
- if (machine)
- {
- ggc_mark_rtx (machine->ia64_eh_epilogue_sp);
- ggc_mark_rtx (machine->ia64_eh_epilogue_bsp);
- ggc_mark_rtx (machine->ia64_gp_save);
- }
-}
-
-static void
-ia64_free_machine_status (p)
- struct function *p;
+static struct machine_function *
+ia64_init_machine_status ()
{
- free (p->machine);
- p->machine = NULL;
+ return ggc_alloc_cleared (sizeof (struct machine_function));
}
/* Handle TARGET_OPTIONS switches. */
if (ia64_fixed_range_string)
fix_range (ia64_fixed_range_string);
+ if (ia64_tls_size_string)
+ {
+ char *end;
+ unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
+ if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
+ error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
+ else
+ ia64_tls_size = tmp;
+ }
+
ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
flag_schedule_insns_after_reload = 0;
ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
init_machine_status = ia64_init_machine_status;
- mark_machine_status = ia64_mark_machine_status;
- free_machine_status = ia64_free_machine_status;
-
- ia64_add_gc_roots ();
}
\f
static enum attr_itanium_requires_unit0 ia64_safe_itanium_requires_unit0 PARAMS((rtx));
case UNSPEC:
switch (XINT (x, 1))
{
- case 1: /* st8.spill */
- case 2: /* ld8.fill */
+ case UNSPEC_LTOFF_DTPMOD:
+ case UNSPEC_LTOFF_DTPREL:
+ case UNSPEC_DTPREL:
+ case UNSPEC_LTOFF_TPREL:
+ case UNSPEC_TPREL:
+ case UNSPEC_PRED_REL_MUTEX:
+ case UNSPEC_PIC_CALL:
+ case UNSPEC_MF:
+ case UNSPEC_FETCHADD_ACQ:
+ case UNSPEC_BSP_VALUE:
+ case UNSPEC_FLUSHRS:
+ case UNSPEC_BUNDLE_SELECTOR:
+ break;
+
+ case UNSPEC_GR_SPILL:
+ case UNSPEC_GR_RESTORE:
{
HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
HOST_WIDE_INT bit = (offset >> 3) & 63;
break;
}
- case 3: /* stf.spill */
- case 4: /* ldf.spill */
- case 8: /* popcnt */
+ case UNSPEC_FR_SPILL:
+ case UNSPEC_FR_RESTORE:
+ case UNSPEC_POPCNT:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
break;
- case 7: /* pred_rel_mutex */
- case 9: /* pic call */
- case 12: /* mf */
- case 19: /* fetchadd_acq */
- case 20: /* mov = ar.bsp */
- case 21: /* flushrs */
- case 22: /* bundle selector */
- case 23: /* cycle display */
- break;
-
- case 24: /* addp4 */
+ case UNSPEC_ADDP4:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
break;
- case 5: /* recip_approx */
+ case UNSPEC_FR_RECIP_APPROX:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
break;
- case 13: /* cmpxchg_acq */
+ case UNSPEC_CMPXCHG_ACQ:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
break;
case UNSPEC_VOLATILE:
switch (XINT (x, 1))
{
- case 0: /* alloc */
+ case UNSPECV_ALLOC:
/* Alloc must always be the first instruction of a group.
We force this by always returning true. */
/* ??? We might get better scheduling if we explicitly check for
rws_access_regno (REG_AR_CFM, new_flags, pred);
return 1;
- case 1: /* blockage */
- case 2: /* insn group barrier */
- return 0;
-
- case 5: /* set_bsp */
+ case UNSPECV_SET_BSP:
need_barrier = 1;
break;
- case 7: /* pred.rel.mutex */
- case 8: /* safe_across_calls all */
- case 9: /* safe_across_calls normal */
+ case UNSPECV_BLOCKAGE:
+ case UNSPECV_INSN_GROUP_BARRIER:
+ case UNSPECV_BREAK:
+ case UNSPECV_PSAC_ALL:
+ case UNSPECV_PSAC_NORMAL:
return 0;
default:
/* We play dependency tricks with the epilogue in order
to get proper schedules. Undo this for dv analysis. */
case CODE_FOR_epilogue_deallocate_stack:
+ case CODE_FOR_prologue_allocate_stack:
pat = XVECEXP (pat, 0, 0);
break;
}
else if (GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
- && XINT (PATTERN (insn), 1) == 2)
+ && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
{
init_insn_group_barriers ();
last_label = 0;
x = COND_EXEC_CODE (x);
if (GET_CODE (x) == SET)
return x;
- ret = single_set_2 (insn, x);
- if (ret == NULL && GET_CODE (x) == PARALLEL)
- {
- /* Special case here prologue_allocate_stack and
- epilogue_deallocate_stack. Although it is not a classical
- single set, the second set is there just to protect it
- from moving past FP-relative stack accesses. */
- if (XVECLEN (x, 0) == 2
- && GET_CODE (XVECEXP (x, 0, 0)) == SET
- && GET_CODE (XVECEXP (x, 0, 1)) == SET
- && GET_CODE (SET_DEST (XVECEXP (x, 0, 1))) == REG
- && SET_DEST (XVECEXP (x, 0, 1)) == SET_SRC (XVECEXP (x, 0, 1))
- && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
- ret = XVECEXP (x, 0, 0);
+
+  /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
+     Although they are not classical single sets, the second set is there
+     just to protect the first from moving past FP-relative stack accesses.  */
+ switch (recog_memoized (insn))
+ {
+ case CODE_FOR_prologue_allocate_stack:
+ case CODE_FOR_epilogue_deallocate_stack:
+ ret = XVECEXP (x, 0, 0);
+ break;
+
+ default:
+ ret = single_set_2 (insn, x);
+ break;
}
+
return ret;
}
if (reg_overlap_mentioned_p (SET_DEST (set), addr))
return cost + 1;
}
+
if ((dep_class == ITANIUM_CLASS_IALU
|| dep_class == ITANIUM_CLASS_ILOG
|| dep_class == ITANIUM_CLASS_LD)
|| insn_class == ITANIUM_CLASS_MMSHF
|| insn_class == ITANIUM_CLASS_MMSHFI))
return 3;
+
if (dep_class == ITANIUM_CLASS_FMAC
&& (insn_class == ITANIUM_CLASS_FMISC
|| insn_class == ITANIUM_CLASS_FCVTFX
|| insn_class == ITANIUM_CLASS_XMPY))
return 7;
+
if ((dep_class == ITANIUM_CLASS_FMAC
|| dep_class == ITANIUM_CLASS_FMISC
|| dep_class == ITANIUM_CLASS_FCVTFX
|| dep_class == ITANIUM_CLASS_XMPY)
&& insn_class == ITANIUM_CLASS_STF)
return 8;
+
+ /* Intel docs say only LD, ST, IALU, ILOG, ISHF consumers have latency 4,
+ but HP engineers say any non-MM operation. */
if ((dep_class == ITANIUM_CLASS_MMMUL
|| dep_class == ITANIUM_CLASS_MMSHF
|| dep_class == ITANIUM_CLASS_MMSHFI)
- && (insn_class == ITANIUM_CLASS_LD
- || insn_class == ITANIUM_CLASS_ST
- || insn_class == ITANIUM_CLASS_IALU
- || insn_class == ITANIUM_CLASS_ILOG
- || insn_class == ITANIUM_CLASS_ISHF))
+ && insn_class != ITANIUM_CLASS_MMMUL
+ && insn_class != ITANIUM_CLASS_MMSHF
+ && insn_class != ITANIUM_CLASS_MMSHFI)
return 4;
return cost;
return 0;
}
-/* Like emit_insn_before, but skip cycle_display insns. This makes the
- assembly output a bit prettier. */
+/* Like emit_insn_before, but skip cycle_display notes.
+ ??? When cycle display notes are implemented, update this. */
static void
ia64_emit_insn_before (insn, before)
rtx insn, before;
{
- rtx prev = PREV_INSN (before);
- if (prev && GET_CODE (prev) == INSN
- && GET_CODE (PATTERN (prev)) == UNSPEC
- && XINT (PATTERN (prev), 1) == 23)
- before = prev;
emit_insn_before (insn, before);
}
-#if 0
-/* Generate a nop insn of the given type. Note we never generate L type
- nops. */
-
-static rtx
-gen_nop_type (t)
- enum attr_type t;
-{
- switch (t)
- {
- case TYPE_M:
- return gen_nop_m ();
- case TYPE_I:
- return gen_nop_i ();
- case TYPE_B:
- return gen_nop_b ();
- case TYPE_F:
- return gen_nop_f ();
- case TYPE_X:
- return gen_nop_x ();
- default:
- abort ();
- }
-}
-#endif
-
/* When rotating a bundle out of the issue window, insert a bundle selector
insn in front of it. DUMP is the scheduling dump file or NULL. START
is either 0 or 3, depending on whether we want to emit a bundle selector
if (slot > sched_data.split)
abort ();
if (dump)
- fprintf (dump, "// Packet needs %s, have %s\n", type_names[packet->t[slot]],
- type_names[t]);
+ fprintf (dump, "// Packet needs %s, have %s\n",
+ type_names[packet->t[slot]], type_names[t]);
sched_data.types[slot] = packet->t[slot];
sched_data.insns[slot] = 0;
sched_data.stopbit[slot] = 0;
slot++;
}
+
/* Do _not_ use T here. If T == TYPE_A, then we'd risk changing the
actual slot type later. */
sched_data.types[slot] = packet->t[slot];
sched_data.insns[slot] = tmp_insns[i];
sched_data.stopbit[slot] = 0;
slot++;
+
/* TYPE_L instructions always fill up two slots. */
if (t == TYPE_L)
- slot++;
+ {
+ sched_data.types[slot] = packet->t[slot];
+ sched_data.insns[slot] = 0;
+ sched_data.stopbit[slot] = 0;
+ slot++;
+ }
}
/* This isn't right - there's no need to pad out until the forced split;
memmove (sched_data.insns,
sched_data.insns + 3,
sched_data.cur * sizeof *sched_data.insns);
+ sched_data.packet
+ = &packets[(sched_data.packet->t2 - bundle) * NR_BUNDLES];
}
else
{
maybe_rotate (dump)
FILE *dump;
{
+ cycle_end_fill_slots (dump);
if (sched_data.cur == 6)
rotate_two_bundles (dump);
else if (sched_data.cur >= 3)
value of sched_data.first_slot. */
static int prev_first;
-/* The last insn that has been scheduled. At the start of a new cycle
- we know that we can emit new insns after it; the main scheduling code
- has already emitted a cycle_display insn after it and is using that
- as its current last insn. */
-static rtx last_issued;
-
/* Emit NOPs to fill the delay between PREV_CYCLE and CLOCK_VAR. Used to
pad out the delay between MM (shifts, etc.) and integer operations. */
{
int prev_clock = prev_cycle;
int cycles_left = clock_var - prev_clock;
+ bool did_stop = false;
/* Finish the previous cycle; pad it out with NOPs. */
if (sched_data.cur == 3)
{
- rtx t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
maybe_rotate (dump);
}
else if (sched_data.cur > 0)
int i;
for (i = sched_data.cur; i < split; i++)
{
- rtx t;
-
- t = gen_nop_type (sched_data.packet->t[i]);
- last_issued = emit_insn_after (t, last_issued);
- sched_data.types[i] = sched_data.packet->t[sched_data.cur];
- sched_data.insns[i] = last_issued;
+ rtx t = sched_emit_insn (gen_nop_type (sched_data.packet->t[i]));
+ sched_data.types[i] = sched_data.packet->t[i];
+ sched_data.insns[i] = t;
sched_data.stopbit[i] = 0;
}
sched_data.cur = split;
int i;
for (i = sched_data.cur; i < 6; i++)
{
- rtx t;
-
- t = gen_nop_type (sched_data.packet->t[i]);
- last_issued = emit_insn_after (t, last_issued);
- sched_data.types[i] = sched_data.packet->t[sched_data.cur];
- sched_data.insns[i] = last_issued;
+ rtx t = sched_emit_insn (gen_nop_type (sched_data.packet->t[i]));
+ sched_data.types[i] = sched_data.packet->t[i];
+ sched_data.insns[i] = t;
sched_data.stopbit[i] = 0;
}
sched_data.cur = 6;
if (need_stop || sched_data.cur == 6)
{
- rtx t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
}
maybe_rotate (dump);
}
cycles_left--;
while (cycles_left > 0)
{
- rtx t = gen_bundle_selector (GEN_INT (0));
- last_issued = emit_insn_after (t, last_issued);
- t = gen_nop_type (TYPE_M);
- last_issued = emit_insn_after (t, last_issued);
- t = gen_nop_type (TYPE_I);
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_bundle_selector (GEN_INT (0)));
+ sched_emit_insn (gen_nop_type (TYPE_M));
+ sched_emit_insn (gen_nop_type (TYPE_I));
if (cycles_left > 1)
{
- t = gen_insn_group_barrier (GEN_INT (2));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (2)));
cycles_left--;
}
- t = gen_nop_type (TYPE_I);
- last_issued = emit_insn_after (t, last_issued);
- t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_nop_type (TYPE_I));
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
cycles_left--;
}
+
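+  /* A stop bit emitted above begins a new instruction group, so the
+     register read/write tracking state must be reset to match.  */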
+ if (did_stop)
+ init_insn_group_barriers ();
}
/* We are about to begin issuing insns for this clock cycle.
dump_current_packet (dump);
}
+  /* Work around the pipeline flush that will occur if the results of
+     an MM instruction are accessed before the result is ready.  Intel
+     documentation says this only happens with IALU, ISHF, ILOG, LD,
+     and ST consumers, but experimental evidence shows that *any* non-MM
+     type instruction will incur the flush.  */
if (reorder_type == 0 && clock_var > 0 && ia64_final_schedule)
{
for (insnp = ready; insnp < e_ready; insnp++)
{
- rtx insn = *insnp;
+ rtx insn = *insnp, link;
enum attr_itanium_class t = ia64_safe_itanium_class (insn);
- if (t == ITANIUM_CLASS_IALU || t == ITANIUM_CLASS_ISHF
- || t == ITANIUM_CLASS_ILOG
- || t == ITANIUM_CLASS_LD || t == ITANIUM_CLASS_ST)
- {
- rtx link;
- for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT
- && REG_NOTE_KIND (link) != REG_DEP_ANTI)
+
+ if (t == ITANIUM_CLASS_MMMUL
+ || t == ITANIUM_CLASS_MMSHF
+ || t == ITANIUM_CLASS_MMSHFI)
+ continue;
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ rtx other = XEXP (link, 0);
+ enum attr_itanium_class t0 = ia64_safe_itanium_class (other);
+ if (t0 == ITANIUM_CLASS_MMSHF || t0 == ITANIUM_CLASS_MMMUL)
{
- rtx other = XEXP (link, 0);
- enum attr_itanium_class t0 = ia64_safe_itanium_class (other);
- if (t0 == ITANIUM_CLASS_MMSHF
- || t0 == ITANIUM_CLASS_MMMUL)
- {
- nop_cycles_until (clock_var, sched_verbose ? dump : NULL);
- goto out;
- }
+ nop_cycles_until (clock_var, sched_verbose ? dump : NULL);
+ goto out;
}
- }
+ }
}
}
out:
abort ();
insn_code = recog_memoized (stop);
- /* Ignore cycle displays and .pred.rel.mutex. */
- if (insn_code == CODE_FOR_cycle_display
- || insn_code == CODE_FOR_pred_rel_mutex
+ /* Ignore .pred.rel.mutex.
+
+ ??? Update this to ignore cycle display notes too
+     ??? once those are implemented.  */
+ if (insn_code == CODE_FOR_pred_rel_mutex
|| insn_code == CODE_FOR_prologue_use)
continue;
{
enum attr_type t = ia64_safe_type (insn);
- last_issued = insn;
-
if (sched_data.last_was_stop)
{
int t = sched_data.first_slot;
free (sched_types);
free (sched_ready);
}
-
-static rtx
-ia64_cycle_display (clock, last)
- int clock;
- rtx last;
-{
- if (ia64_final_schedule)
- return emit_insn_after (gen_cycle_display (GEN_INT (clock)), last);
- else
- return last;
-}
\f
/* Emit pseudo-ops for the assembler to describe predicate relations.
At present this assumes that we only consider predicate pairs to
static void
emit_predicate_relation_info ()
{
- int i;
+ basic_block bb;
- for (i = n_basic_blocks - 1; i >= 0; --i)
+ FOR_EACH_BB_REVERSE (bb)
{
- basic_block bb = BASIC_BLOCK (i);
int r;
rtx head = bb->head;
relations around them. Otherwise the assembler will assume the call
returns, and complain about uses of call-clobbered predicates after
the call. */
- for (i = n_basic_blocks - 1; i >= 0; --i)
+ FOR_EACH_BB_REVERSE (bb)
{
- basic_block bb = BASIC_BLOCK (i);
rtx insn = bb->head;
while (1)
pat = INSN_P (insn) ? PATTERN (insn) : const0_rtx;
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
- if ((GET_CODE (pat) == UNSPEC && XINT (pat, 1) == 22)
+ if ((GET_CODE (pat) == UNSPEC && XINT (pat, 1) == UNSPEC_BUNDLE_SELECTOR)
|| GET_CODE (insn) == CODE_LABEL)
{
if (b)
bundle_pos = 0;
continue;
}
- else if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == 2)
+ else if (GET_CODE (pat) == UNSPEC_VOLATILE
+ && XINT (pat, 1) == UNSPECV_INSN_GROUP_BARRIER)
{
int t = INTVAL (XVECEXP (pat, 0, 0));
if (b)
ia64_reorg (insns)
rtx insns;
{
+ /* We are freeing block_for_insn in the toplev to keep compatibility
+ with old MDEP_REORGS that are not CFG based. Recompute it now. */
+ compute_bb_for_insn (get_max_uid ());
+
/* If optimizing, we'll have split before scheduling. */
if (optimize == 0)
- split_all_insns_noflow ();
+ split_all_insns (0);
- /* Make sure the CFG and global_live_at_start are correct
- for emit_predicate_relation_info. */
- find_basic_blocks (insns, max_reg_num (), NULL);
- life_analysis (insns, NULL, PROP_DEATH_NOTES);
+ /* ??? update_life_info_in_dirty_blocks fails to terminate during
+ non-optimizing bootstrap. */
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
if (ia64_flag_schedule_insns2)
{
insn = prev_active_insn (insn);
if (GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
- && XINT (PATTERN (insn), 1) == 2)
+ && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
{
saw_stop = 1;
insn = prev_active_insn (insn);
}
}
-/* Table of valid machine attributes. */
-const struct attribute_spec ia64_attribute_table[] =
+/* Return true if REGNO is used by the frame unwinder. */
+
+int
+ia64_eh_uses (regno)
+ int regno;
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "syscall_linkage", 0, 0, false, true, true, NULL },
- { NULL, 0, 0, false, false, false, NULL }
-};
+ if (! reload_completed)
+ return 0;
+
+ if (current_frame_info.reg_save_b0
+ && regno == current_frame_info.reg_save_b0)
+ return 1;
+ if (current_frame_info.reg_save_pr
+ && regno == current_frame_info.reg_save_pr)
+ return 1;
+ if (current_frame_info.reg_save_ar_pfs
+ && regno == current_frame_info.reg_save_ar_pfs)
+ return 1;
+ if (current_frame_info.reg_save_ar_unat
+ && regno == current_frame_info.reg_save_ar_unat)
+ return 1;
+ if (current_frame_info.reg_save_ar_lc
+ && regno == current_frame_info.reg_save_ar_lc)
+ return 1;
+
+ return 0;
+}
\f
/* For ia64, SYMBOL_REF_FLAG set means that it is a function.
code faster because there is one less load. This also includes incomplete
types which can't go in sdata/sbss. */
-/* ??? See select_section. We must put short own readonly variables in
- sdata/sbss instead of the more natural rodata, because we can't perform
- the DECL_READONLY_SECTION test here. */
+static bool
+ia64_in_small_data_p (exp)
+ tree exp;
+{
+ if (TARGET_NO_SDATA)
+ return false;
+
+ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+ if (strcmp (section, ".sdata") == 0
+ || strcmp (section, ".sbss") == 0)
+ return true;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+
+ /* If this is an incomplete type with size 0, then we can't put it
+ in sdata because it might be too big when completed. */
+ if (size > 0 && size <= ia64_section_threshold)
+ return true;
+ }
-extern struct obstack * saveable_obstack;
+ return false;
+}
-void
-ia64_encode_section_info (decl)
+static void
+ia64_encode_section_info (decl, first)
tree decl;
+ int first ATTRIBUTE_UNUSED;
{
const char *symbol_str;
+ bool is_local;
+ rtx symbol;
+ char encoding = 0;
if (TREE_CODE (decl) == FUNCTION_DECL)
{
|| GET_CODE (DECL_RTL (decl)) != MEM
|| GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
return;
-
- symbol_str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
-
- /* We assume that -fpic is used only to create a shared library (dso).
- With -fpic, no global data can ever be sdata.
- Without -fpic, global common uninitialized data can never be sdata, since
- it can unify with a real definition in a dso. */
- /* ??? Actually, we can put globals in sdata, as long as we don't use gprel
- to access them. The linker may then be able to do linker relaxation to
- optimize references to them. Currently sdata implies use of gprel. */
- /* We need the DECL_EXTERNAL check for C++. static class data members get
- both TREE_STATIC and DECL_EXTERNAL set, to indicate that they are
- statically allocated, but the space is allocated somewhere else. Such
- decls can not be own data. */
- if (! TARGET_NO_SDATA
- && ((TREE_STATIC (decl) && ! DECL_EXTERNAL (decl)
- && ! (DECL_ONE_ONLY (decl) || DECL_WEAK (decl))
- && ! (TREE_PUBLIC (decl)
- && (flag_pic
- || (DECL_COMMON (decl)
- && (DECL_INITIAL (decl) == 0
- || DECL_INITIAL (decl) == error_mark_node)))))
- || MODULE_LOCAL_P (decl))
- /* Either the variable must be declared without a section attribute,
- or the section must be sdata or sbss. */
- && (DECL_SECTION_NAME (decl) == 0
- || ! strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
- ".sdata")
- || ! strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)),
- ".sbss")))
- {
- HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
-
- /* If the variable has already been defined in the output file, then it
- is too late to put it in sdata if it wasn't put there in the first
- place. The test is here rather than above, because if it is already
- in sdata, then it can stay there. */
-
- if (TREE_ASM_WRITTEN (decl))
- ;
-
- /* If this is an incomplete type with size 0, then we can't put it in
- sdata because it might be too big when completed.
- Objects bigger than threshold should have SDATA_NAME_FLAG_CHAR
- added if they are in .sdata or .sbss explicitely. */
- else if (((size > 0
- && size <= (HOST_WIDE_INT) ia64_section_threshold)
- || DECL_SECTION_NAME (decl))
- && symbol_str[0] != SDATA_NAME_FLAG_CHAR)
- {
- size_t len = strlen (symbol_str);
- char *newstr = alloca (len + 1);
- const char *string;
- *newstr = SDATA_NAME_FLAG_CHAR;
- memcpy (newstr + 1, symbol_str, len + 1);
-
- string = ggc_alloc_string (newstr, len + 1);
- XSTR (XEXP (DECL_RTL (decl), 0), 0) = string;
+ symbol = XEXP (DECL_RTL (decl), 0);
+ symbol_str = XSTR (symbol, 0);
+
+ is_local = (*targetm.binds_local_p) (decl);
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl))
+ {
+ enum tls_model kind;
+ if (!flag_pic)
+ {
+ if (is_local)
+ kind = TLS_MODEL_LOCAL_EXEC;
+ else
+ kind = TLS_MODEL_INITIAL_EXEC;
}
+ else if (is_local)
+ kind = TLS_MODEL_LOCAL_DYNAMIC;
+ else
+ kind = TLS_MODEL_GLOBAL_DYNAMIC;
+ if (kind < flag_tls_default)
+ kind = flag_tls_default;
+
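+      /* Index by tls_model: 1 = 'G' (global dynamic), 2 = 'L' (local
+	 dynamic), 3 = 'i' (initial exec), 4 = 'l' (local exec).  These
+	 are the characters tls_symbolic_operand checks for.  */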
+ encoding = " GLil"[kind];
}
- /* This decl is marked as being in small data/bss but it shouldn't
- be; one likely explanation for this is that the decl has been
- moved into a different section from the one it was in when
- ENCODE_SECTION_INFO was first called. Remove the '@'. */
- else if (symbol_str[0] == SDATA_NAME_FLAG_CHAR)
+ /* Determine if DECL will wind up in .sdata/.sbss. */
+ else if (is_local && ia64_in_small_data_p (decl))
+ encoding = 's';
+
+ /* Finally, encode this into the symbol string. */
+ if (encoding)
{
- XSTR (XEXP (DECL_RTL (decl), 0), 0)
- = ggc_strdup (symbol_str + 1);
+ char *newstr;
+ size_t len;
+
+ if (symbol_str[0] == ENCODE_SECTION_INFO_CHAR)
+ {
+ if (encoding == symbol_str[1])
+ return;
+	  /* ??? Sdata became thread or thread became not thread.  Lose.  */
+ abort ();
+ }
+
+ len = strlen (symbol_str);
+ newstr = alloca (len + 3);
+ newstr[0] = ENCODE_SECTION_INFO_CHAR;
+ newstr[1] = encoding;
+ memcpy (newstr + 2, symbol_str, len + 1);
+
+ XSTR (symbol, 0) = ggc_alloc_string (newstr, len + 2);
}
+
+ /* This decl is marked as being in small data/bss but it shouldn't be;
+ one likely explanation for this is that the decl has been moved into
+ a different section from the one it was in when encode_section_info
+ was first called. Remove the encoding. */
+ else if (symbol_str[0] == ENCODE_SECTION_INFO_CHAR)
+ XSTR (symbol, 0) = ggc_strdup (symbol_str + 2);
+}
+
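+/* Undo the encoding applied by ia64_encode_section_info: drop the
+   two-character section-info prefix, and any '*' that suppresses the
+   user label prefix.  */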
+static const char *
+ia64_strip_name_encoding (str)
+ const char *str;
+{
+ if (str[0] == ENCODE_SECTION_INFO_CHAR)
+ str += 2;
+ if (str[0] == '*')
+ str++;
+ return str;
}
\f
/* Output assembly directives for prologue regions. */
-/* The current basic block number. */
-static int block_num;
+/* True if the current block is the last block of the function. */
+static bool last_block;
/* True if we need a copy_state command at the start of the next block. */
-static int need_copy_state;
+static bool need_copy_state;
/* The function emits unwind directives for the start of an epilogue. */
/* If this isn't the last block of the function, then we need to label the
current state, and copy it back in at the start of the next block. */
- if (block_num != n_basic_blocks - 1)
+ if (!last_block)
{
fprintf (asm_out_file, "\t.label_state 1\n");
- need_copy_state = 1;
+ need_copy_state = true;
}
fprintf (asm_out_file, "\t.restore sp\n");
/* Look for the ALLOC insn. */
if (GET_CODE (src) == UNSPEC_VOLATILE
- && XINT (src, 1) == 0
+ && XINT (src, 1) == UNSPECV_ALLOC
&& GET_CODE (dest) == REG)
{
dest_regno = REGNO (dest);
if (GET_CODE (insn) == NOTE
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
{
- block_num = NOTE_BASIC_BLOCK (insn)->index;
+ last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
/* Restore unwind state from immediately before the epilogue. */
if (need_copy_state)
{
fprintf (asm_out_file, "\t.body\n");
fprintf (asm_out_file, "\t.copy_state 1\n");
- need_copy_state = 0;
+ need_copy_state = false;
}
}
- if (! RTX_FRAME_RELATED_P (insn))
+ if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
return;
pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
: GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
? downward : upward);
}
+\f
+/* Switch to the section to which we should output X. The only thing
+ special we do here is to honor small data. */
+
+static void
+ia64_select_rtx_section (mode, x, align)
+ enum machine_mode mode;
+ rtx x;
+ unsigned HOST_WIDE_INT align;
+{
+ if (GET_MODE_SIZE (mode) > 0
+ && GET_MODE_SIZE (mode) <= ia64_section_threshold)
+ sdata_section ();
+ else
+ default_elf_select_rtx_section (mode, x, align);
+}
+
+/* It is illegal to have relocations in shared segments on AIX.
+ Pretend flag_pic is always set. */
+
+static void
+ia64_aix_select_section (exp, reloc, align)
+ tree exp;
+ int reloc;
+ unsigned HOST_WIDE_INT align;
+{
+ int save_pic = flag_pic;
+ flag_pic = 1;
+ default_elf_select_section (exp, reloc, align);
+ flag_pic = save_pic;
+}
+
+static void
+ia64_aix_unique_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ int save_pic = flag_pic;
+ flag_pic = 1;
+ default_unique_section (decl, reloc);
+ flag_pic = save_pic;
+}
+
+static void
+ia64_aix_select_rtx_section (mode, x, align)
+ enum machine_mode mode;
+ rtx x;
+ unsigned HOST_WIDE_INT align;
+{
+ int save_pic = flag_pic;
+ flag_pic = 1;
+ ia64_select_rtx_section (mode, x, align);
+ flag_pic = save_pic;
+}
+
+#include "gt-ia64.h"