[thirdparty/gcc.git] / gcc / config / ia64 / ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "tree-gimple.h"
54 #include "intl.h"
55 #include "df.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "dbgcnt.h"
59 #include "tm-constrs.h"
60
61 /* This is used for communication between ASM_OUTPUT_LABEL and
62 ASM_OUTPUT_LABELREF. */
63 int ia64_asm_output_label = 0;
64
65 /* Define the information needed to generate branch and scc insns. This is
66 stored from the compare operation. */
67 struct rtx_def * ia64_compare_op0;
68 struct rtx_def * ia64_compare_op1;
69
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
84
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105
106 /* Which cpu we are scheduling for.  */
107 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
108
109 /* Determines whether we run our final scheduling pass or not. We always
110 avoid the normal second scheduling pass. */
111 static int ia64_flag_schedule_insns2;
112
113 /* Determines whether we run variable tracking in machine dependent
114 reorganization. */
115 static int ia64_flag_var_tracking;
116
117 /* Variables which are this size or smaller are put in the sdata/sbss
118 sections. */
119
120 unsigned int ia64_section_threshold;
121
122 /* The following variable is used by the DFA insn scheduler. The value is
123 TRUE if we do insn bundling instead of insn scheduling. */
124 int bundling_p = 0;
125
126 enum ia64_frame_regs
127 {
128 reg_fp,
129 reg_save_b0,
130 reg_save_pr,
131 reg_save_ar_pfs,
132 reg_save_ar_unat,
133 reg_save_ar_lc,
134 reg_save_gp,
135 number_of_ia64_frame_regs
136 };
137
138 /* Structure to be filled in by ia64_compute_frame_size with register
139 save masks and offsets for the current function. */
140
141 struct ia64_frame_info
142 {
143 HOST_WIDE_INT total_size; /* size of the stack frame, not including
144 the caller's scratch area. */
145 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
146 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
147 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
148 HARD_REG_SET mask; /* mask of saved registers. */
149 unsigned int gr_used_mask; /* mask of registers in use as gr spill
150 registers or long-term scratches. */
151 int n_spilled; /* number of spilled registers. */
152 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
153 int n_input_regs; /* number of input registers used. */
154 int n_local_regs; /* number of local registers used. */
155 int n_output_regs; /* number of output registers used. */
156 int n_rotate_regs; /* number of rotating registers used. */
157
158 char need_regstk; /* true if a .regstk directive needed. */
159 char initialized; /* true if the data is finalized. */
160 };
161
162 /* Current frame information calculated by ia64_compute_frame_size. */
163 static struct ia64_frame_info current_frame_info;
164 /* The actual registers that are emitted. */
165 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
166 \f
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static int ia64_mode_to_int (enum machine_mode);
176 static void ia64_set_sched_flags (spec_info_t);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
179 static bool ia64_needs_block_p (rtx);
180 static rtx ia64_gen_check (rtx, rtx, bool);
181 static int ia64_spec_check_p (rtx);
182 static int ia64_spec_check_src_p (rtx);
183 static rtx gen_tls_get_addr (void);
184 static rtx gen_thread_pointer (void);
185 static int find_gr_spill (enum ia64_frame_regs, int);
186 static int next_scratch_gr_reg (void);
187 static void mark_reg_gr_used_mask (rtx, void *);
188 static void ia64_compute_frame_size (HOST_WIDE_INT);
189 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
190 static void finish_spill_pointers (void);
191 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
192 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
193 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
194 static rtx gen_movdi_x (rtx, rtx, rtx);
195 static rtx gen_fr_spill_x (rtx, rtx, rtx);
196 static rtx gen_fr_restore_x (rtx, rtx, rtx);
197
198 static enum machine_mode hfa_element_mode (const_tree, bool);
199 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
200 tree, int *, int);
201 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
202 tree, bool);
203 static bool ia64_function_ok_for_sibcall (tree, tree);
204 static bool ia64_return_in_memory (const_tree, const_tree);
205 static bool ia64_rtx_costs (rtx, int, int, int *);
206 static void fix_range (const char *);
207 static bool ia64_handle_option (size_t, const char *, int);
208 static struct machine_function * ia64_init_machine_status (void);
209 static void emit_insn_group_barriers (FILE *);
210 static void emit_all_insn_group_barriers (FILE *);
211 static void final_emit_insn_group_barriers (FILE *);
212 static void emit_predicate_relation_info (void);
213 static void ia64_reorg (void);
214 static bool ia64_in_small_data_p (tree);
215 static void process_epilogue (FILE *, rtx, bool, bool);
216 static int process_set (FILE *, rtx, rtx, bool, bool);
217
218 static bool ia64_assemble_integer (rtx, unsigned int, int);
219 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
220 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
221 static void ia64_output_function_end_prologue (FILE *);
222
223 static int ia64_issue_rate (void);
224 static int ia64_adjust_cost (rtx, rtx, rtx, int);
225 static void ia64_sched_init (FILE *, int, int);
226 static void ia64_sched_init_global (FILE *, int, int);
227 static void ia64_sched_finish_global (FILE *, int);
228 static void ia64_sched_finish (FILE *, int);
229 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
230 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
231 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
232 static int ia64_variable_issue (FILE *, int, rtx, int);
233
234 static struct bundle_state *get_free_bundle_state (void);
235 static void free_bundle_state (struct bundle_state *);
236 static void initiate_bundle_states (void);
237 static void finish_bundle_states (void);
238 static unsigned bundle_state_hash (const void *);
239 static int bundle_state_eq_p (const void *, const void *);
240 static int insert_bundle_state (struct bundle_state *);
241 static void initiate_bundle_state_table (void);
242 static void finish_bundle_state_table (void);
243 static int try_issue_nops (struct bundle_state *, int);
244 static int try_issue_insn (struct bundle_state *, rtx);
245 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
246 static int get_max_pos (state_t);
247 static int get_template (state_t, int);
248
249 static rtx get_next_important_insn (rtx, rtx);
250 static void bundling (FILE *, int, rtx, rtx);
251
252 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
253 HOST_WIDE_INT, tree);
254 static void ia64_file_start (void);
255 static void ia64_globalize_decl_name (FILE *, tree);
256
257 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
258 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
259 static section *ia64_select_rtx_section (enum machine_mode, rtx,
260 unsigned HOST_WIDE_INT);
261 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
262 ATTRIBUTE_UNUSED;
263 static unsigned int ia64_section_type_flags (tree, const char *, int);
264 static void ia64_init_libfuncs (void)
265 ATTRIBUTE_UNUSED;
266 static void ia64_hpux_init_libfuncs (void)
267 ATTRIBUTE_UNUSED;
268 static void ia64_sysv4_init_libfuncs (void)
269 ATTRIBUTE_UNUSED;
270 static void ia64_vms_init_libfuncs (void)
271 ATTRIBUTE_UNUSED;
272
273 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
274 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
275 static void ia64_encode_section_info (tree, rtx, int);
276 static rtx ia64_struct_value_rtx (tree, int);
277 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
278 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
279 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
280 static bool ia64_cannot_force_const_mem (rtx);
281 static const char *ia64_mangle_type (tree);
282 static const char *ia64_invalid_conversion (tree, tree);
283 static const char *ia64_invalid_unary_op (int, tree);
284 static const char *ia64_invalid_binary_op (int, tree, tree);
285 static enum machine_mode ia64_c_mode_for_suffix (char);
286 \f
287 /* Table of valid machine attributes. */
288 static const struct attribute_spec ia64_attribute_table[] =
289 {
290 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
291 { "syscall_linkage", 0, 0, false, true, true, NULL },
292 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
293 { "version_id", 1, 1, true, false, false,
294 ia64_handle_version_id_attribute },
295 { NULL, 0, 0, false, false, false, NULL }
296 };
297
298 /* Initialize the GCC target structure. */
299 #undef TARGET_ATTRIBUTE_TABLE
300 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
301
302 #undef TARGET_INIT_BUILTINS
303 #define TARGET_INIT_BUILTINS ia64_init_builtins
304
305 #undef TARGET_EXPAND_BUILTIN
306 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
307
308 #undef TARGET_ASM_BYTE_OP
309 #define TARGET_ASM_BYTE_OP "\tdata1\t"
310 #undef TARGET_ASM_ALIGNED_HI_OP
311 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
312 #undef TARGET_ASM_ALIGNED_SI_OP
313 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
314 #undef TARGET_ASM_ALIGNED_DI_OP
315 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
316 #undef TARGET_ASM_UNALIGNED_HI_OP
317 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
318 #undef TARGET_ASM_UNALIGNED_SI_OP
319 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
320 #undef TARGET_ASM_UNALIGNED_DI_OP
321 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
322 #undef TARGET_ASM_INTEGER
323 #define TARGET_ASM_INTEGER ia64_assemble_integer
324
325 #undef TARGET_ASM_FUNCTION_PROLOGUE
326 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
327 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
328 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
329 #undef TARGET_ASM_FUNCTION_EPILOGUE
330 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
331
332 #undef TARGET_IN_SMALL_DATA_P
333 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
334
335 #undef TARGET_SCHED_ADJUST_COST
336 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
337 #undef TARGET_SCHED_ISSUE_RATE
338 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
339 #undef TARGET_SCHED_VARIABLE_ISSUE
340 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
341 #undef TARGET_SCHED_INIT
342 #define TARGET_SCHED_INIT ia64_sched_init
343 #undef TARGET_SCHED_FINISH
344 #define TARGET_SCHED_FINISH ia64_sched_finish
345 #undef TARGET_SCHED_INIT_GLOBAL
346 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
347 #undef TARGET_SCHED_FINISH_GLOBAL
348 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
349 #undef TARGET_SCHED_REORDER
350 #define TARGET_SCHED_REORDER ia64_sched_reorder
351 #undef TARGET_SCHED_REORDER2
352 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
353
354 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
355 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
356
357 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
358 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
359
360 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
361 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
362 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
363 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
364
365 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
366 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
367 ia64_first_cycle_multipass_dfa_lookahead_guard
368
369 #undef TARGET_SCHED_DFA_NEW_CYCLE
370 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
371
372 #undef TARGET_SCHED_H_I_D_EXTENDED
373 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
374
375 #undef TARGET_SCHED_SET_SCHED_FLAGS
376 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
377
378 #undef TARGET_SCHED_SPECULATE_INSN
379 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
380
381 #undef TARGET_SCHED_NEEDS_BLOCK_P
382 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
383
384 #undef TARGET_SCHED_GEN_CHECK
385 #define TARGET_SCHED_GEN_CHECK ia64_gen_check
386
387 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
388 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
389 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
390
391 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
392 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
393 #undef TARGET_ARG_PARTIAL_BYTES
394 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
395
396 #undef TARGET_ASM_OUTPUT_MI_THUNK
397 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
398 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
399 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
400
401 #undef TARGET_ASM_FILE_START
402 #define TARGET_ASM_FILE_START ia64_file_start
403
404 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
405 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
406
407 #undef TARGET_RTX_COSTS
408 #define TARGET_RTX_COSTS ia64_rtx_costs
409 #undef TARGET_ADDRESS_COST
410 #define TARGET_ADDRESS_COST hook_int_rtx_0
411
412 #undef TARGET_MACHINE_DEPENDENT_REORG
413 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
414
415 #undef TARGET_ENCODE_SECTION_INFO
416 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
417
418 #undef TARGET_SECTION_TYPE_FLAGS
419 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
420
421 #ifdef HAVE_AS_TLS
422 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
423 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
424 #endif
425
426 /* ??? ABI doesn't allow us to define this. */
427 #if 0
428 #undef TARGET_PROMOTE_FUNCTION_ARGS
429 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
430 #endif
431
432 /* ??? ABI doesn't allow us to define this. */
433 #if 0
434 #undef TARGET_PROMOTE_FUNCTION_RETURN
435 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
436 #endif
437
438 /* ??? Investigate. */
439 #if 0
440 #undef TARGET_PROMOTE_PROTOTYPES
441 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
442 #endif
443
444 #undef TARGET_STRUCT_VALUE_RTX
445 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
446 #undef TARGET_RETURN_IN_MEMORY
447 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
448 #undef TARGET_SETUP_INCOMING_VARARGS
449 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
450 #undef TARGET_STRICT_ARGUMENT_NAMING
451 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
452 #undef TARGET_MUST_PASS_IN_STACK
453 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
454
455 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
456 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
457
458 #undef TARGET_UNWIND_EMIT
459 #define TARGET_UNWIND_EMIT process_for_unwind_directive
460
461 #undef TARGET_SCALAR_MODE_SUPPORTED_P
462 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
463 #undef TARGET_VECTOR_MODE_SUPPORTED_P
464 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
465
466 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
467 in an order different from the specified program order. */
468 #undef TARGET_RELAXED_ORDERING
469 #define TARGET_RELAXED_ORDERING true
470
471 #undef TARGET_DEFAULT_TARGET_FLAGS
472 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
473 #undef TARGET_HANDLE_OPTION
474 #define TARGET_HANDLE_OPTION ia64_handle_option
475
476 #undef TARGET_CANNOT_FORCE_CONST_MEM
477 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
478
479 #undef TARGET_MANGLE_TYPE
480 #define TARGET_MANGLE_TYPE ia64_mangle_type
481
482 #undef TARGET_INVALID_CONVERSION
483 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
484 #undef TARGET_INVALID_UNARY_OP
485 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
486 #undef TARGET_INVALID_BINARY_OP
487 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
488
489 #undef TARGET_C_MODE_FOR_SUFFIX
490 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
491
492 struct gcc_target targetm = TARGET_INITIALIZER;
493 \f
494 typedef enum
495 {
496 ADDR_AREA_NORMAL, /* normal address area */
497 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
498 }
499 ia64_addr_area;
500
501 static GTY(()) tree small_ident1;
502 static GTY(()) tree small_ident2;
503
504 static void
505 init_idents (void)
506 {
507 if (small_ident1 == 0)
508 {
509 small_ident1 = get_identifier ("small");
510 small_ident2 = get_identifier ("__small__");
511 }
512 }
513
514 /* Retrieve the address area that has been chosen for the given decl. */
515
516 static ia64_addr_area
517 ia64_get_addr_area (tree decl)
518 {
519 tree model_attr;
520
521 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
522 if (model_attr)
523 {
524 tree id;
525
526 init_idents ();
527 id = TREE_VALUE (TREE_VALUE (model_attr));
528 if (id == small_ident1 || id == small_ident2)
529 return ADDR_AREA_SMALL;
530 }
531 return ADDR_AREA_NORMAL;
532 }
533
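/* The handler below validates the "model" attribute.  In user code it is
   written with a bare identifier argument, e.g.
     static int counter __attribute__ ((model (small)));
   (or the __small__ spelling), asking for the object to be placed in the
   addl-reachable small address area.  The variable name here is only an
   illustration.  */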
534 static tree
535 ia64_handle_model_attribute (tree *node, tree name, tree args,
536 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
537 {
538 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
539 ia64_addr_area area;
540 tree arg, decl = *node;
541
542 init_idents ();
543 arg = TREE_VALUE (args);
544 if (arg == small_ident1 || arg == small_ident2)
545 {
546 addr_area = ADDR_AREA_SMALL;
547 }
548 else
549 {
550 warning (OPT_Wattributes, "invalid argument of %qs attribute",
551 IDENTIFIER_POINTER (name));
552 *no_add_attrs = true;
553 }
554
555 switch (TREE_CODE (decl))
556 {
557 case VAR_DECL:
558 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
559 == FUNCTION_DECL)
560 && !TREE_STATIC (decl))
561 {
562 error ("%Jan address area attribute cannot be specified for "
563 "local variables", decl);
564 *no_add_attrs = true;
565 }
566 area = ia64_get_addr_area (decl);
567 if (area != ADDR_AREA_NORMAL && addr_area != area)
568 {
569 error ("address area of %q+D conflicts with previous "
570 "declaration", decl);
571 *no_add_attrs = true;
572 }
573 break;
574
575 case FUNCTION_DECL:
576 error ("%Jaddress area attribute cannot be specified for functions",
577 decl);
578 *no_add_attrs = true;
579 break;
580
581 default:
582 warning (OPT_Wattributes, "%qs attribute ignored",
583 IDENTIFIER_POINTER (name));
584 *no_add_attrs = true;
585 break;
586 }
587
588 return NULL_TREE;
589 }
590
591 static void
592 ia64_encode_addr_area (tree decl, rtx symbol)
593 {
594 int flags;
595
596 flags = SYMBOL_REF_FLAGS (symbol);
597 switch (ia64_get_addr_area (decl))
598 {
599 case ADDR_AREA_NORMAL: break;
600 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
601 default: gcc_unreachable ();
602 }
603 SYMBOL_REF_FLAGS (symbol) = flags;
604 }
605
606 static void
607 ia64_encode_section_info (tree decl, rtx rtl, int first)
608 {
609 default_encode_section_info (decl, rtl, first);
610
611 /* Careful not to prod global register variables. */
612 if (TREE_CODE (decl) == VAR_DECL
613 && GET_CODE (DECL_RTL (decl)) == MEM
614 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
615 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
616 ia64_encode_addr_area (decl, XEXP (rtl, 0));
617 }
618 \f
619 /* Return 1 if the operands of a move are ok. */
620
621 int
622 ia64_move_ok (rtx dst, rtx src)
623 {
624 /* If we're under init_recog_no_volatile, we'll not be able to use
625 memory_operand. So check the code directly and don't worry about
626 the validity of the underlying address, which should have been
627 checked elsewhere anyway. */
628 if (GET_CODE (dst) != MEM)
629 return 1;
630 if (GET_CODE (src) == MEM)
631 return 0;
632 if (register_operand (src, VOIDmode))
633 return 1;
634
635 /* Otherwise, this must be a constant, and it must be either 0, 0.0 or 1.0. */
636 if (INTEGRAL_MODE_P (GET_MODE (dst)))
637 return src == const0_rtx;
638 else
639 return satisfies_constraint_G (src);
640 }
641
642 /* Return 1 if the operands are ok for a floating point load pair. */
643
644 int
645 ia64_load_pair_ok (rtx dst, rtx src)
646 {
647 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
648 return 0;
649 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
650 return 0;
651 switch (GET_CODE (XEXP (src, 0)))
652 {
653 case REG:
654 case POST_INC:
655 break;
656 case POST_DEC:
657 return 0;
658 case POST_MODIFY:
659 {
660 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
661
662 if (GET_CODE (adjust) != CONST_INT
663 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
664 return 0;
665 }
666 break;
667 default:
668 abort ();
669 }
670 return 1;
671 }
672
673 int
674 addp4_optimize_ok (rtx op1, rtx op2)
675 {
676 return (basereg_operand (op1, GET_MODE (op1)) !=
677 basereg_operand (op2, GET_MODE (op2)));
678 }
679
680 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
681 Return the length of the field, or <= 0 on failure. */
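/* For example, if OP is 0x7f0 and SHIFT is 4, then OP >> SHIFT is 0x7f and
   exact_log2 (0x7f + 1) is 7, so the dep.z field is 7 bits wide.  */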
682
683 int
684 ia64_depz_field_mask (rtx rop, rtx rshift)
685 {
686 unsigned HOST_WIDE_INT op = INTVAL (rop);
687 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
688
689 /* Get rid of the zero bits we're shifting in. */
690 op >>= shift;
691
692 /* We must now have a solid block of 1's at bit 0. */
693 return exact_log2 (op + 1);
694 }
695
696 /* Return the TLS model to use for ADDR. */
697
698 static enum tls_model
699 tls_symbolic_operand_type (rtx addr)
700 {
701 enum tls_model tls_kind = 0;
702
703 if (GET_CODE (addr) == CONST)
704 {
705 if (GET_CODE (XEXP (addr, 0)) == PLUS
706 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
707 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
708 }
709 else if (GET_CODE (addr) == SYMBOL_REF)
710 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
711
712 return tls_kind;
713 }
714
715 /* Return true if X is a constant that is valid for some immediate
716 field in an instruction. */
717
718 bool
719 ia64_legitimate_constant_p (rtx x)
720 {
721 switch (GET_CODE (x))
722 {
723 case CONST_INT:
724 case LABEL_REF:
725 return true;
726
727 case CONST_DOUBLE:
728 if (GET_MODE (x) == VOIDmode)
729 return true;
730 return satisfies_constraint_G (x);
731
732 case CONST:
733 case SYMBOL_REF:
734 /* ??? Short term workaround for PR 28490. We must make the code here
735 match the code in ia64_expand_move and move_operand, even though they
736 are both technically wrong. */
737 if (tls_symbolic_operand_type (x) == 0)
738 {
739 HOST_WIDE_INT addend = 0;
740 rtx op = x;
741
742 if (GET_CODE (op) == CONST
743 && GET_CODE (XEXP (op, 0)) == PLUS
744 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
745 {
746 addend = INTVAL (XEXP (XEXP (op, 0), 1));
747 op = XEXP (XEXP (op, 0), 0);
748 }
749
750 if (any_offset_symbol_operand (op, GET_MODE (op))
751 || function_operand (op, GET_MODE (op)))
752 return true;
753 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
754 return (addend & 0x3fff) == 0;
755 return false;
756 }
757 return false;
758
759 case CONST_VECTOR:
760 {
761 enum machine_mode mode = GET_MODE (x);
762
763 if (mode == V2SFmode)
764 return satisfies_constraint_Y (x);
765
766 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
767 && GET_MODE_SIZE (mode) <= 8);
768 }
769
770 default:
771 return false;
772 }
773 }
774
775 /* Don't allow TLS addresses to get spilled to memory. */
776
777 static bool
778 ia64_cannot_force_const_mem (rtx x)
779 {
780 return tls_symbolic_operand_type (x) != 0;
781 }
782
783 /* Expand a symbolic constant load. */
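/* In the general case below, the address is built as a HIGH/LO_SUM pair
   relative to the GP (pic_offset_table_rtx); any sign-extended low 14-bit
   part of a constant addend is split off first and added back at the end.  */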
784
785 bool
786 ia64_expand_load_address (rtx dest, rtx src)
787 {
788 gcc_assert (GET_CODE (dest) == REG);
789
790 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
791 having to pointer-extend the value afterward. Other forms of address
792 computation below are also more natural to compute as 64-bit quantities.
793 If we've been given an SImode destination register, change it. */
794 if (GET_MODE (dest) != Pmode)
795 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
796
797 if (TARGET_NO_PIC)
798 return false;
799 if (small_addr_symbolic_operand (src, VOIDmode))
800 return false;
801
802 if (TARGET_AUTO_PIC)
803 emit_insn (gen_load_gprel64 (dest, src));
804 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
805 emit_insn (gen_load_fptr (dest, src));
806 else if (sdata_symbolic_operand (src, VOIDmode))
807 emit_insn (gen_load_gprel (dest, src));
808 else
809 {
810 HOST_WIDE_INT addend = 0;
811 rtx tmp;
812
813 /* We did split constant offsets in ia64_expand_move, and we did try
814 to keep them split in move_operand, but we also allowed reload to
815 rematerialize arbitrary constants rather than spill the value to
816 the stack and reload it. So we have to be prepared here to split
817 them apart again. */
818 if (GET_CODE (src) == CONST)
819 {
820 HOST_WIDE_INT hi, lo;
821
822 hi = INTVAL (XEXP (XEXP (src, 0), 1));
823 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
824 hi = hi - lo;
825
826 if (lo != 0)
827 {
828 addend = lo;
829 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
830 }
831 }
832
833 tmp = gen_rtx_HIGH (Pmode, src);
834 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
835 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
836
837 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
838 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
839
840 if (addend)
841 {
842 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
843 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
844 }
845 }
846
847 return true;
848 }
849
850 static GTY(()) rtx gen_tls_tga;
851 static rtx
852 gen_tls_get_addr (void)
853 {
854 if (!gen_tls_tga)
855 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
856 return gen_tls_tga;
857 }
858
859 static GTY(()) rtx thread_pointer_rtx;
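/* r13 is the thread pointer register in the IA-64 software conventions;
   the TLS expansions below form addresses by adding offsets to it.  */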
860 static rtx
861 gen_thread_pointer (void)
862 {
863 if (!thread_pointer_rtx)
864 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
865 return thread_pointer_rtx;
866 }
867
868 static rtx
869 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
870 rtx orig_op1, HOST_WIDE_INT addend)
871 {
872 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
873 rtx orig_op0 = op0;
874 HOST_WIDE_INT addend_lo, addend_hi;
875
876 switch (tls_kind)
877 {
878 case TLS_MODEL_GLOBAL_DYNAMIC:
879 start_sequence ();
880
881 tga_op1 = gen_reg_rtx (Pmode);
882 emit_insn (gen_load_dtpmod (tga_op1, op1));
883
884 tga_op2 = gen_reg_rtx (Pmode);
885 emit_insn (gen_load_dtprel (tga_op2, op1));
886
887 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
888 LCT_CONST, Pmode, 2, tga_op1,
889 Pmode, tga_op2, Pmode);
890
891 insns = get_insns ();
892 end_sequence ();
893
894 if (GET_MODE (op0) != Pmode)
895 op0 = tga_ret;
896 emit_libcall_block (insns, op0, tga_ret, op1);
897 break;
898
899 case TLS_MODEL_LOCAL_DYNAMIC:
900 /* ??? This isn't the completely proper way to do local-dynamic.
901 If the call to __tls_get_addr is used only by a single symbol,
902 then we should (somehow) move the dtprel to the second arg
903 to avoid the extra add. */
904 start_sequence ();
905
906 tga_op1 = gen_reg_rtx (Pmode);
907 emit_insn (gen_load_dtpmod (tga_op1, op1));
908
909 tga_op2 = const0_rtx;
910
911 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
912 LCT_CONST, Pmode, 2, tga_op1,
913 Pmode, tga_op2, Pmode);
914
915 insns = get_insns ();
916 end_sequence ();
917
918 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
919 UNSPEC_LD_BASE);
920 tmp = gen_reg_rtx (Pmode);
921 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
922
923 if (!register_operand (op0, Pmode))
924 op0 = gen_reg_rtx (Pmode);
925 if (TARGET_TLS64)
926 {
927 emit_insn (gen_load_dtprel (op0, op1));
928 emit_insn (gen_adddi3 (op0, tmp, op0));
929 }
930 else
931 emit_insn (gen_add_dtprel (op0, op1, tmp));
932 break;
933
934 case TLS_MODEL_INITIAL_EXEC:
935 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
936 addend_hi = addend - addend_lo;
937
938 op1 = plus_constant (op1, addend_hi);
939 addend = addend_lo;
940
941 tmp = gen_reg_rtx (Pmode);
942 emit_insn (gen_load_tprel (tmp, op1));
943
944 if (!register_operand (op0, Pmode))
945 op0 = gen_reg_rtx (Pmode);
946 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
947 break;
948
949 case TLS_MODEL_LOCAL_EXEC:
950 if (!register_operand (op0, Pmode))
951 op0 = gen_reg_rtx (Pmode);
952
953 op1 = orig_op1;
954 addend = 0;
955 if (TARGET_TLS64)
956 {
957 emit_insn (gen_load_tprel (op0, op1));
958 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
959 }
960 else
961 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
962 break;
963
964 default:
965 gcc_unreachable ();
966 }
967
968 if (addend)
969 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
970 orig_op0, 1, OPTAB_DIRECT);
971 if (orig_op0 == op0)
972 return NULL_RTX;
973 if (GET_MODE (orig_op0) == Pmode)
974 return op0;
975 return gen_lowpart (GET_MODE (orig_op0), op0);
976 }
977
978 rtx
979 ia64_expand_move (rtx op0, rtx op1)
980 {
981 enum machine_mode mode = GET_MODE (op0);
982
983 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
984 op1 = force_reg (mode, op1);
985
986 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
987 {
988 HOST_WIDE_INT addend = 0;
989 enum tls_model tls_kind;
990 rtx sym = op1;
991
992 if (GET_CODE (op1) == CONST
993 && GET_CODE (XEXP (op1, 0)) == PLUS
994 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
995 {
996 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
997 sym = XEXP (XEXP (op1, 0), 0);
998 }
999
1000 tls_kind = tls_symbolic_operand_type (sym);
1001 if (tls_kind)
1002 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1003
1004 if (any_offset_symbol_operand (sym, mode))
1005 addend = 0;
1006 else if (aligned_offset_symbol_operand (sym, mode))
1007 {
1008 HOST_WIDE_INT addend_lo, addend_hi;
1009
1010 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1011 addend_hi = addend - addend_lo;
1012
1013 if (addend_lo != 0)
1014 {
1015 op1 = plus_constant (sym, addend_hi);
1016 addend = addend_lo;
1017 }
1018 else
1019 addend = 0;
1020 }
1021 else
1022 op1 = sym;
1023
1024 if (reload_completed)
1025 {
1026 /* We really should have taken care of this offset earlier. */
1027 gcc_assert (addend == 0);
1028 if (ia64_expand_load_address (op0, op1))
1029 return NULL_RTX;
1030 }
1031
1032 if (addend)
1033 {
1034 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1035
1036 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1037
1038 op1 = expand_simple_binop (mode, PLUS, subtarget,
1039 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1040 if (op0 == op1)
1041 return NULL_RTX;
1042 }
1043 }
1044
1045 return op1;
1046 }
1047
1048 /* Split a move from OP1 to OP0 conditional on COND. */
1049
1050 void
1051 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1052 {
1053 rtx insn, first = get_last_insn ();
1054
1055 emit_move_insn (op0, op1);
1056
1057 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1058 if (INSN_P (insn))
1059 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1060 PATTERN (insn));
1061 }
1062
1063 /* Split a post-reload TImode or TFmode reference into two DImode
1064 components. This is made extra difficult by the fact that we do
1065 not get any scratch registers to work with, because reload cannot
1066 be prevented from giving us a scratch that overlaps the register
1067 pair involved. So instead, when addressing memory, we tweak the
1068 pointer register up and back down with POST_INCs. Or up and not
1069 back down when we can get away with it.
1070
1071 REVERSED is true when the loads must be done in reversed order
1072 (high word first) for correctness. DEAD is true when the pointer
1073 dies with the second insn we generate and therefore the second
1074 address must not carry a postmodify.
1075
1076 May return an insn which is to be emitted after the moves. */
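/* For instance, when IN is a MEM whose address is a plain register and
   REVERSED is false, the word at offset 0 is accessed through a POST_INC
   of the base and the word at offset 8 through a POST_DEC that restores
   the pointer, or with no postmodify at all when DEAD is set (the "up and
   not back down" case mentioned above).  */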
1077
1078 static rtx
1079 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1080 {
1081 rtx fixup = 0;
1082
1083 switch (GET_CODE (in))
1084 {
1085 case REG:
1086 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1087 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1088 break;
1089
1090 case CONST_INT:
1091 case CONST_DOUBLE:
1092 /* Cannot occur reversed. */
1093 gcc_assert (!reversed);
1094
1095 if (GET_MODE (in) != TFmode)
1096 split_double (in, &out[0], &out[1]);
1097 else
1098 /* split_double does not understand how to split a TFmode
1099 quantity into a pair of DImode constants. */
1100 {
1101 REAL_VALUE_TYPE r;
1102 unsigned HOST_WIDE_INT p[2];
1103 long l[4]; /* TFmode is 128 bits */
1104
1105 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1106 real_to_target (l, &r, TFmode);
1107
1108 if (FLOAT_WORDS_BIG_ENDIAN)
1109 {
1110 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1111 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1112 }
1113 else
1114 {
1115 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1116 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1117 }
1118 out[0] = GEN_INT (p[0]);
1119 out[1] = GEN_INT (p[1]);
1120 }
1121 break;
1122
1123 case MEM:
1124 {
1125 rtx base = XEXP (in, 0);
1126 rtx offset;
1127
1128 switch (GET_CODE (base))
1129 {
1130 case REG:
1131 if (!reversed)
1132 {
1133 out[0] = adjust_automodify_address
1134 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1135 out[1] = adjust_automodify_address
1136 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1137 }
1138 else
1139 {
1140 /* Reversal requires a pre-increment, which can only
1141 be done as a separate insn. */
1142 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1143 out[0] = adjust_automodify_address
1144 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1145 out[1] = adjust_address (in, DImode, 0);
1146 }
1147 break;
1148
1149 case POST_INC:
1150 gcc_assert (!reversed && !dead);
1151
1152 /* Just do the increment in two steps. */
1153 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1154 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1155 break;
1156
1157 case POST_DEC:
1158 gcc_assert (!reversed && !dead);
1159
1160 /* Add 8, subtract 24; the net effect is the original post-decrement of 16. */
1161 base = XEXP (base, 0);
1162 out[0] = adjust_automodify_address
1163 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1164 out[1] = adjust_automodify_address
1165 (in, DImode,
1166 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1167 8);
1168 break;
1169
1170 case POST_MODIFY:
1171 gcc_assert (!reversed && !dead);
1172
1173 /* Extract and adjust the modification. This case is
1174 trickier than the others, because we might have an
1175 index register, or we might have a combined offset that
1176 doesn't fit a signed 9-bit displacement field. We can
1177 assume the incoming expression is already legitimate. */
1178 offset = XEXP (base, 1);
1179 base = XEXP (base, 0);
1180
1181 out[0] = adjust_automodify_address
1182 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1183
1184 if (GET_CODE (XEXP (offset, 1)) == REG)
1185 {
1186 /* Can't adjust the postmodify to match. Emit the
1187 original, then a separate addition insn. */
1188 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1189 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1190 }
1191 else
1192 {
1193 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1194 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1195 {
1196 /* Again the postmodify cannot be made to match,
1197 but in this case it's more efficient to get rid
1198 of the postmodify entirely and fix up with an
1199 add insn. */
1200 out[1] = adjust_automodify_address (in, DImode, base, 8);
1201 fixup = gen_adddi3
1202 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1203 }
1204 else
1205 {
1206 /* Combined offset still fits in the displacement field.
1207 (We cannot overflow it at the high end.) */
1208 out[1] = adjust_automodify_address
1209 (in, DImode, gen_rtx_POST_MODIFY
1210 (Pmode, base, gen_rtx_PLUS
1211 (Pmode, base,
1212 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1213 8);
1214 }
1215 }
1216 break;
1217
1218 default:
1219 gcc_unreachable ();
1220 }
1221 break;
1222 }
1223
1224 default:
1225 gcc_unreachable ();
1226 }
1227
1228 return fixup;
1229 }
1230
1231 /* Split a TImode or TFmode move instruction after reload.
1232 This is used by *movtf_internal and *movti_internal. */
1233 void
1234 ia64_split_tmode_move (rtx operands[])
1235 {
1236 rtx in[2], out[2], insn;
1237 rtx fixup[2];
1238 bool dead = false;
1239 bool reversed = false;
1240
1241 /* It is possible for reload to decide to overwrite a pointer with
1242 the value it points to. In that case we have to do the loads in
1243 the appropriate order so that the pointer is not destroyed too
1244 early. Also we must not generate a postmodify for that second
1245 load, or rws_access_regno will die. */
1246 if (GET_CODE (operands[1]) == MEM
1247 && reg_overlap_mentioned_p (operands[0], operands[1]))
1248 {
1249 rtx base = XEXP (operands[1], 0);
1250 while (GET_CODE (base) != REG)
1251 base = XEXP (base, 0);
1252
1253 if (REGNO (base) == REGNO (operands[0]))
1254 reversed = true;
1255 dead = true;
1256 }
1257 /* Another reason to do the moves in reversed order is if the first
1258 element of the target register pair is also the second element of
1259 the source register pair. */
1260 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1261 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1262 reversed = true;
1263
1264 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1265 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1266
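/* Attach a REG_INC note to INSN for any autoincrement address (POST_INC,
   POST_DEC or POST_MODIFY) appearing in EXP, so that later passes see the
   side effect on the base register.  */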
1267 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1268 if (GET_CODE (EXP) == MEM \
1269 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1270 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1271 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1272 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1273 XEXP (XEXP (EXP, 0), 0), \
1274 REG_NOTES (INSN))
1275
1276 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1277 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1278 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1279
1280 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1281 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1282 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1283
1284 if (fixup[0])
1285 emit_insn (fixup[0]);
1286 if (fixup[1])
1287 emit_insn (fixup[1]);
1288
1289 #undef MAYBE_ADD_REG_INC_NOTE
1290 }
1291
1292 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1293 through memory plus an extra GR scratch register. Except that you can
1294 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1295 SECONDARY_RELOAD_CLASS, but not both.
1296
1297 We got into problems in the first place by allowing a construct like
1298 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1299 This solution attempts to prevent this situation from occurring. When
1300 we see something like the above, we spill the inner register to memory. */
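/* Concretely, an operand of the form (subreg:XF (reg:TI N)) is rewritten
   below as a reference in the requested mode to a fresh 16-byte stack
   temporary that first receives the TImode register; with FORCE set, a
   plain register is likewise spilled to a stack temporary.  */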
1301
1302 static rtx
1303 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1304 {
1305 if (GET_CODE (in) == SUBREG
1306 && GET_MODE (SUBREG_REG (in)) == TImode
1307 && GET_CODE (SUBREG_REG (in)) == REG)
1308 {
1309 rtx memt = assign_stack_temp (TImode, 16, 0);
1310 emit_move_insn (memt, SUBREG_REG (in));
1311 return adjust_address (memt, mode, 0);
1312 }
1313 else if (force && GET_CODE (in) == REG)
1314 {
1315 rtx memx = assign_stack_temp (mode, 16, 0);
1316 emit_move_insn (memx, in);
1317 return memx;
1318 }
1319 else
1320 return in;
1321 }
1322
1323 /* Expand the movxf or movrf pattern (MODE says which) with the given
1324 OPERANDS, returning true if the pattern should then invoke
1325 DONE. */
1326
1327 bool
1328 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1329 {
1330 rtx op0 = operands[0];
1331
1332 if (GET_CODE (op0) == SUBREG)
1333 op0 = SUBREG_REG (op0);
1334
1335 /* We must support XFmode loads into general registers for stdarg/vararg,
1336 unprototyped calls, and a rare case where a long double is passed as
1337 an argument after a float HFA fills the FP registers. We split them into
1338 DImode loads for convenience. We also need to support XFmode stores
1339 for the last case. This case does not happen for stdarg/vararg routines,
1340 because we do a block store to memory of unnamed arguments. */
1341
1342 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1343 {
1344 rtx out[2];
1345
1346 /* We're hoping to transform everything that deals with XFmode
1347 quantities and GR registers early in the compiler. */
1348 gcc_assert (can_create_pseudo_p ());
1349
1350 /* Struct to register can just use TImode instead. */
1351 if ((GET_CODE (operands[1]) == SUBREG
1352 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1353 || (GET_CODE (operands[1]) == REG
1354 && GR_REGNO_P (REGNO (operands[1]))))
1355 {
1356 rtx op1 = operands[1];
1357
1358 if (GET_CODE (op1) == SUBREG)
1359 op1 = SUBREG_REG (op1);
1360 else
1361 op1 = gen_rtx_REG (TImode, REGNO (op1));
1362
1363 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1364 return true;
1365 }
1366
1367 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1368 {
1369 /* Don't word-swap when reading in the constant. */
1370 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1371 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1372 0, mode));
1373 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1374 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1375 0, mode));
1376 return true;
1377 }
1378
1379 /* If the quantity is in a register not known to be GR, spill it. */
1380 if (register_operand (operands[1], mode))
1381 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1382
1383 gcc_assert (GET_CODE (operands[1]) == MEM);
1384
1385 /* Don't word-swap when reading in the value. */
1386 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1387 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1388
1389 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1390 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1391 return true;
1392 }
1393
1394 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1395 {
1396 /* We're hoping to transform everything that deals with XFmode
1397 quantities and GR registers early in the compiler. */
1398 gcc_assert (can_create_pseudo_p ());
1399
1400 /* Op0 can't be a GR_REG here, as that case is handled above.
1401 If op0 is a register, then we spill op1, so that we now have a
1402 MEM operand. This requires creating an XFmode subreg of a TImode reg
1403 to force the spill. */
1404 if (register_operand (operands[0], mode))
1405 {
1406 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1407 op1 = gen_rtx_SUBREG (mode, op1, 0);
1408 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1409 }
1410
1411 else
1412 {
1413 rtx in[2];
1414
1415 gcc_assert (GET_CODE (operands[0]) == MEM);
1416
1417 /* Don't word-swap when writing out the value. */
1418 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1419 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1420
1421 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1422 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1423 return true;
1424 }
1425 }
1426
1427 if (!reload_in_progress && !reload_completed)
1428 {
1429 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1430
1431 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1432 {
1433 rtx memt, memx, in = operands[1];
1434 if (CONSTANT_P (in))
1435 in = validize_mem (force_const_mem (mode, in));
1436 if (GET_CODE (in) == MEM)
1437 memt = adjust_address (in, TImode, 0);
1438 else
1439 {
1440 memt = assign_stack_temp (TImode, 16, 0);
1441 memx = adjust_address (memt, mode, 0);
1442 emit_move_insn (memx, in);
1443 }
1444 emit_move_insn (op0, memt);
1445 return true;
1446 }
1447
1448 if (!ia64_move_ok (operands[0], operands[1]))
1449 operands[1] = force_reg (mode, operands[1]);
1450 }
1451
1452 return false;
1453 }
1454
1455 /* Emit comparison instruction if necessary, returning the expression
1456 that holds the compare result in the proper mode. */
1457
1458 static GTY(()) rtx cmptf_libfunc;
1459
1460 rtx
1461 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1462 {
1463 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1464 rtx cmp;
1465
1466 /* If we have a BImode input, then we already have a compare result, and
1467 do not need to emit another comparison. */
1468 if (GET_MODE (op0) == BImode)
1469 {
1470 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1471 cmp = op0;
1472 }
1473 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1474 magic number as its third argument that indicates what to do.
1475 The return value is an integer to be compared against zero. */
1476 else if (GET_MODE (op0) == TFmode)
1477 {
1478 enum qfcmp_magic {
1479 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1480 QCMP_UNORD = 2,
1481 QCMP_EQ = 4,
1482 QCMP_LT = 8,
1483 QCMP_GT = 16
1484 } magic;
1485 enum rtx_code ncode;
1486 rtx ret, insns;
1487
1488 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1489 switch (code)
1490 {
1491 /* 1 = equal, 0 = not equal. Equality operators do
1492 not raise FP_INVALID when given an SNaN operand. */
1493 case EQ: magic = QCMP_EQ; ncode = NE; break;
1494 case NE: magic = QCMP_EQ; ncode = EQ; break;
1495 /* isunordered() from C99. */
1496 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1497 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1498 /* Relational operators raise FP_INVALID when given
1499 an SNaN operand. */
1500 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1501 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1502 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1503 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1504 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1505 Expanders for buneq etc. would have to be added to ia64.md
1506 for this to be useful. */
1507 default: gcc_unreachable ();
1508 }
1509
1510 start_sequence ();
1511
1512 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1513 op0, TFmode, op1, TFmode,
1514 GEN_INT (magic), DImode);
1515 cmp = gen_reg_rtx (BImode);
1516 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1517 gen_rtx_fmt_ee (ncode, BImode,
1518 ret, const0_rtx)));
1519
1520 insns = get_insns ();
1521 end_sequence ();
1522
1523 emit_libcall_block (insns, cmp, cmp,
1524 gen_rtx_fmt_ee (code, BImode, op0, op1));
1525 code = NE;
1526 }
1527 else
1528 {
1529 cmp = gen_reg_rtx (BImode);
1530 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1531 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1532 code = NE;
1533 }
1534
1535 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1536 }
1537
1538 /* Generate an integral vector comparison. Return true if the condition has
1539 been reversed, and so the sense of the comparison should be inverted. */
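/* The comparison is first canonicalized to EQ, GT or GTU, reversing the
   result sense or swapping the operands as needed; GTU is then rewritten
   in terms of comparisons the hardware does provide.  */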
1540
1541 static bool
1542 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1543 rtx dest, rtx op0, rtx op1)
1544 {
1545 bool negate = false;
1546 rtx x;
1547
1548 /* Canonicalize the comparison to EQ, GT, GTU. */
1549 switch (code)
1550 {
1551 case EQ:
1552 case GT:
1553 case GTU:
1554 break;
1555
1556 case NE:
1557 case LE:
1558 case LEU:
1559 code = reverse_condition (code);
1560 negate = true;
1561 break;
1562
1563 case GE:
1564 case GEU:
1565 code = reverse_condition (code);
1566 negate = true;
1567 /* FALLTHRU */
1568
1569 case LT:
1570 case LTU:
1571 code = swap_condition (code);
1572 x = op0, op0 = op1, op1 = x;
1573 break;
1574
1575 default:
1576 gcc_unreachable ();
1577 }
1578
1579 /* Unsigned parallel compare is not supported by the hardware. Play some
1580 tricks to turn this into a comparison the hardware can do directly. */
1581 if (code == GTU)
1582 {
1583 switch (mode)
1584 {
1585 case V2SImode:
1586 {
1587 rtx t1, t2, mask;
1588
1589 /* Flip the sign bit of both operands, turning the unsigned
1590 comparison into a signed one: A >u B is equivalent to
1591 (A ^ 0x80000000) >s (B ^ 0x80000000). */
1592 mask = GEN_INT (-0x80000000);
1593 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1594 mask = force_reg (V2SImode, mask);
1595
1596 t1 = gen_reg_rtx (V2SImode);
1597 emit_insn (gen_xorv2si3 (t1, op0, mask));
1598 t2 = gen_reg_rtx (V2SImode);
1599 emit_insn (gen_xorv2si3 (t2, op1, mask));
1600
1601 code = GT;
1602 op0 = t1;
1603 op1 = t2;
1608 }
1609 break;
1610
1611 case V8QImode:
1612 case V4HImode:
1613 /* Perform a parallel unsigned saturating subtraction. */
1614 x = gen_reg_rtx (mode);
1615 emit_insn (gen_rtx_SET (VOIDmode, x,
1616 gen_rtx_US_MINUS (mode, op0, op1)));
1617
1618 code = EQ;
1619 op0 = x;
1620 op1 = CONST0_RTX (mode);
1621 negate = !negate;
1622 break;
1623
1624 default:
1625 gcc_unreachable ();
1626 }
1627 }
1628
1629 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1630 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1631
1632 return negate;
1633 }
1634
1635 /* Emit an integral vector conditional move. */
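/* The result is computed as (CMP AND true-arm) IOR (NOT CMP AND false-arm),
   with the shortcuts below when either arm is the zero vector.  */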
1636
1637 void
1638 ia64_expand_vecint_cmov (rtx operands[])
1639 {
1640 enum machine_mode mode = GET_MODE (operands[0]);
1641 enum rtx_code code = GET_CODE (operands[3]);
1642 bool negate;
1643 rtx cmp, x, ot, of;
1644
1645 cmp = gen_reg_rtx (mode);
1646 negate = ia64_expand_vecint_compare (code, mode, cmp,
1647 operands[4], operands[5]);
1648
1649 ot = operands[1+negate];
1650 of = operands[2-negate];
1651
1652 if (ot == CONST0_RTX (mode))
1653 {
1654 if (of == CONST0_RTX (mode))
1655 {
1656 emit_move_insn (operands[0], ot);
1657 return;
1658 }
1659
1660 x = gen_rtx_NOT (mode, cmp);
1661 x = gen_rtx_AND (mode, x, of);
1662 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1663 }
1664 else if (of == CONST0_RTX (mode))
1665 {
1666 x = gen_rtx_AND (mode, cmp, ot);
1667 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1668 }
1669 else
1670 {
1671 rtx t, f;
1672
1673 t = gen_reg_rtx (mode);
1674 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1675 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1676
1677 f = gen_reg_rtx (mode);
1678 x = gen_rtx_NOT (mode, cmp);
1679 x = gen_rtx_AND (mode, x, operands[2-negate]);
1680 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1681
1682 x = gen_rtx_IOR (mode, t, f);
1683 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1684 }
1685 }
1686
1687 /* Emit an integral vector min or max operation. Return true if all done. */
1688
1689 bool
1690 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1691 rtx operands[])
1692 {
1693 rtx xops[6];
1694
1695 /* These four combinations are supported directly. */
1696 if (mode == V8QImode && (code == UMIN || code == UMAX))
1697 return false;
1698 if (mode == V4HImode && (code == SMIN || code == SMAX))
1699 return false;
1700
1701 /* This combination can be implemented with only saturating subtraction. */
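/* That is, UMAX (a, b) is computed as (a -us b) + b: the saturating
   difference is a - b when a >= b and 0 otherwise.  */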
1702 if (mode == V4HImode && code == UMAX)
1703 {
1704 rtx x, tmp = gen_reg_rtx (mode);
1705
1706 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1707 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1708
1709 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1710 return true;
1711 }
1712
1713 /* Everything else implemented via vector comparisons. */
1714 xops[0] = operands[0];
1715 xops[4] = xops[1] = operands[1];
1716 xops[5] = xops[2] = operands[2];
1717
1718 switch (code)
1719 {
1720 case UMIN:
1721 code = LTU;
1722 break;
1723 case UMAX:
1724 code = GTU;
1725 break;
1726 case SMIN:
1727 code = LT;
1728 break;
1729 case SMAX:
1730 code = GT;
1731 break;
1732 default:
1733 gcc_unreachable ();
1734 }
1735 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1736
1737 ia64_expand_vecint_cmov (xops);
1738 return true;
1739 }
1740
1741 /* Emit an integral vector widening sum operation. */
1742
1743 void
1744 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1745 {
1746 rtx l, h, x, s;
1747 enum machine_mode wmode, mode;
1748 rtx (*unpack_l) (rtx, rtx, rtx);
1749 rtx (*unpack_h) (rtx, rtx, rtx);
1750 rtx (*plus) (rtx, rtx, rtx);
1751
1752 wmode = GET_MODE (operands[0]);
1753 mode = GET_MODE (operands[1]);
1754
1755 switch (mode)
1756 {
1757 case V8QImode:
1758 unpack_l = gen_unpack1_l;
1759 unpack_h = gen_unpack1_h;
1760 plus = gen_addv4hi3;
1761 break;
1762 case V4HImode:
1763 unpack_l = gen_unpack2_l;
1764 unpack_h = gen_unpack2_h;
1765 plus = gen_addv2si3;
1766 break;
1767 default:
1768 gcc_unreachable ();
1769 }
1770
1771 /* Fill in x with the sign extension of each element in op1. */
1772 if (unsignedp)
1773 x = CONST0_RTX (mode);
1774 else
1775 {
1776 bool neg;
1777
1778 x = gen_reg_rtx (mode);
1779
1780 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1781 CONST0_RTX (mode));
1782 gcc_assert (!neg);
1783 }
1784
1785 l = gen_reg_rtx (wmode);
1786 h = gen_reg_rtx (wmode);
1787 s = gen_reg_rtx (wmode);
1788
1789 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1790 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1791 emit_insn (plus (s, l, operands[2]));
1792 emit_insn (plus (operands[0], h, s));
1793 }
1794
1795 /* Emit a signed or unsigned V8QI dot product operation. */
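/* The expansion widens both V8QI operands into V4HI halves as above,
   forms four V2SI partial products with pmpy2, and folds them into the
   accumulator, roughly
       op0 = (p3 + p4) + ((p1 + p2) + op3).
   (Sketch of the insn sequence emitted below.)  */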
1796
1797 void
1798 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1799 {
1800 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1801
1802 /* Fill in x1 and x2 with the sign extension of each element. */
1803 if (unsignedp)
1804 x1 = x2 = CONST0_RTX (V8QImode);
1805 else
1806 {
1807 bool neg;
1808
1809 x1 = gen_reg_rtx (V8QImode);
1810 x2 = gen_reg_rtx (V8QImode);
1811
1812 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1813 CONST0_RTX (V8QImode));
1814 gcc_assert (!neg);
1815 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1816 CONST0_RTX (V8QImode));
1817 gcc_assert (!neg);
1818 }
1819
1820 l1 = gen_reg_rtx (V4HImode);
1821 l2 = gen_reg_rtx (V4HImode);
1822 h1 = gen_reg_rtx (V4HImode);
1823 h2 = gen_reg_rtx (V4HImode);
1824
1825 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1826 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1827 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1828 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1829
1830 p1 = gen_reg_rtx (V2SImode);
1831 p2 = gen_reg_rtx (V2SImode);
1832 p3 = gen_reg_rtx (V2SImode);
1833 p4 = gen_reg_rtx (V2SImode);
1834 emit_insn (gen_pmpy2_r (p1, l1, l2));
1835 emit_insn (gen_pmpy2_l (p2, l1, l2));
1836 emit_insn (gen_pmpy2_r (p3, h1, h2));
1837 emit_insn (gen_pmpy2_l (p4, h1, h2));
1838
1839 s1 = gen_reg_rtx (V2SImode);
1840 s2 = gen_reg_rtx (V2SImode);
1841 s3 = gen_reg_rtx (V2SImode);
1842 emit_insn (gen_addv2si3 (s1, p1, p2));
1843 emit_insn (gen_addv2si3 (s2, p3, p4));
1844 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1845 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1846 }
1847
1848 /* Emit the appropriate sequence for a call. */
1849
1850 void
1851 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1852 int sibcall_p)
1853 {
1854 rtx insn, b0;
1855
1856 addr = XEXP (addr, 0);
1857 addr = convert_memory_address (DImode, addr);
1858 b0 = gen_rtx_REG (DImode, R_BR (0));
1859
1860 /* ??? Should do this for functions known to bind locally too. */
1861 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1862 {
1863 if (sibcall_p)
1864 insn = gen_sibcall_nogp (addr);
1865 else if (! retval)
1866 insn = gen_call_nogp (addr, b0);
1867 else
1868 insn = gen_call_value_nogp (retval, addr, b0);
1869 insn = emit_call_insn (insn);
1870 }
1871 else
1872 {
1873 if (sibcall_p)
1874 insn = gen_sibcall_gp (addr);
1875 else if (! retval)
1876 insn = gen_call_gp (addr, b0);
1877 else
1878 insn = gen_call_value_gp (retval, addr, b0);
1879 insn = emit_call_insn (insn);
1880
1881 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1882 }
1883
1884 if (sibcall_p)
1885 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1886 }
1887
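/* Record that code using the register chosen for frame register R has
   been emitted, and check that any later choice for R agrees with it.  */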
1888 static void
1889 reg_emitted (enum ia64_frame_regs r)
1890 {
1891 if (emitted_frame_related_regs[r] == 0)
1892 emitted_frame_related_regs[r] = current_frame_info.r[r];
1893 else
1894 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1895 }
1896
1897 static int
1898 get_reg (enum ia64_frame_regs r)
1899 {
1900 reg_emitted (r);
1901 return current_frame_info.r[r];
1902 }
1903
1904 static bool
1905 is_emitted (int regno)
1906 {
1907 enum ia64_frame_regs r;
1908
1909 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1910 if (emitted_frame_related_regs[r] == regno)
1911 return true;
1912 return false;
1913 }
1914
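/* Reload the GP (r1) after a call that may have changed it, either from
   the general register recorded in reg_save_gp or from its slot in the
   spill area addressed relative to the frame or stack pointer.  */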
1915 void
1916 ia64_reload_gp (void)
1917 {
1918 rtx tmp;
1919
1920 if (current_frame_info.r[reg_save_gp])
1921 {
1922 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1923 }
1924 else
1925 {
1926 HOST_WIDE_INT offset;
1927 rtx offset_r;
1928
1929 offset = (current_frame_info.spill_cfa_off
1930 + current_frame_info.spill_size);
1931 if (frame_pointer_needed)
1932 {
1933 tmp = hard_frame_pointer_rtx;
1934 offset = -offset;
1935 }
1936 else
1937 {
1938 tmp = stack_pointer_rtx;
1939 offset = current_frame_info.total_size - offset;
1940 }
1941
1942 offset_r = GEN_INT (offset);
1943 if (satisfies_constraint_I (offset_r))
1944 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1945 else
1946 {
1947 emit_move_insn (pic_offset_table_rtx, offset_r);
1948 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1949 pic_offset_table_rtx, tmp));
1950 }
1951
1952 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1953 }
1954
1955 emit_move_insn (pic_offset_table_rtx, tmp);
1956 }
1957
1958 void
1959 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1960 rtx scratch_b, int noreturn_p, int sibcall_p)
1961 {
1962 rtx insn;
1963 bool is_desc = false;
1964
1965 /* If we find we're calling through a register, then we're actually
1966 calling through a descriptor, so load up the values. */
1967 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1968 {
1969 rtx tmp;
1970 bool addr_dead_p;
1971
1972 /* ??? We are currently constrained to *not* use peep2, because
1973 we can legitimately change the global lifetime of the GP
1974 (in the form of killing where previously live). This is
1975 because a call through a descriptor doesn't use the previous
1976 value of the GP, while a direct call does, and we do not
1977 commit to either form until the split here.
1978
1979 That said, this means that we lack precise life info for
1980 whether ADDR is dead after this call. This is not terribly
1981 important, since we can fix things up essentially for free
1982 with the POST_DEC below, but it's nice to not use it when we
1983 can immediately tell it's not necessary. */
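      /* A function descriptor is two 8-byte words: the code entry point
         followed by the GP value, hence the two loads below with ADDR
         stepping from the first word to the second.  (Descriptive note;
         see the Itanium software conventions for the precise layout.)  */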
1984 addr_dead_p = ((noreturn_p || sibcall_p
1985 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1986 REGNO (addr)))
1987 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1988
1989 /* Load the code address into scratch_b. */
1990 tmp = gen_rtx_POST_INC (Pmode, addr);
1991 tmp = gen_rtx_MEM (Pmode, tmp);
1992 emit_move_insn (scratch_r, tmp);
1993 emit_move_insn (scratch_b, scratch_r);
1994
1995 /* Load the GP address. If ADDR is not dead here, then we must
1996 revert the change made above via the POST_INCREMENT. */
1997 if (!addr_dead_p)
1998 tmp = gen_rtx_POST_DEC (Pmode, addr);
1999 else
2000 tmp = addr;
2001 tmp = gen_rtx_MEM (Pmode, tmp);
2002 emit_move_insn (pic_offset_table_rtx, tmp);
2003
2004 is_desc = true;
2005 addr = scratch_b;
2006 }
2007
2008 if (sibcall_p)
2009 insn = gen_sibcall_nogp (addr);
2010 else if (retval)
2011 insn = gen_call_value_nogp (retval, addr, retaddr);
2012 else
2013 insn = gen_call_nogp (addr, retaddr);
2014 emit_call_insn (insn);
2015
2016 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2017 ia64_reload_gp ();
2018 }
2019
2020 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2021
2022 This differs from the generic code in that we know about the zero-extending
2023 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2024 also know that ld.acq+cmpxchg.rel equals a full barrier.
2025
2026 The loop we want to generate looks like
2027
2028 cmp_reg = mem;
2029 label:
2030 old_reg = cmp_reg;
2031 new_reg = cmp_reg op val;
2032 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2033 if (cmp_reg != old_reg)
2034 goto label;
2035
2036 Note that we only do the plain load from memory once. Subsequent
2037 iterations use the value loaded by the compare-and-swap pattern. */
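/* As a rough illustration only (register names, predicates, hints and
   bundling are not the literal compiler output), an SImode PLUS that is
   not handled by fetchadd corresponds to something along the lines of

	ld4.acq      r_cmp = [r_mem]
     .L0:
	mov          r_old = r_cmp
	mov          ar.ccv = r_cmp
	add          r_new = r_cmp, r_val
	cmpxchg4.rel r_cmp = [r_mem], r_new, ar.ccv
	cmp.ne       p6, p7 = r_cmp, r_old
   (p6) br.cond      .L0  */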
2038
2039 void
2040 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2041 rtx old_dst, rtx new_dst)
2042 {
2043 enum machine_mode mode = GET_MODE (mem);
2044 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2045 enum insn_code icode;
2046
2047 /* Special case for using fetchadd. */
2048 if ((mode == SImode || mode == DImode)
2049 && (code == PLUS || code == MINUS)
2050 && fetchadd_operand (val, mode))
2051 {
2052 if (code == MINUS)
2053 val = GEN_INT (-INTVAL (val));
2054
2055 if (!old_dst)
2056 old_dst = gen_reg_rtx (mode);
2057
2058 emit_insn (gen_memory_barrier ());
2059
2060 if (mode == SImode)
2061 icode = CODE_FOR_fetchadd_acq_si;
2062 else
2063 icode = CODE_FOR_fetchadd_acq_di;
2064 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2065
2066 if (new_dst)
2067 {
2068 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2069 true, OPTAB_WIDEN);
2070 if (new_reg != new_dst)
2071 emit_move_insn (new_dst, new_reg);
2072 }
2073 return;
2074 }
2075
2076 /* Because of the volatile mem read, we get an ld.acq, which is the
2077 front half of the full barrier; the back half is the cmpxchg.rel. */
2078 gcc_assert (MEM_VOLATILE_P (mem));
2079
2080 old_reg = gen_reg_rtx (DImode);
2081 cmp_reg = gen_reg_rtx (DImode);
2082 label = gen_label_rtx ();
2083
2084 if (mode != DImode)
2085 {
2086 val = simplify_gen_subreg (DImode, val, mode, 0);
2087 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2088 }
2089 else
2090 emit_move_insn (cmp_reg, mem);
2091
2092 emit_label (label);
2093
2094 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2095 emit_move_insn (old_reg, cmp_reg);
2096 emit_move_insn (ar_ccv, cmp_reg);
2097
2098 if (old_dst)
2099 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2100
2101 new_reg = cmp_reg;
2102 if (code == NOT)
2103 {
2104 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2105 code = AND;
2106 }
2107 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2108 true, OPTAB_DIRECT);
2109
2110 if (mode != DImode)
2111 new_reg = gen_lowpart (mode, new_reg);
2112 if (new_dst)
2113 emit_move_insn (new_dst, new_reg);
2114
2115 switch (mode)
2116 {
2117 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2118 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2119 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2120 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2121 default:
2122 gcc_unreachable ();
2123 }
2124
2125 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2126
2127 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2128 }
2129 \f
2130 /* Begin the assembly file. */
2131
2132 static void
2133 ia64_file_start (void)
2134 {
2135 /* Variable tracking should be run after all optimizations which change order
2136 of insns. It also needs a valid CFG. This can't be done in
2137 ia64_override_options, because flag_var_tracking is finalized after
2138 that. */
2139 ia64_flag_var_tracking = flag_var_tracking;
2140 flag_var_tracking = 0;
2141
2142 default_file_start ();
2143 emit_safe_across_calls ();
2144 }
2145
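/* Emit the .pred.safe_across_calls directive naming the ranges of
   predicate registers that are preserved across calls; with the default
   register set this prints something like

	.pred.safe_across_calls p1-p5,p16-p63

   though the exact ranges follow call_used_regs.  */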
2146 void
2147 emit_safe_across_calls (void)
2148 {
2149 unsigned int rs, re;
2150 int out_state;
2151
2152 rs = 1;
2153 out_state = 0;
2154 while (1)
2155 {
2156 while (rs < 64 && call_used_regs[PR_REG (rs)])
2157 rs++;
2158 if (rs >= 64)
2159 break;
2160 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2161 continue;
2162 if (out_state == 0)
2163 {
2164 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2165 out_state = 1;
2166 }
2167 else
2168 fputc (',', asm_out_file);
2169 if (re == rs + 1)
2170 fprintf (asm_out_file, "p%u", rs);
2171 else
2172 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2173 rs = re + 1;
2174 }
2175 if (out_state)
2176 fputc ('\n', asm_out_file);
2177 }
2178
2179 /* Globalize a declaration. */
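/* For a declaration foo carrying __attribute__((version_id ("1.0"))),
   this emits roughly

	.alias foo#, "foo{1.0}"

   before the label is globalized and, for functions, given a .type
   directive.  (Illustrative output based on the format string below.)  */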
2180
2181 static void
2182 ia64_globalize_decl_name (FILE * stream, tree decl)
2183 {
2184 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2185 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2186 if (version_attr)
2187 {
2188 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2189 const char *p = TREE_STRING_POINTER (v);
2190 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2191 }
2192 targetm.asm_out.globalize_label (stream, name);
2193 if (TREE_CODE (decl) == FUNCTION_DECL)
2194 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2195 }
2196
2197 /* Helper function for ia64_compute_frame_size: find an appropriate general
2198 register to spill the special register R to.  Registers in GR0 to GR31
2199 already handed out by this routine are recorded in
2200 current_frame_info.gr_used_mask.  TRY_LOCALS is true if we should attempt
2201 to locate a local regnum. */
2201
2202 static int
2203 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2204 {
2205 int regno;
2206
2207 if (emitted_frame_related_regs[r] != 0)
2208 {
2209 regno = emitted_frame_related_regs[r];
2210 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed))
2211 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2212 else if (current_function_is_leaf
2213 && regno >= GR_REG (1) && regno <= GR_REG (31))
2214 current_frame_info.gr_used_mask |= 1 << regno;
2215
2216 return regno;
2217 }
2218
2219 /* If this is a leaf function, first try an otherwise unused
2220 call-clobbered register. */
2221 if (current_function_is_leaf)
2222 {
2223 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2224 if (! df_regs_ever_live_p (regno)
2225 && call_used_regs[regno]
2226 && ! fixed_regs[regno]
2227 && ! global_regs[regno]
2228 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2229 && ! is_emitted (regno))
2230 {
2231 current_frame_info.gr_used_mask |= 1 << regno;
2232 return regno;
2233 }
2234 }
2235
2236 if (try_locals)
2237 {
2238 regno = current_frame_info.n_local_regs;
2239 /* If there is a frame pointer, then we can't use loc79, because
2240 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2241 reg_name switching code in ia64_expand_prologue. */
2242 if (regno < (80 - frame_pointer_needed))
2243 {
2244 current_frame_info.n_local_regs = regno + 1;
2245 return LOC_REG (0) + regno;
2246 }
2247 }
2248
2249 /* Failed to find a general register to spill to. Must use stack. */
2250 return 0;
2251 }
2252
2253 /* In order to make for nice schedules, we try to allocate every temporary
2254 to a different register. We must of course stay away from call-saved,
2255 fixed, and global registers. We must also stay away from registers
2256 allocated in current_frame_info.gr_used_mask, since those include regs
2257 used all through the prologue.
2258
2259 Any register allocated here must be used immediately. The idea is to
2260 aid scheduling, not to solve data flow problems. */
2261
2262 static int last_scratch_gr_reg;
2263
2264 static int
2265 next_scratch_gr_reg (void)
2266 {
2267 int i, regno;
2268
2269 for (i = 0; i < 32; ++i)
2270 {
2271 regno = (last_scratch_gr_reg + i + 1) & 31;
2272 if (call_used_regs[regno]
2273 && ! fixed_regs[regno]
2274 && ! global_regs[regno]
2275 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2276 {
2277 last_scratch_gr_reg = regno;
2278 return regno;
2279 }
2280 }
2281
2282 /* There must be _something_ available. */
2283 gcc_unreachable ();
2284 }
2285
2286 /* Helper function for ia64_compute_frame_size, called through
2287 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2288
2289 static void
2290 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2291 {
2292 unsigned int regno = REGNO (reg);
2293 if (regno < 32)
2294 {
2295 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2296 for (i = 0; i < n; ++i)
2297 current_frame_info.gr_used_mask |= 1 << (regno + i);
2298 }
2299 }
2300
2301
2302 /* Compute the size and layout of the frame for the current function and
2303 record the results in current_frame_info.  SIZE is the number of bytes
2304 of space needed for local variables. */
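/* For instance, a non-leaf function with 40 bytes of locals, 24 bytes of
   register spills and no pretend or outgoing argument space ends up with
   total_size = IA64_STACK_ALIGN (24 + 40) = 64; a leaf function would
   additionally drop the caller's 16-byte scratch area and get 48.
   (Hedged arithmetic illustration of the computation below.)  */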
2305
2306 static void
2307 ia64_compute_frame_size (HOST_WIDE_INT size)
2308 {
2309 HOST_WIDE_INT total_size;
2310 HOST_WIDE_INT spill_size = 0;
2311 HOST_WIDE_INT extra_spill_size = 0;
2312 HOST_WIDE_INT pretend_args_size;
2313 HARD_REG_SET mask;
2314 int n_spilled = 0;
2315 int spilled_gr_p = 0;
2316 int spilled_fr_p = 0;
2317 unsigned int regno;
2318 int i;
2319
2320 if (current_frame_info.initialized)
2321 return;
2322
2323 memset (&current_frame_info, 0, sizeof current_frame_info);
2324 CLEAR_HARD_REG_SET (mask);
2325
2326 /* Don't allocate scratches to the return register. */
2327 diddle_return_value (mark_reg_gr_used_mask, NULL);
2328
2329 /* Don't allocate scratches to the EH scratch registers. */
2330 if (cfun->machine->ia64_eh_epilogue_sp)
2331 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2332 if (cfun->machine->ia64_eh_epilogue_bsp)
2333 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2334
2335 /* Find the size of the register stack frame. We have only 80 local
2336 registers, because we reserve 8 for the inputs and 8 for the
2337 outputs. */
2338
2339 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2340 since we'll be adjusting that down later. */
2341 regno = LOC_REG (78) + ! frame_pointer_needed;
2342 for (; regno >= LOC_REG (0); regno--)
2343 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2344 break;
2345 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2346
2347 /* For functions marked with the syscall_linkage attribute, we must mark
2348 all eight input registers as in use, so that locals aren't visible to
2349 the caller. */
2350
2351 if (cfun->machine->n_varargs > 0
2352 || lookup_attribute ("syscall_linkage",
2353 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2354 current_frame_info.n_input_regs = 8;
2355 else
2356 {
2357 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2358 if (df_regs_ever_live_p (regno))
2359 break;
2360 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2361 }
2362
2363 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2364 if (df_regs_ever_live_p (regno))
2365 break;
2366 i = regno - OUT_REG (0) + 1;
2367
2368 #ifndef PROFILE_HOOK
2369 /* When -p profiling, we need one output register for the mcount argument.
2370 Likewise for -a profiling for the bb_init_func argument. For -ax
2371 profiling, we need two output registers for the two bb_init_trace_func
2372 arguments. */
2373 if (current_function_profile)
2374 i = MAX (i, 1);
2375 #endif
2376 current_frame_info.n_output_regs = i;
2377
2378 /* ??? No rotating register support yet. */
2379 current_frame_info.n_rotate_regs = 0;
2380
2381 /* Discover which registers need spilling, and how much room that
2382 will take. Begin with floating point and general registers,
2383 which will always wind up on the stack. */
2384
2385 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2386 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2387 {
2388 SET_HARD_REG_BIT (mask, regno);
2389 spill_size += 16;
2390 n_spilled += 1;
2391 spilled_fr_p = 1;
2392 }
2393
2394 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2395 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2396 {
2397 SET_HARD_REG_BIT (mask, regno);
2398 spill_size += 8;
2399 n_spilled += 1;
2400 spilled_gr_p = 1;
2401 }
2402
2403 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2404 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2405 {
2406 SET_HARD_REG_BIT (mask, regno);
2407 spill_size += 8;
2408 n_spilled += 1;
2409 }
2410
2411 /* Now come all special registers that might get saved in other
2412 general registers. */
2413
2414 if (frame_pointer_needed)
2415 {
2416 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2417 /* If we did not get a register, then we take LOC79. This is guaranteed
2418 to be free, even if regs_ever_live is already set, because this is
2419 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2420 as we don't count loc79 above. */
2421 if (current_frame_info.r[reg_fp] == 0)
2422 {
2423 current_frame_info.r[reg_fp] = LOC_REG (79);
2424 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2425 }
2426 }
2427
2428 if (! current_function_is_leaf)
2429 {
2430 /* Emit a save of BR0 if we call other functions. Do this even
2431 if this function doesn't return, as EH depends on this to be
2432 able to unwind the stack. */
2433 SET_HARD_REG_BIT (mask, BR_REG (0));
2434
2435 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2436 if (current_frame_info.r[reg_save_b0] == 0)
2437 {
2438 extra_spill_size += 8;
2439 n_spilled += 1;
2440 }
2441
2442 /* Similarly for ar.pfs. */
2443 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2444 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2445 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2446 {
2447 extra_spill_size += 8;
2448 n_spilled += 1;
2449 }
2450
2451 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2452 registers are clobbered, so we fall back to the stack. */
2453 current_frame_info.r[reg_save_gp]
2454 = (current_function_calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2455 if (current_frame_info.r[reg_save_gp] == 0)
2456 {
2457 SET_HARD_REG_BIT (mask, GR_REG (1));
2458 spill_size += 8;
2459 n_spilled += 1;
2460 }
2461 }
2462 else
2463 {
2464 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2465 {
2466 SET_HARD_REG_BIT (mask, BR_REG (0));
2467 extra_spill_size += 8;
2468 n_spilled += 1;
2469 }
2470
2471 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2472 {
2473 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2474 current_frame_info.r[reg_save_ar_pfs]
2475 = find_gr_spill (reg_save_ar_pfs, 1);
2476 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2477 {
2478 extra_spill_size += 8;
2479 n_spilled += 1;
2480 }
2481 }
2482 }
2483
2484 /* Unwind descriptor hackery: things are most efficient if we allocate
2485 consecutive GR save registers for RP, PFS, FP in that order. However,
2486 it is absolutely critical that FP get the only hard register that's
2487 guaranteed to be free, so we allocated it first. If all three did
2488 happen to be allocated hard regs, and are consecutive, rearrange them
2489 into the preferred order now.
2490
2491 If we have already emitted code for any of those registers,
2492 then it's already too late to change. */
2493 if (current_frame_info.r[reg_fp] != 0
2494 && current_frame_info.r[reg_save_b0] == current_frame_info.r[reg_fp] + 1
2495 && current_frame_info.r[reg_save_ar_pfs] == current_frame_info.r[reg_fp] + 2
2496 && emitted_frame_related_regs[reg_save_b0] == 0
2497 && emitted_frame_related_regs[reg_save_ar_pfs] == 0
2498 && emitted_frame_related_regs[reg_fp] == 0)
2499 {
2500 current_frame_info.r[reg_save_b0] = current_frame_info.r[reg_fp];
2501 current_frame_info.r[reg_save_ar_pfs] = current_frame_info.r[reg_fp] + 1;
2502 current_frame_info.r[reg_fp] = current_frame_info.r[reg_fp] + 2;
2503 }
2504
2505 /* See if we need to store the predicate register block. */
2506 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2507 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2508 break;
2509 if (regno <= PR_REG (63))
2510 {
2511 SET_HARD_REG_BIT (mask, PR_REG (0));
2512 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2513 if (current_frame_info.r[reg_save_pr] == 0)
2514 {
2515 extra_spill_size += 8;
2516 n_spilled += 1;
2517 }
2518
2519 /* ??? Mark them all as used so that register renaming and such
2520 are free to use them. */
2521 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2522 df_set_regs_ever_live (regno, true);
2523 }
2524
2525 /* If we're forced to use st8.spill, we're forced to save and restore
2526 ar.unat as well. The check for existing liveness allows inline asm
2527 to touch ar.unat. */
2528 if (spilled_gr_p || cfun->machine->n_varargs
2529 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2530 {
2531 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2532 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2533 current_frame_info.r[reg_save_ar_unat]
2534 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2535 if (current_frame_info.r[reg_save_ar_unat] == 0)
2536 {
2537 extra_spill_size += 8;
2538 n_spilled += 1;
2539 }
2540 }
2541
2542 if (df_regs_ever_live_p (AR_LC_REGNUM))
2543 {
2544 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2545 current_frame_info.r[reg_save_ar_lc]
2546 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2547 if (current_frame_info.r[reg_save_ar_lc] == 0)
2548 {
2549 extra_spill_size += 8;
2550 n_spilled += 1;
2551 }
2552 }
2553
2554 /* If we have an odd number of words of pretend arguments written to
2555 the stack, then the FR save area will be unaligned. We round the
2556 size of this area up to keep things 16 byte aligned. */
2557 if (spilled_fr_p)
2558 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2559 else
2560 pretend_args_size = current_function_pretend_args_size;
2561
2562 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2563 + current_function_outgoing_args_size);
2564 total_size = IA64_STACK_ALIGN (total_size);
2565
2566 /* We always use the 16-byte scratch area provided by the caller, but
2567 if we are a leaf function, there's no one to which we need to provide
2568 a scratch area. */
2569 if (current_function_is_leaf)
2570 total_size = MAX (0, total_size - 16);
2571
2572 current_frame_info.total_size = total_size;
2573 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2574 current_frame_info.spill_size = spill_size;
2575 current_frame_info.extra_spill_size = extra_spill_size;
2576 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2577 current_frame_info.n_spilled = n_spilled;
2578 current_frame_info.initialized = reload_completed;
2579 }
2580
2581 /* Compute the initial difference between the specified pair of registers. */
2582
2583 HOST_WIDE_INT
2584 ia64_initial_elimination_offset (int from, int to)
2585 {
2586 HOST_WIDE_INT offset;
2587
2588 ia64_compute_frame_size (get_frame_size ());
2589 switch (from)
2590 {
2591 case FRAME_POINTER_REGNUM:
2592 switch (to)
2593 {
2594 case HARD_FRAME_POINTER_REGNUM:
2595 if (current_function_is_leaf)
2596 offset = -current_frame_info.total_size;
2597 else
2598 offset = -(current_frame_info.total_size
2599 - current_function_outgoing_args_size - 16);
2600 break;
2601
2602 case STACK_POINTER_REGNUM:
2603 if (current_function_is_leaf)
2604 offset = 0;
2605 else
2606 offset = 16 + current_function_outgoing_args_size;
2607 break;
2608
2609 default:
2610 gcc_unreachable ();
2611 }
2612 break;
2613
2614 case ARG_POINTER_REGNUM:
2615 /* Arguments start above the 16 byte save area, unless stdarg,
2616 in which case we store through the 16 byte save area. */
2617 switch (to)
2618 {
2619 case HARD_FRAME_POINTER_REGNUM:
2620 offset = 16 - current_function_pretend_args_size;
2621 break;
2622
2623 case STACK_POINTER_REGNUM:
2624 offset = (current_frame_info.total_size
2625 + 16 - current_function_pretend_args_size);
2626 break;
2627
2628 default:
2629 gcc_unreachable ();
2630 }
2631 break;
2632
2633 default:
2634 gcc_unreachable ();
2635 }
2636
2637 return offset;
2638 }
2639
2640 /* If there are more than a trivial number of register spills, we use
2641 two interleaved iterators so that we can get two memory references
2642 per insn group.
2643
2644 In order to simplify things in the prologue and epilogue expanders,
2645 we use helper functions to fix up the memory references after the
2646 fact with the appropriate offsets to a POST_MODIFY memory mode.
2647 The following data structure tracks the state of the two iterators
2648 while insns are being emitted. */
2649
2650 struct spill_fill_data
2651 {
2652 rtx init_after; /* point at which to emit initializations */
2653 rtx init_reg[2]; /* initial base register */
2654 rtx iter_reg[2]; /* the iterator registers */
2655 rtx *prev_addr[2]; /* address of last memory use */
2656 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2657 HOST_WIDE_INT prev_off[2]; /* last offset */
2658 int n_iter; /* number of iterators in use */
2659 int next_iter; /* next iterator to use */
2660 unsigned int save_gr_used_mask;
2661 };
2662
2663 static struct spill_fill_data spill_fill_data;
2664
2665 static void
2666 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2667 {
2668 int i;
2669
2670 spill_fill_data.init_after = get_last_insn ();
2671 spill_fill_data.init_reg[0] = init_reg;
2672 spill_fill_data.init_reg[1] = init_reg;
2673 spill_fill_data.prev_addr[0] = NULL;
2674 spill_fill_data.prev_addr[1] = NULL;
2675 spill_fill_data.prev_insn[0] = NULL;
2676 spill_fill_data.prev_insn[1] = NULL;
2677 spill_fill_data.prev_off[0] = cfa_off;
2678 spill_fill_data.prev_off[1] = cfa_off;
2679 spill_fill_data.next_iter = 0;
2680 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2681
2682 spill_fill_data.n_iter = 1 + (n_spills > 2);
2683 for (i = 0; i < spill_fill_data.n_iter; ++i)
2684 {
2685 int regno = next_scratch_gr_reg ();
2686 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2687 current_frame_info.gr_used_mask |= 1 << regno;
2688 }
2689 }
2690
2691 static void
2692 finish_spill_pointers (void)
2693 {
2694 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2695 }
2696
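/* Return a MEM for spilling or restoring REG at CFA offset CFA_OFF.  The
   address is formed through the current iterator register; when the step
   from the previous reference fits the post-increment constraint, that
   previous address is rewritten into a POST_MODIFY after the fact.  */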
2697 static rtx
2698 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2699 {
2700 int iter = spill_fill_data.next_iter;
2701 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2702 rtx disp_rtx = GEN_INT (disp);
2703 rtx mem;
2704
2705 if (spill_fill_data.prev_addr[iter])
2706 {
2707 if (satisfies_constraint_N (disp_rtx))
2708 {
2709 *spill_fill_data.prev_addr[iter]
2710 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2711 gen_rtx_PLUS (DImode,
2712 spill_fill_data.iter_reg[iter],
2713 disp_rtx));
2714 REG_NOTES (spill_fill_data.prev_insn[iter])
2715 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2716 REG_NOTES (spill_fill_data.prev_insn[iter]));
2717 }
2718 else
2719 {
2720 /* ??? Could use register post_modify for loads. */
2721 if (!satisfies_constraint_I (disp_rtx))
2722 {
2723 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2724 emit_move_insn (tmp, disp_rtx);
2725 disp_rtx = tmp;
2726 }
2727 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2728 spill_fill_data.iter_reg[iter], disp_rtx));
2729 }
2730 }
2731 /* Micro-optimization: if we've created a frame pointer, it's at
2732 CFA 0, which may allow the real iterator to be initialized lower,
2733 slightly increasing parallelism. Also, if there are few saves
2734 it may eliminate the iterator entirely. */
2735 else if (disp == 0
2736 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2737 && frame_pointer_needed)
2738 {
2739 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2740 set_mem_alias_set (mem, get_varargs_alias_set ());
2741 return mem;
2742 }
2743 else
2744 {
2745 rtx seq, insn;
2746
2747 if (disp == 0)
2748 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2749 spill_fill_data.init_reg[iter]);
2750 else
2751 {
2752 start_sequence ();
2753
2754 if (!satisfies_constraint_I (disp_rtx))
2755 {
2756 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2757 emit_move_insn (tmp, disp_rtx);
2758 disp_rtx = tmp;
2759 }
2760
2761 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2762 spill_fill_data.init_reg[iter],
2763 disp_rtx));
2764
2765 seq = get_insns ();
2766 end_sequence ();
2767 }
2768
2769 /* Be careful about being the first insn in a sequence. */
2770 if (spill_fill_data.init_after)
2771 insn = emit_insn_after (seq, spill_fill_data.init_after);
2772 else
2773 {
2774 rtx first = get_insns ();
2775 if (first)
2776 insn = emit_insn_before (seq, first);
2777 else
2778 insn = emit_insn (seq);
2779 }
2780 spill_fill_data.init_after = insn;
2781 }
2782
2783 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2784
2785 /* ??? Not all of the spills are for varargs, but some of them are.
2786 The rest of the spills belong in an alias set of their own. But
2787 it doesn't actually hurt to include them here. */
2788 set_mem_alias_set (mem, get_varargs_alias_set ());
2789
2790 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2791 spill_fill_data.prev_off[iter] = cfa_off;
2792
2793 if (++iter >= spill_fill_data.n_iter)
2794 iter = 0;
2795 spill_fill_data.next_iter = iter;
2796
2797 return mem;
2798 }
2799
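/* Spill REG to its save slot at CFA_OFF using MOVE_FN.  If FRAME_REG is
   non-null, mark the insn frame related and attach a REG_FRAME_RELATED_EXPR
   note giving the save address relative to the CFA, since the unwinder
   cannot follow the iterator arithmetic.  */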
2800 static void
2801 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2802 rtx frame_reg)
2803 {
2804 int iter = spill_fill_data.next_iter;
2805 rtx mem, insn;
2806
2807 mem = spill_restore_mem (reg, cfa_off);
2808 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2809 spill_fill_data.prev_insn[iter] = insn;
2810
2811 if (frame_reg)
2812 {
2813 rtx base;
2814 HOST_WIDE_INT off;
2815
2816 RTX_FRAME_RELATED_P (insn) = 1;
2817
2818 /* Don't even pretend that the unwind code can intuit its way
2819 through a pair of interleaved post_modify iterators. Just
2820 provide the correct answer. */
2821
2822 if (frame_pointer_needed)
2823 {
2824 base = hard_frame_pointer_rtx;
2825 off = - cfa_off;
2826 }
2827 else
2828 {
2829 base = stack_pointer_rtx;
2830 off = current_frame_info.total_size - cfa_off;
2831 }
2832
2833 REG_NOTES (insn)
2834 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2835 gen_rtx_SET (VOIDmode,
2836 gen_rtx_MEM (GET_MODE (reg),
2837 plus_constant (base, off)),
2838 frame_reg),
2839 REG_NOTES (insn));
2840 }
2841 }
2842
2843 static void
2844 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2845 {
2846 int iter = spill_fill_data.next_iter;
2847 rtx insn;
2848
2849 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2850 GEN_INT (cfa_off)));
2851 spill_fill_data.prev_insn[iter] = insn;
2852 }
2853
2854 /* Wrapper functions that discard the CONST_INT spill offset. These
2855 exist so that we can give gr_spill/gr_fill the offset they need and
2856 use a consistent function interface. */
2857
2858 static rtx
2859 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2860 {
2861 return gen_movdi (dest, src);
2862 }
2863
2864 static rtx
2865 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2866 {
2867 return gen_fr_spill (dest, src);
2868 }
2869
2870 static rtx
2871 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2872 {
2873 return gen_fr_restore (dest, src);
2874 }
2875
2876 /* Called after register allocation to add any instructions needed for the
2877 prologue. Using a prologue insn is favored compared to putting all of the
2878 instructions in output_function_prologue(), since it allows the scheduler
2879 to intermix instructions with the saves of the call-saved registers. In
2880 some cases, it might be necessary to emit a barrier instruction as the last
2881 insn to prevent such scheduling.
2882
2883 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2884 so that the debug info generation code can handle them properly.
2885
2886 The register save area is laid out like so:
2887 cfa+16
2888 [ varargs spill area ]
2889 [ fr register spill area ]
2890 [ br register spill area ]
2891 [ ar register spill area ]
2892 [ pr register spill area ]
2893 [ gr register spill area ] */
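/* As the code below walks this layout, cfa_off starts at -16 for the
   varargs spills, is then reset to spill_cfa_off + spill_size
   + extra_spill_size for the register saves, and steps down by 8 (16 for
   FR registers) per save until it reaches spill_cfa_off, which the
   asserts below verify.  */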
2894
2895 /* ??? Get inefficient code when the frame size is larger than can fit in an
2896 adds instruction. */
2897
2898 void
2899 ia64_expand_prologue (void)
2900 {
2901 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2902 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2903 rtx reg, alt_reg;
2904
2905 ia64_compute_frame_size (get_frame_size ());
2906 last_scratch_gr_reg = 15;
2907
2908 if (dump_file)
2909 {
2910 fprintf (dump_file, "ia64 frame related registers "
2911 "recorded in current_frame_info.r[]:\n");
2912 #define PRINTREG(a) if (current_frame_info.r[a]) \
2913 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2914 PRINTREG(reg_fp);
2915 PRINTREG(reg_save_b0);
2916 PRINTREG(reg_save_pr);
2917 PRINTREG(reg_save_ar_pfs);
2918 PRINTREG(reg_save_ar_unat);
2919 PRINTREG(reg_save_ar_lc);
2920 PRINTREG(reg_save_gp);
2921 #undef PRINTREG
2922 }
2923
2924 /* If there is no epilogue, then we don't need some prologue insns.
2925 We need to avoid emitting the dead prologue insns, because flow
2926 will complain about them. */
2927 if (optimize)
2928 {
2929 edge e;
2930 edge_iterator ei;
2931
2932 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2933 if ((e->flags & EDGE_FAKE) == 0
2934 && (e->flags & EDGE_FALLTHRU) != 0)
2935 break;
2936 epilogue_p = (e != NULL);
2937 }
2938 else
2939 epilogue_p = 1;
2940
2941 /* Set the local, input, and output register names. We need to do this
2942 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2943 half. If we use in/loc/out register names, then we get assembler errors
2944 in crtn.S because there is no alloc insn or regstk directive in there. */
2945 if (! TARGET_REG_NAMES)
2946 {
2947 int inputs = current_frame_info.n_input_regs;
2948 int locals = current_frame_info.n_local_regs;
2949 int outputs = current_frame_info.n_output_regs;
2950
2951 for (i = 0; i < inputs; i++)
2952 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2953 for (i = 0; i < locals; i++)
2954 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2955 for (i = 0; i < outputs; i++)
2956 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2957 }
2958
2959 /* Set the frame pointer register name. The regnum is logically loc79,
2960 but of course we'll not have allocated that many locals. Rather than
2961 worrying about renumbering the existing rtxs, we adjust the name. */
2962 /* ??? This code means that we can never use one local register when
2963 there is a frame pointer. loc79 gets wasted in this case, as it is
2964 renamed to a register that will never be used. See also the try_locals
2965 code in find_gr_spill. */
2966 if (current_frame_info.r[reg_fp])
2967 {
2968 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2969 reg_names[HARD_FRAME_POINTER_REGNUM]
2970 = reg_names[current_frame_info.r[reg_fp]];
2971 reg_names[current_frame_info.r[reg_fp]] = tmp;
2972 }
2973
2974 /* We don't need an alloc instruction if we've used no outputs or locals. */
2975 if (current_frame_info.n_local_regs == 0
2976 && current_frame_info.n_output_regs == 0
2977 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2978 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2979 {
2980 /* If there is no alloc, but there are input registers used, then we
2981 need a .regstk directive. */
2982 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2983 ar_pfs_save_reg = NULL_RTX;
2984 }
2985 else
2986 {
2987 current_frame_info.need_regstk = 0;
2988
2989 if (current_frame_info.r[reg_save_ar_pfs])
2990 {
2991 regno = current_frame_info.r[reg_save_ar_pfs];
2992 reg_emitted (reg_save_ar_pfs);
2993 }
2994 else
2995 regno = next_scratch_gr_reg ();
2996 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2997
2998 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2999 GEN_INT (current_frame_info.n_input_regs),
3000 GEN_INT (current_frame_info.n_local_regs),
3001 GEN_INT (current_frame_info.n_output_regs),
3002 GEN_INT (current_frame_info.n_rotate_regs)));
3003 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3004 }
3005
3006 /* Set up frame pointer, stack pointer, and spill iterators. */
3007
3008 n_varargs = cfun->machine->n_varargs;
3009 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3010 stack_pointer_rtx, 0);
3011
3012 if (frame_pointer_needed)
3013 {
3014 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3015 RTX_FRAME_RELATED_P (insn) = 1;
3016 }
3017
3018 if (current_frame_info.total_size != 0)
3019 {
3020 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3021 rtx offset;
3022
3023 if (satisfies_constraint_I (frame_size_rtx))
3024 offset = frame_size_rtx;
3025 else
3026 {
3027 regno = next_scratch_gr_reg ();
3028 offset = gen_rtx_REG (DImode, regno);
3029 emit_move_insn (offset, frame_size_rtx);
3030 }
3031
3032 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3033 stack_pointer_rtx, offset));
3034
3035 if (! frame_pointer_needed)
3036 {
3037 RTX_FRAME_RELATED_P (insn) = 1;
3038 if (GET_CODE (offset) != CONST_INT)
3039 {
3040 REG_NOTES (insn)
3041 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3042 gen_rtx_SET (VOIDmode,
3043 stack_pointer_rtx,
3044 gen_rtx_PLUS (DImode,
3045 stack_pointer_rtx,
3046 frame_size_rtx)),
3047 REG_NOTES (insn));
3048 }
3049 }
3050
3051 /* ??? At this point we must generate a magic insn that appears to
3052 modify the stack pointer, the frame pointer, and all spill
3053 iterators. This would allow the most scheduling freedom. For
3054 now, just hard stop. */
3055 emit_insn (gen_blockage ());
3056 }
3057
3058 /* Must copy out ar.unat before doing any integer spills. */
3059 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3060 {
3061 if (current_frame_info.r[reg_save_ar_unat])
3062 {
3063 ar_unat_save_reg
3064 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3065 reg_emitted (reg_save_ar_unat);
3066 }
3067 else
3068 {
3069 alt_regno = next_scratch_gr_reg ();
3070 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3071 current_frame_info.gr_used_mask |= 1 << alt_regno;
3072 }
3073
3074 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3075 insn = emit_move_insn (ar_unat_save_reg, reg);
3076 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3077
3078 /* Even if we're not going to generate an epilogue, we still
3079 need to save the register so that EH works. */
3080 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3081 emit_insn (gen_prologue_use (ar_unat_save_reg));
3082 }
3083 else
3084 ar_unat_save_reg = NULL_RTX;
3085
3086 /* Spill all varargs registers. Do this before spilling any GR registers,
3087 since we want the UNAT bits for the GR registers to override the UNAT
3088 bits from varargs, which we don't care about. */
3089
3090 cfa_off = -16;
3091 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3092 {
3093 reg = gen_rtx_REG (DImode, regno);
3094 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3095 }
3096
3097 /* Locate the bottom of the register save area. */
3098 cfa_off = (current_frame_info.spill_cfa_off
3099 + current_frame_info.spill_size
3100 + current_frame_info.extra_spill_size);
3101
3102 /* Save the predicate register block either in a register or in memory. */
3103 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3104 {
3105 reg = gen_rtx_REG (DImode, PR_REG (0));
3106 if (current_frame_info.r[reg_save_pr] != 0)
3107 {
3108 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3109 reg_emitted (reg_save_pr);
3110 insn = emit_move_insn (alt_reg, reg);
3111
3112 /* ??? Denote pr spill/fill by a DImode move that modifies all
3113 64 hard registers. */
3114 RTX_FRAME_RELATED_P (insn) = 1;
3115 REG_NOTES (insn)
3116 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3117 gen_rtx_SET (VOIDmode, alt_reg, reg),
3118 REG_NOTES (insn));
3119
3120 /* Even if we're not going to generate an epilogue, we still
3121 need to save the register so that EH works. */
3122 if (! epilogue_p)
3123 emit_insn (gen_prologue_use (alt_reg));
3124 }
3125 else
3126 {
3127 alt_regno = next_scratch_gr_reg ();
3128 alt_reg = gen_rtx_REG (DImode, alt_regno);
3129 insn = emit_move_insn (alt_reg, reg);
3130 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3131 cfa_off -= 8;
3132 }
3133 }
3134
3135 /* Handle AR regs in numerical order. All of them get special handling. */
3136 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3137 && current_frame_info.r[reg_save_ar_unat] == 0)
3138 {
3139 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3140 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3141 cfa_off -= 8;
3142 }
3143
3144 /* The alloc insn already copied ar.pfs into a general register. The
3145 only thing we have to do now is copy that register to a stack slot
3146 if we'd not allocated a local register for the job. */
3147 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3148 && current_frame_info.r[reg_save_ar_pfs] == 0)
3149 {
3150 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3151 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3152 cfa_off -= 8;
3153 }
3154
3155 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3156 {
3157 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3158 if (current_frame_info.r[reg_save_ar_lc] != 0)
3159 {
3160 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3161 reg_emitted (reg_save_ar_lc);
3162 insn = emit_move_insn (alt_reg, reg);
3163 RTX_FRAME_RELATED_P (insn) = 1;
3164
3165 /* Even if we're not going to generate an epilogue, we still
3166 need to save the register so that EH works. */
3167 if (! epilogue_p)
3168 emit_insn (gen_prologue_use (alt_reg));
3169 }
3170 else
3171 {
3172 alt_regno = next_scratch_gr_reg ();
3173 alt_reg = gen_rtx_REG (DImode, alt_regno);
3174 emit_move_insn (alt_reg, reg);
3175 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3176 cfa_off -= 8;
3177 }
3178 }
3179
3180 /* Save the return pointer. */
3181 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3182 {
3183 reg = gen_rtx_REG (DImode, BR_REG (0));
3184 if (current_frame_info.r[reg_save_b0] != 0)
3185 {
3186 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3187 reg_emitted (reg_save_b0);
3188 insn = emit_move_insn (alt_reg, reg);
3189 RTX_FRAME_RELATED_P (insn) = 1;
3190
3191 /* Even if we're not going to generate an epilogue, we still
3192 need to save the register so that EH works. */
3193 if (! epilogue_p)
3194 emit_insn (gen_prologue_use (alt_reg));
3195 }
3196 else
3197 {
3198 alt_regno = next_scratch_gr_reg ();
3199 alt_reg = gen_rtx_REG (DImode, alt_regno);
3200 emit_move_insn (alt_reg, reg);
3201 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3202 cfa_off -= 8;
3203 }
3204 }
3205
3206 if (current_frame_info.r[reg_save_gp])
3207 {
3208 reg_emitted (reg_save_gp);
3209 insn = emit_move_insn (gen_rtx_REG (DImode,
3210 current_frame_info.r[reg_save_gp]),
3211 pic_offset_table_rtx);
3212 }
3213
3214 /* We should now be at the base of the gr/br/fr spill area. */
3215 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3216 + current_frame_info.spill_size));
3217
3218 /* Spill all general registers. */
3219 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3220 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3221 {
3222 reg = gen_rtx_REG (DImode, regno);
3223 do_spill (gen_gr_spill, reg, cfa_off, reg);
3224 cfa_off -= 8;
3225 }
3226
3227 /* Spill the rest of the BR registers. */
3228 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3229 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3230 {
3231 alt_regno = next_scratch_gr_reg ();
3232 alt_reg = gen_rtx_REG (DImode, alt_regno);
3233 reg = gen_rtx_REG (DImode, regno);
3234 emit_move_insn (alt_reg, reg);
3235 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3236 cfa_off -= 8;
3237 }
3238
3239 /* Align the frame and spill all FR registers. */
3240 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3241 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3242 {
3243 gcc_assert (!(cfa_off & 15));
3244 reg = gen_rtx_REG (XFmode, regno);
3245 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3246 cfa_off -= 16;
3247 }
3248
3249 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3250
3251 finish_spill_pointers ();
3252 }
3253
3254 /* Called after register allocation to add any instructions needed for the
3255 epilogue. Using an epilogue insn is favored compared to putting all of the
3256 instructions in output_function_epilogue(), since it allows the scheduler
3257 to intermix instructions with the restores of the call-saved registers. In
3258 some cases, it might be necessary to emit a barrier instruction as the last
3259 insn to prevent such scheduling. */
3260
3261 void
3262 ia64_expand_epilogue (int sibcall_p)
3263 {
3264 rtx insn, reg, alt_reg, ar_unat_save_reg;
3265 int regno, alt_regno, cfa_off;
3266
3267 ia64_compute_frame_size (get_frame_size ());
3268
3269 /* If there is a frame pointer, then we use it instead of the stack
3270 pointer, so that the stack pointer does not need to be valid when
3271 the epilogue starts. See EXIT_IGNORE_STACK. */
3272 if (frame_pointer_needed)
3273 setup_spill_pointers (current_frame_info.n_spilled,
3274 hard_frame_pointer_rtx, 0);
3275 else
3276 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3277 current_frame_info.total_size);
3278
3279 if (current_frame_info.total_size != 0)
3280 {
3281 /* ??? At this point we must generate a magic insn that appears to
3282 modify the spill iterators and the frame pointer. This would
3283 allow the most scheduling freedom. For now, just hard stop. */
3284 emit_insn (gen_blockage ());
3285 }
3286
3287 /* Locate the bottom of the register save area. */
3288 cfa_off = (current_frame_info.spill_cfa_off
3289 + current_frame_info.spill_size
3290 + current_frame_info.extra_spill_size);
3291
3292 /* Restore the predicate registers. */
3293 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3294 {
3295 if (current_frame_info.r[reg_save_pr] != 0)
3296 {
3297 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3298 reg_emitted (reg_save_pr);
3299 }
3300 else
3301 {
3302 alt_regno = next_scratch_gr_reg ();
3303 alt_reg = gen_rtx_REG (DImode, alt_regno);
3304 do_restore (gen_movdi_x, alt_reg, cfa_off);
3305 cfa_off -= 8;
3306 }
3307 reg = gen_rtx_REG (DImode, PR_REG (0));
3308 emit_move_insn (reg, alt_reg);
3309 }
3310
3311 /* Restore the application registers. */
3312
3313 /* Load the saved unat from the stack, but do not restore it until
3314 after the GRs have been restored. */
3315 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3316 {
3317 if (current_frame_info.r[reg_save_ar_unat] != 0)
3318 {
3319 ar_unat_save_reg
3320 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3321 reg_emitted (reg_save_ar_unat);
3322 }
3323 else
3324 {
3325 alt_regno = next_scratch_gr_reg ();
3326 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3327 current_frame_info.gr_used_mask |= 1 << alt_regno;
3328 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3329 cfa_off -= 8;
3330 }
3331 }
3332 else
3333 ar_unat_save_reg = NULL_RTX;
3334
3335 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3336 {
3337 reg_emitted (reg_save_ar_pfs);
3338 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3339 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3340 emit_move_insn (reg, alt_reg);
3341 }
3342 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3343 {
3344 alt_regno = next_scratch_gr_reg ();
3345 alt_reg = gen_rtx_REG (DImode, alt_regno);
3346 do_restore (gen_movdi_x, alt_reg, cfa_off);
3347 cfa_off -= 8;
3348 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3349 emit_move_insn (reg, alt_reg);
3350 }
3351
3352 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3353 {
3354 if (current_frame_info.r[reg_save_ar_lc] != 0)
3355 {
3356 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3357 reg_emitted (reg_save_ar_lc);
3358 }
3359 else
3360 {
3361 alt_regno = next_scratch_gr_reg ();
3362 alt_reg = gen_rtx_REG (DImode, alt_regno);
3363 do_restore (gen_movdi_x, alt_reg, cfa_off);
3364 cfa_off -= 8;
3365 }
3366 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3367 emit_move_insn (reg, alt_reg);
3368 }
3369
3370 /* Restore the return pointer. */
3371 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3372 {
3373 if (current_frame_info.r[reg_save_b0] != 0)
3374 {
3375 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3376 reg_emitted (reg_save_b0);
3377 }
3378 else
3379 {
3380 alt_regno = next_scratch_gr_reg ();
3381 alt_reg = gen_rtx_REG (DImode, alt_regno);
3382 do_restore (gen_movdi_x, alt_reg, cfa_off);
3383 cfa_off -= 8;
3384 }
3385 reg = gen_rtx_REG (DImode, BR_REG (0));
3386 emit_move_insn (reg, alt_reg);
3387 }
3388
3389 /* We should now be at the base of the gr/br/fr spill area. */
3390 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3391 + current_frame_info.spill_size));
3392
3393 /* The GP may be stored on the stack in the prologue, but it's
3394 never restored in the epilogue. Skip the stack slot. */
3395 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3396 cfa_off -= 8;
3397
3398 /* Restore all general registers. */
3399 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3400 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3401 {
3402 reg = gen_rtx_REG (DImode, regno);
3403 do_restore (gen_gr_restore, reg, cfa_off);
3404 cfa_off -= 8;
3405 }
3406
3407 /* Restore the branch registers. */
3408 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3409 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3410 {
3411 alt_regno = next_scratch_gr_reg ();
3412 alt_reg = gen_rtx_REG (DImode, alt_regno);
3413 do_restore (gen_movdi_x, alt_reg, cfa_off);
3414 cfa_off -= 8;
3415 reg = gen_rtx_REG (DImode, regno);
3416 emit_move_insn (reg, alt_reg);
3417 }
3418
3419 /* Restore floating point registers. */
3420 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3421 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3422 {
3423 gcc_assert (!(cfa_off & 15));
3424 reg = gen_rtx_REG (XFmode, regno);
3425 do_restore (gen_fr_restore_x, reg, cfa_off);
3426 cfa_off -= 16;
3427 }
3428
3429 /* Restore ar.unat for real. */
3430 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3431 {
3432 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3433 emit_move_insn (reg, ar_unat_save_reg);
3434 }
3435
3436 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3437
3438 finish_spill_pointers ();
3439
3440 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3441 {
3442 /* ??? At this point we must generate a magic insn that appears to
3443 modify the spill iterators, the stack pointer, and the frame
3444 pointer. This would allow the most scheduling freedom. For now,
3445 just hard stop. */
3446 emit_insn (gen_blockage ());
3447 }
3448
3449 if (cfun->machine->ia64_eh_epilogue_sp)
3450 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3451 else if (frame_pointer_needed)
3452 {
3453 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3454 RTX_FRAME_RELATED_P (insn) = 1;
3455 }
3456 else if (current_frame_info.total_size)
3457 {
3458 rtx offset, frame_size_rtx;
3459
3460 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3461 if (satisfies_constraint_I (frame_size_rtx))
3462 offset = frame_size_rtx;
3463 else
3464 {
3465 regno = next_scratch_gr_reg ();
3466 offset = gen_rtx_REG (DImode, regno);
3467 emit_move_insn (offset, frame_size_rtx);
3468 }
3469
3470 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3471 offset));
3472
3473 RTX_FRAME_RELATED_P (insn) = 1;
3474 if (GET_CODE (offset) != CONST_INT)
3475 {
3476 REG_NOTES (insn)
3477 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3478 gen_rtx_SET (VOIDmode,
3479 stack_pointer_rtx,
3480 gen_rtx_PLUS (DImode,
3481 stack_pointer_rtx,
3482 frame_size_rtx)),
3483 REG_NOTES (insn));
3484 }
3485 }
3486
3487 if (cfun->machine->ia64_eh_epilogue_bsp)
3488 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3489
3490 if (! sibcall_p)
3491 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3492 else
3493 {
3494 int fp = GR_REG (2);
3495 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3496 first available call-clobbered register. If there was a frame pointer
3497 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3498 so we have to make sure we're using the string "r2" when emitting
3499 the register name for the assembler. */
3500 if (current_frame_info.r[reg_fp]
3501 && current_frame_info.r[reg_fp] == GR_REG (2))
3502 fp = HARD_FRAME_POINTER_REGNUM;
3503
3504 /* We must emit an alloc to force the input registers to become output
3505 registers. Otherwise, if the callee tries to pass its parameters
3506 through to another call without an intervening alloc, then these
3507 values get lost. */
3508 /* ??? We don't need to preserve all input registers. We only need to
3509 preserve those input registers used as arguments to the sibling call.
3510 It is unclear how to compute that number here. */
3511 if (current_frame_info.n_input_regs != 0)
3512 {
3513 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3514 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3515 const0_rtx, const0_rtx,
3516 n_inputs, const0_rtx));
3517 RTX_FRAME_RELATED_P (insn) = 1;
3518 }
3519 }
3520 }
3521
3522 /* Return 1 if br.ret can do all the work required to return from a
3523 function. */
3524
3525 int
3526 ia64_direct_return (void)
3527 {
3528 if (reload_completed && ! frame_pointer_needed)
3529 {
3530 ia64_compute_frame_size (get_frame_size ());
3531
3532 return (current_frame_info.total_size == 0
3533 && current_frame_info.n_spilled == 0
3534 && current_frame_info.r[reg_save_b0] == 0
3535 && current_frame_info.r[reg_save_pr] == 0
3536 && current_frame_info.r[reg_save_ar_pfs] == 0
3537 && current_frame_info.r[reg_save_ar_unat] == 0
3538 && current_frame_info.r[reg_save_ar_lc] == 0);
3539 }
3540 return 0;
3541 }
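
/* As a rough illustration of the test above: a small leaf function such as
   int add (int a, int b) { return a + b; }  typically allocates no frame,
   spills nothing and reserves none of the save registers checked here, so
   ia64_direct_return returns 1 and the epilogue is simply a br.ret.  The
   exact outcome of course depends on the function and the options used.  */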
3542
3543 /* Return the magic cookie that we use to hold the return address
3544 during early compilation. */
3545
3546 rtx
3547 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3548 {
3549 if (count != 0)
3550 return NULL;
3551 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3552 }
3553
3554 /* Split this value after reload, now that we know where the return
3555 address is saved. */
3556
3557 void
3558 ia64_split_return_addr_rtx (rtx dest)
3559 {
3560 rtx src;
3561
3562 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3563 {
3564 if (current_frame_info.r[reg_save_b0] != 0)
3565 {
3566 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3567 reg_emitted (reg_save_b0);
3568 }
3569 else
3570 {
3571 HOST_WIDE_INT off;
3572 unsigned int regno;
3573 rtx off_r;
3574
3575 /* Compute offset from CFA for BR0. */
3576 /* ??? Must be kept in sync with ia64_expand_prologue. */
3577 off = (current_frame_info.spill_cfa_off
3578 + current_frame_info.spill_size);
3579 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3580 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3581 off -= 8;
3582
3583 /* Convert CFA offset to a register based offset. */
3584 if (frame_pointer_needed)
3585 src = hard_frame_pointer_rtx;
3586 else
3587 {
3588 src = stack_pointer_rtx;
3589 off += current_frame_info.total_size;
3590 }
3591
3592 /* Load address into scratch register. */
3593 off_r = GEN_INT (off);
3594 if (satisfies_constraint_I (off_r))
3595 emit_insn (gen_adddi3 (dest, src, off_r));
3596 else
3597 {
3598 emit_move_insn (dest, off_r);
3599 emit_insn (gen_adddi3 (dest, src, dest));
3600 }
3601
3602 src = gen_rtx_MEM (Pmode, dest);
3603 }
3604 }
3605 else
3606 src = gen_rtx_REG (DImode, BR_REG (0));
3607
3608 emit_move_insn (dest, src);
3609 }
3610
3611 int
3612 ia64_hard_regno_rename_ok (int from, int to)
3613 {
3614 /* Don't clobber any of the registers we reserved for the prologue. */
3615 enum ia64_frame_regs r;
3616
3617 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3618 if (to == current_frame_info.r[r]
3619 || from == current_frame_info.r[r]
3620 || to == emitted_frame_related_regs[r]
3621 || from == emitted_frame_related_regs[r])
3622 return 0;
3623
3624 /* Don't use output registers outside the register frame. */
3625 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3626 return 0;
3627
3628 /* Retain even/oddness on predicate register pairs. */
3629 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3630 return (from & 1) == (to & 1);
3631
3632 return 1;
3633 }
3634
3635 /* Target hook for assembling integer objects. Handle word-sized
3636 aligned objects and detect the cases when @fptr is needed. */
3637
3638 static bool
3639 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3640 {
3641 if (size == POINTER_SIZE / BITS_PER_UNIT
3642 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3643 && GET_CODE (x) == SYMBOL_REF
3644 && SYMBOL_REF_FUNCTION_P (x))
3645 {
3646 static const char * const directive[2][2] = {
3647 /* 64-bit pointer */ /* 32-bit pointer */
3648 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3649 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3650 };
3651 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3652 output_addr_const (asm_out_file, x);
3653 fputs (")\n", asm_out_file);
3654 return true;
3655 }
3656 return default_assemble_integer (x, size, aligned_p);
3657 }
3658
3659 /* Emit the function prologue. */
3660
3661 static void
3662 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3663 {
3664 int mask, grsave, grsave_prev;
3665
3666 if (current_frame_info.need_regstk)
3667 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3668 current_frame_info.n_input_regs,
3669 current_frame_info.n_local_regs,
3670 current_frame_info.n_output_regs,
3671 current_frame_info.n_rotate_regs);
3672
3673 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3674 return;
3675
3676 /* Emit the .prologue directive. */
3677
3678 mask = 0;
3679 grsave = grsave_prev = 0;
3680 if (current_frame_info.r[reg_save_b0] != 0)
3681 {
3682 mask |= 8;
3683 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3684 }
3685 if (current_frame_info.r[reg_save_ar_pfs] != 0
3686 && (grsave_prev == 0
3687 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3688 {
3689 mask |= 4;
3690 if (grsave_prev == 0)
3691 grsave = current_frame_info.r[reg_save_ar_pfs];
3692 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3693 }
3694 if (current_frame_info.r[reg_fp] != 0
3695 && (grsave_prev == 0
3696 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3697 {
3698 mask |= 2;
3699 if (grsave_prev == 0)
3700 grsave = HARD_FRAME_POINTER_REGNUM;
3701 grsave_prev = current_frame_info.r[reg_fp];
3702 }
3703 if (current_frame_info.r[reg_save_pr] != 0
3704 && (grsave_prev == 0
3705 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3706 {
3707 mask |= 1;
3708 if (grsave_prev == 0)
3709 grsave = current_frame_info.r[reg_save_pr];
3710 }
3711
3712 if (mask && TARGET_GNU_AS)
3713 fprintf (file, "\t.prologue %d, %d\n", mask,
3714 ia64_dbx_register_number (grsave));
3715 else
3716 fputs ("\t.prologue\n", file);
3717
3718 /* Emit a .spill directive, if necessary, to relocate the base of
3719 the register spill area. */
3720 if (current_frame_info.spill_cfa_off != -16)
3721 fprintf (file, "\t.spill %ld\n",
3722 (long) (current_frame_info.spill_cfa_off
3723 + current_frame_info.spill_size));
3724 }
3725
3726 /* Emit the .body directive at the scheduled end of the prologue. */
3727
3728 static void
3729 ia64_output_function_end_prologue (FILE *file)
3730 {
3731 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3732 return;
3733
3734 fputs ("\t.body\n", file);
3735 }
3736
3737 /* Emit the function epilogue. */
3738
3739 static void
3740 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3741 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3742 {
3743 int i;
3744
3745 if (current_frame_info.r[reg_fp])
3746 {
3747 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3748 reg_names[HARD_FRAME_POINTER_REGNUM]
3749 = reg_names[current_frame_info.r[reg_fp]];
3750 reg_names[current_frame_info.r[reg_fp]] = tmp;
3751 reg_emitted (reg_fp);
3752 }
3753 if (! TARGET_REG_NAMES)
3754 {
3755 for (i = 0; i < current_frame_info.n_input_regs; i++)
3756 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3757 for (i = 0; i < current_frame_info.n_local_regs; i++)
3758 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3759 for (i = 0; i < current_frame_info.n_output_regs; i++)
3760 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3761 }
3762
3763 current_frame_info.initialized = 0;
3764 }
3765
3766 int
3767 ia64_dbx_register_number (int regno)
3768 {
3769 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3770 from its home at loc79 to something inside the register frame. We
3771 must perform the same renumbering here for the debug info. */
3772 if (current_frame_info.r[reg_fp])
3773 {
3774 if (regno == HARD_FRAME_POINTER_REGNUM)
3775 regno = current_frame_info.r[reg_fp];
3776 else if (regno == current_frame_info.r[reg_fp])
3777 regno = HARD_FRAME_POINTER_REGNUM;
3778 }
3779
3780 if (IN_REGNO_P (regno))
3781 return 32 + regno - IN_REG (0);
3782 else if (LOC_REGNO_P (regno))
3783 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3784 else if (OUT_REGNO_P (regno))
3785 return (32 + current_frame_info.n_input_regs
3786 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3787 else
3788 return regno;
3789 }
3790
3791 void
3792 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3793 {
3794 rtx addr_reg, eight = GEN_INT (8);
3795
3796 /* The Intel assembler requires that the global __ia64_trampoline symbol
3797 be declared explicitly.  */
3798 if (!TARGET_GNU_AS)
3799 {
3800 static bool declared_ia64_trampoline = false;
3801
3802 if (!declared_ia64_trampoline)
3803 {
3804 declared_ia64_trampoline = true;
3805 (*targetm.asm_out.globalize_label) (asm_out_file,
3806 "__ia64_trampoline");
3807 }
3808 }
3809
3810 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3811 addr = convert_memory_address (Pmode, addr);
3812 fnaddr = convert_memory_address (Pmode, fnaddr);
3813 static_chain = convert_memory_address (Pmode, static_chain);
3814
3815 /* Load up our iterator. */
3816 addr_reg = gen_reg_rtx (Pmode);
3817 emit_move_insn (addr_reg, addr);
3818
3819 /* The first two words are the fake descriptor:
3820 __ia64_trampoline, ADDR+16. */
3821 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3822 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3823 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3824
3825 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3826 copy_to_reg (plus_constant (addr, 16)));
3827 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3828
3829 /* The third word is the target descriptor. */
3830 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3831 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3832
3833 /* The fourth word is the static chain. */
3834 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3835 }
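
/* A sketch of the 32-byte trampoline block built above, with ADDR the start
   of the trampoline storage:

       ADDR +  0:  __ia64_trampoline   (entry point of the fake descriptor)
       ADDR +  8:  ADDR + 16           (gp slot of the fake descriptor)
       ADDR + 16:  FNADDR              (descriptor of the real target)
       ADDR + 24:  STATIC_CHAIN

   Calling the fake descriptor enters __ia64_trampoline with gp = ADDR + 16,
   which is enough for the helper to locate the real target descriptor and
   the static chain.  */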
3836 \f
3837 /* Do any needed setup for a variadic function. CUM has not been updated
3838 for the last named argument, which has type TYPE and mode MODE.
3839
3840 We generate the actual spill instructions during prologue generation. */
3841
3842 static void
3843 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3844 tree type, int * pretend_size,
3845 int second_time ATTRIBUTE_UNUSED)
3846 {
3847 CUMULATIVE_ARGS next_cum = *cum;
3848
3849 /* Skip the current argument. */
3850 ia64_function_arg_advance (&next_cum, mode, type, 1);
3851
3852 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3853 {
3854 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3855 *pretend_size = n * UNITS_PER_WORD;
3856 cfun->machine->n_varargs = n;
3857 }
3858 }
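
/* For instance, with the MAX_ARGUMENT_SLOTS == 8 convention used throughout
   this file, a variadic function whose named arguments occupy three slots
   ends up with n = 5 and *pretend_size = 5 * UNITS_PER_WORD; the prologue
   then spills the remaining five incoming GR argument registers so that
   va_arg can walk them in memory.  */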
3859
3860 /* Check whether TYPE is a homogeneous floating point aggregate. If
3861 it is, return the mode of the floating point type that appears
3862 in all leaves.  If it is not, return VOIDmode.
3863 
3864 An aggregate is a homogeneous floating point aggregate if all
3865 fields/elements in it have the same floating point type (e.g.,
3866 SFmode). 128-bit quad-precision floats are excluded.
3867
3868 Variable sized aggregates should never arrive here, since we should
3869 have already decided to pass them by reference. Top-level zero-sized
3870 aggregates are excluded because our parallels crash the middle-end. */
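
/* Illustrative examples (the types are made up for this comment): a record
   such as  struct { float x, y, z; }  consists entirely of SFmode leaves and
   is an HFA with element mode SFmode, as is  struct { double d[4]; }  with
   DFmode.  Mixing float modes, as in  struct { float f; double d; },  or
   adding a non-float field, as in  struct { float f; int i; },  makes
   hfa_element_mode return VOIDmode.  */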
3871
3872 static enum machine_mode
3873 hfa_element_mode (const_tree type, bool nested)
3874 {
3875 enum machine_mode element_mode = VOIDmode;
3876 enum machine_mode mode;
3877 enum tree_code code = TREE_CODE (type);
3878 int know_element_mode = 0;
3879 tree t;
3880
3881 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3882 return VOIDmode;
3883
3884 switch (code)
3885 {
3886 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3887 case BOOLEAN_TYPE: case POINTER_TYPE:
3888 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3889 case LANG_TYPE: case FUNCTION_TYPE:
3890 return VOIDmode;
3891
3892 /* Fortran complex types are supposed to be HFAs, so we need to handle
3893 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3894 types though. */
3895 case COMPLEX_TYPE:
3896 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3897 && TYPE_MODE (type) != TCmode)
3898 return GET_MODE_INNER (TYPE_MODE (type));
3899 else
3900 return VOIDmode;
3901
3902 case REAL_TYPE:
3903 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3904 mode if this is contained within an aggregate. */
3905 if (nested && TYPE_MODE (type) != TFmode)
3906 return TYPE_MODE (type);
3907 else
3908 return VOIDmode;
3909
3910 case ARRAY_TYPE:
3911 return hfa_element_mode (TREE_TYPE (type), 1);
3912
3913 case RECORD_TYPE:
3914 case UNION_TYPE:
3915 case QUAL_UNION_TYPE:
3916 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3917 {
3918 if (TREE_CODE (t) != FIELD_DECL)
3919 continue;
3920
3921 mode = hfa_element_mode (TREE_TYPE (t), 1);
3922 if (know_element_mode)
3923 {
3924 if (mode != element_mode)
3925 return VOIDmode;
3926 }
3927 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3928 return VOIDmode;
3929 else
3930 {
3931 know_element_mode = 1;
3932 element_mode = mode;
3933 }
3934 }
3935 return element_mode;
3936
3937 default:
3938 /* If we reach here, we probably have some front-end specific type
3939 that the backend doesn't know about. This can happen via the
3940 aggregate_value_p call in init_function_start. All we can do is
3941 ignore unknown tree types. */
3942 return VOIDmode;
3943 }
3944
3945 return VOIDmode;
3946 }
3947
3948 /* Return the number of words required to hold a quantity of TYPE and MODE
3949 when passed as an argument. */
3950 static int
3951 ia64_function_arg_words (tree type, enum machine_mode mode)
3952 {
3953 int words;
3954
3955 if (mode == BLKmode)
3956 words = int_size_in_bytes (type);
3957 else
3958 words = GET_MODE_SIZE (mode);
3959
3960 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3961 }
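
/* E.g. with UNITS_PER_WORD == 8 on this target, a 20-byte BLKmode aggregate
   needs (20 + 7) / 8 = 3 argument words, while a DImode scalar needs
   exactly 1.  */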
3962
3963 /* Return the number of registers that should be skipped so the current
3964 argument (described by TYPE and WORDS) will be properly aligned.
3965
3966 Integer and float arguments larger than 8 bytes start at the next
3967 even boundary. Aggregates larger than 8 bytes start at the next
3968 even boundary if the aggregate has 16 byte alignment. Note that
3969 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3970 but are still to be aligned in registers.
3971
3972 ??? The ABI does not specify how to handle aggregates with
3973 alignment from 9 to 15 bytes, or greater than 16. We handle them
3974 all as if they had 16 byte alignment. Such aggregates can occur
3975 only if gcc extensions are used. */
3976 static int
3977 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3978 {
3979 if ((cum->words & 1) == 0)
3980 return 0;
3981
3982 if (type
3983 && TREE_CODE (type) != INTEGER_TYPE
3984 && TREE_CODE (type) != REAL_TYPE)
3985 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3986 else
3987 return words > 1;
3988 }
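
/* Worked example: if three argument words are already in use (cum->words is
   odd) and the next argument is a 16-byte-aligned aggregate, one slot is
   skipped and the aggregate starts at the next even slot; a one-word integer
   in the same position gets offset 0 and is not realigned.  */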
3989
3990 /* Return rtx for register where argument is passed, or zero if it is passed
3991 on the stack. */
3992 /* ??? 128-bit quad-precision floats are always passed in general
3993 registers. */
3994
3995 rtx
3996 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3997 int named, int incoming)
3998 {
3999 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4000 int words = ia64_function_arg_words (type, mode);
4001 int offset = ia64_function_arg_offset (cum, type, words);
4002 enum machine_mode hfa_mode = VOIDmode;
4003
4004 /* If all argument slots are used, then it must go on the stack. */
4005 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4006 return 0;
4007
4008 /* Check for and handle homogeneous FP aggregates. */
4009 if (type)
4010 hfa_mode = hfa_element_mode (type, 0);
4011
4012 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4013 and unprototyped hfas are passed specially. */
4014 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4015 {
4016 rtx loc[16];
4017 int i = 0;
4018 int fp_regs = cum->fp_regs;
4019 int int_regs = cum->words + offset;
4020 int hfa_size = GET_MODE_SIZE (hfa_mode);
4021 int byte_size;
4022 int args_byte_size;
4023
4024 /* If prototyped, pass it in FR regs then GR regs.
4025 If not prototyped, pass it in both FR and GR regs.
4026
4027 If this is an SFmode aggregate, then it is possible to run out of
4028 FR regs while GR regs are still left. In that case, we pass the
4029 remaining part in the GR regs. */
4030
4031 /* Fill the FP regs. We do this always. We stop if we reach the end
4032 of the argument, the last FP register, or the last argument slot. */
4033
4034 byte_size = ((mode == BLKmode)
4035 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4036 args_byte_size = int_regs * UNITS_PER_WORD;
4037 offset = 0;
4038 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4039 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4040 {
4041 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4042 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4043 + fp_regs)),
4044 GEN_INT (offset));
4045 offset += hfa_size;
4046 args_byte_size += hfa_size;
4047 fp_regs++;
4048 }
4049
4050 /* If no prototype, then the whole thing must go in GR regs. */
4051 if (! cum->prototype)
4052 offset = 0;
4053 /* If this is an SFmode aggregate, then we might have some left over
4054 that needs to go in GR regs. */
4055 else if (byte_size != offset)
4056 int_regs += offset / UNITS_PER_WORD;
4057
4058 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4059
4060 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4061 {
4062 enum machine_mode gr_mode = DImode;
4063 unsigned int gr_size;
4064
4065 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4066 then this goes in a GR reg left adjusted/little endian, right
4067 adjusted/big endian. */
4068 /* ??? Currently this is handled wrong, because 4-byte hunks are
4069 always right adjusted/little endian. */
4070 if (offset & 0x4)
4071 gr_mode = SImode;
4072 /* If we have an even 4 byte hunk because the aggregate is a
4073 multiple of 4 bytes in size, then this goes in a GR reg right
4074 adjusted/little endian. */
4075 else if (byte_size - offset == 4)
4076 gr_mode = SImode;
4077
4078 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4079 gen_rtx_REG (gr_mode, (basereg
4080 + int_regs)),
4081 GEN_INT (offset));
4082
4083 gr_size = GET_MODE_SIZE (gr_mode);
4084 offset += gr_size;
4085 if (gr_size == UNITS_PER_WORD
4086 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4087 int_regs++;
4088 else if (gr_size > UNITS_PER_WORD)
4089 int_regs += gr_size / UNITS_PER_WORD;
4090 }
4091 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4092 }
4093
4094 /* Integral values and aggregates go in general registers.  If we have run
4095 out of FR registers, then FP values must also go in general registers.
4096 This can happen when we have an SFmode HFA.  */
4097 else if (mode == TFmode || mode == TCmode
4098 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4099 {
4100 int byte_size = ((mode == BLKmode)
4101 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4102 if (BYTES_BIG_ENDIAN
4103 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4104 && byte_size < UNITS_PER_WORD
4105 && byte_size > 0)
4106 {
4107 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4108 gen_rtx_REG (DImode,
4109 (basereg + cum->words
4110 + offset)),
4111 const0_rtx);
4112 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4113 }
4114 else
4115 return gen_rtx_REG (mode, basereg + cum->words + offset);
4116
4117 }
4118
4119 /* If there is a prototype, then FP values go in a FR register when
4120 named, and in a GR register when unnamed. */
4121 else if (cum->prototype)
4122 {
4123 if (named)
4124 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4125 /* In big-endian mode, an anonymous SFmode value must be represented
4126 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4127 the value into the high half of the general register. */
4128 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4129 return gen_rtx_PARALLEL (mode,
4130 gen_rtvec (1,
4131 gen_rtx_EXPR_LIST (VOIDmode,
4132 gen_rtx_REG (DImode, basereg + cum->words + offset),
4133 const0_rtx)));
4134 else
4135 return gen_rtx_REG (mode, basereg + cum->words + offset);
4136 }
4137 /* If there is no prototype, then FP values go in both FR and GR
4138 registers. */
4139 else
4140 {
4141 /* See comment above. */
4142 enum machine_mode inner_mode =
4143 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4144
4145 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4146 gen_rtx_REG (mode, (FR_ARG_FIRST
4147 + cum->fp_regs)),
4148 const0_rtx);
4149 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4150 gen_rtx_REG (inner_mode,
4151 (basereg + cum->words
4152 + offset)),
4153 const0_rtx);
4154
4155 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4156 }
4157 }
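
/* A sketch of the HFA path above: for a named, prototyped argument of type
   struct { float a, b, c, d; }  arriving with cum->fp_regs == 0, the first
   loop emits a PARALLEL of four SFmode pieces in the first four FP argument
   registers at byte offsets 0, 4, 8 and 12; no GR pieces are needed because
   the whole 16 bytes fit in FR regs.  */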
4158
4159 /* Return the number of bytes, at the beginning of the argument, that must be
4160 put in registers.  0 if the argument is entirely in registers or entirely
4161 in memory. */
4162
4163 static int
4164 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4165 tree type, bool named ATTRIBUTE_UNUSED)
4166 {
4167 int words = ia64_function_arg_words (type, mode);
4168 int offset = ia64_function_arg_offset (cum, type, words);
4169
4170 /* If all argument slots are used, then it must go on the stack. */
4171 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4172 return 0;
4173
4174 /* It doesn't matter whether the argument goes in FR or GR regs. If
4175 it fits within the 8 argument slots, then it goes entirely in
4176 registers. If it extends past the last argument slot, then the rest
4177 goes on the stack. */
4178
4179 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4180 return 0;
4181
4182 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4183 }
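
/* Worked example: an argument occupying 4 words whose first word lands in
   slot 6 fits only 2 words in registers, so this returns
   2 * UNITS_PER_WORD; the remaining 2 words go on the stack.  */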
4184
4185 /* Update CUM to point after this argument. This is patterned after
4186 ia64_function_arg. */
4187
4188 void
4189 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4190 tree type, int named)
4191 {
4192 int words = ia64_function_arg_words (type, mode);
4193 int offset = ia64_function_arg_offset (cum, type, words);
4194 enum machine_mode hfa_mode = VOIDmode;
4195
4196 /* If all arg slots are already full, then there is nothing to do. */
4197 if (cum->words >= MAX_ARGUMENT_SLOTS)
4198 return;
4199
4200 cum->words += words + offset;
4201
4202 /* Check for and handle homogeneous FP aggregates. */
4203 if (type)
4204 hfa_mode = hfa_element_mode (type, 0);
4205
4206 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4207 and unprototyped hfas are passed specially. */
4208 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4209 {
4210 int fp_regs = cum->fp_regs;
4211 /* This is the original value of cum->words + offset. */
4212 int int_regs = cum->words - words;
4213 int hfa_size = GET_MODE_SIZE (hfa_mode);
4214 int byte_size;
4215 int args_byte_size;
4216
4217 /* If prototyped, pass it in FR regs then GR regs.
4218 If not prototyped, pass it in both FR and GR regs.
4219
4220 If this is an SFmode aggregate, then it is possible to run out of
4221 FR regs while GR regs are still left. In that case, we pass the
4222 remaining part in the GR regs. */
4223
4224 /* Fill the FP regs. We do this always. We stop if we reach the end
4225 of the argument, the last FP register, or the last argument slot. */
4226
4227 byte_size = ((mode == BLKmode)
4228 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4229 args_byte_size = int_regs * UNITS_PER_WORD;
4230 offset = 0;
4231 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4232 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4233 {
4234 offset += hfa_size;
4235 args_byte_size += hfa_size;
4236 fp_regs++;
4237 }
4238
4239 cum->fp_regs = fp_regs;
4240 }
4241
4242 /* Integral values and aggregates go in general registers.  So do TFmode FP
4243 values.  If we have run out of FR registers, then other FP values must also
4244 go in general registers.  This can happen when we have an SFmode HFA.  */
4245 else if (mode == TFmode || mode == TCmode
4246 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4247 cum->int_regs = cum->words;
4248
4249 /* If there is a prototype, then FP values go in a FR register when
4250 named, and in a GR register when unnamed. */
4251 else if (cum->prototype)
4252 {
4253 if (! named)
4254 cum->int_regs = cum->words;
4255 else
4256 /* ??? Complex types should not reach here. */
4257 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4258 }
4259 /* If there is no prototype, then FP values go in both FR and GR
4260 registers. */
4261 else
4262 {
4263 /* ??? Complex types should not reach here. */
4264 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4265 cum->int_regs = cum->words;
4266 }
4267 }
4268
4269 /* Arguments with alignment larger than 8 bytes start at the next even
4270 boundary.  On ILP32 HPUX, TFmode arguments start on the next even boundary
4271 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4272
4273 int
4274 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4275 {
4276
4277 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4278 return PARM_BOUNDARY * 2;
4279
4280 if (type)
4281 {
4282 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4283 return PARM_BOUNDARY * 2;
4284 else
4285 return PARM_BOUNDARY;
4286 }
4287
4288 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4289 return PARM_BOUNDARY * 2;
4290 else
4291 return PARM_BOUNDARY;
4292 }
4293
4294 /* True if it is OK to do sibling call optimization for the specified
4295 call expression EXP. DECL will be the called function, or NULL if
4296 this is an indirect call. */
4297 static bool
4298 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4299 {
4300 /* We can't perform a sibcall if the current function has the syscall_linkage
4301 attribute. */
4302 if (lookup_attribute ("syscall_linkage",
4303 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4304 return false;
4305
4306 /* We must always return with our current GP. This means we can
4307 only sibcall to functions defined in the current module. */
4308 return decl && (*targetm.binds_local_p) (decl);
4309 }
4310 \f
4311
4312 /* Implement va_arg. */
4313
4314 static tree
4315 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4316 {
4317 /* Variable sized types are passed by reference. */
4318 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4319 {
4320 tree ptrtype = build_pointer_type (type);
4321 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4322 return build_va_arg_indirect_ref (addr);
4323 }
4324
4325 /* Aggregate arguments with alignment larger than 8 bytes start at
4326 the next even boundary. Integer and floating point arguments
4327 do so if they are larger than 8 bytes, whether or not they are
4328 also aligned larger than 8 bytes. */
4329 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4330 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4331 {
4332 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4333 size_int (2 * UNITS_PER_WORD - 1));
4334 t = fold_convert (sizetype, t);
4335 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4336 size_int (-2 * UNITS_PER_WORD));
4337 t = fold_convert (TREE_TYPE (valist), t);
4338 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
4339 gimplify_and_add (t, pre_p);
4340 }
4341
4342 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4343 }
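
/* The realignment above is the usual round-up idiom: with UNITS_PER_WORD
   == 8 it computes  valist = (valist + 15) & -16,  so a va_list pointer of,
   say, 0x...28 advances to 0x...30 before an over-sized or over-aligned
   argument is fetched.  */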
4344 \f
4345 /* Return true if the function's return value is returned in memory, and
4346    false if it is in a register.  */
4347
4348 static bool
4349 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4350 {
4351 enum machine_mode mode;
4352 enum machine_mode hfa_mode;
4353 HOST_WIDE_INT byte_size;
4354
4355 mode = TYPE_MODE (valtype);
4356 byte_size = GET_MODE_SIZE (mode);
4357 if (mode == BLKmode)
4358 {
4359 byte_size = int_size_in_bytes (valtype);
4360 if (byte_size < 0)
4361 return true;
4362 }
4363
4364 /* HFAs with up to 8 elements are returned in the FP argument registers.  */
4365
4366 hfa_mode = hfa_element_mode (valtype, 0);
4367 if (hfa_mode != VOIDmode)
4368 {
4369 int hfa_size = GET_MODE_SIZE (hfa_mode);
4370
4371 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4372 return true;
4373 else
4374 return false;
4375 }
4376 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4377 return true;
4378 else
4379 return false;
4380 }
4381
4382 /* Return rtx for register that holds the function return value. */
4383
4384 rtx
4385 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4386 {
4387 enum machine_mode mode;
4388 enum machine_mode hfa_mode;
4389
4390 mode = TYPE_MODE (valtype);
4391 hfa_mode = hfa_element_mode (valtype, 0);
4392
4393 if (hfa_mode != VOIDmode)
4394 {
4395 rtx loc[8];
4396 int i;
4397 int hfa_size;
4398 int byte_size;
4399 int offset;
4400
4401 hfa_size = GET_MODE_SIZE (hfa_mode);
4402 byte_size = ((mode == BLKmode)
4403 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4404 offset = 0;
4405 for (i = 0; offset < byte_size; i++)
4406 {
4407 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4408 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4409 GEN_INT (offset));
4410 offset += hfa_size;
4411 }
4412 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4413 }
4414 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4415 return gen_rtx_REG (mode, FR_ARG_FIRST);
4416 else
4417 {
4418 bool need_parallel = false;
4419
4420 /* In big-endian mode, we need to manage the layout of aggregates
4421 in the registers so that we get the bits properly aligned in
4422 the highpart of the registers. */
4423 if (BYTES_BIG_ENDIAN
4424 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4425 need_parallel = true;
4426
4427 /* Something like struct S { long double x; char a[0] } is not an
4428 HFA structure, and therefore doesn't go in fp registers. But
4429 the middle-end will give it XFmode anyway, and XFmode values
4430 don't normally fit in integer registers. So we need to smuggle
4431 the value inside a parallel. */
4432 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4433 need_parallel = true;
4434
4435 if (need_parallel)
4436 {
4437 rtx loc[8];
4438 int offset;
4439 int bytesize;
4440 int i;
4441
4442 offset = 0;
4443 bytesize = int_size_in_bytes (valtype);
4444 /* An empty PARALLEL is invalid here, but the return value
4445 doesn't matter for empty structs. */
4446 if (bytesize == 0)
4447 return gen_rtx_REG (mode, GR_RET_FIRST);
4448 for (i = 0; offset < bytesize; i++)
4449 {
4450 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4451 gen_rtx_REG (DImode,
4452 GR_RET_FIRST + i),
4453 GEN_INT (offset));
4454 offset += UNITS_PER_WORD;
4455 }
4456 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4457 }
4458
4459 return gen_rtx_REG (mode, GR_RET_FIRST);
4460 }
4461 }
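
/* For example, a return type of  struct { double a, b; }  is an HFA of two
   DFmode elements, so the value comes back as a PARALLEL of two DFmode FP
   registers at offsets 0 and 8 rather than in memory; the same struct with
   one double and one long is not an HFA and would normally come back in the
   general return registers starting at GR_RET_FIRST.  */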
4462
4463 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4464 We need to emit DTP-relative relocations. */
4465
4466 static void
4467 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4468 {
4469 gcc_assert (size == 4 || size == 8);
4470 if (size == 4)
4471 fputs ("\tdata4.ua\t@dtprel(", file);
4472 else
4473 fputs ("\tdata8.ua\t@dtprel(", file);
4474 output_addr_const (file, x);
4475 fputs (")", file);
4476 }
4477
4478 /* Print a memory address as an operand to reference that memory location. */
4479
4480 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4481 also call this from ia64_print_operand for memory addresses. */
4482
4483 void
4484 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4485 rtx address ATTRIBUTE_UNUSED)
4486 {
4487 }
4488
4489 /* Print an operand to an assembler instruction.
4490 C Swap and print a comparison operator.
4491 D Print an FP comparison operator.
4492 E Print 32 - constant, for SImode shifts as extract.
4493 e Print 64 - constant, for DImode rotates.
4494 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4495 a floating point register emitted normally.
4496 I Invert a predicate register by adding 1.
4497 J Select the proper predicate register for a condition.
4498 j Select the inverse predicate register for a condition.
4499 O Append .acq for volatile load.
4500 P Postincrement of a MEM.
4501 Q Append .rel for volatile store.
4502 R Print .s .d or nothing for a single, double or no truncation.
4503 S Shift amount for shladd instruction.
4504 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4505 for Intel assembler.
4506 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4507 for Intel assembler.
4508 X A pair of floating point registers.
4509 r Print register name, or constant 0 as r0. HP compatibility for
4510 Linux kernel.
4511 v Print vector constant value as an 8-byte integer value. */
4512
4513 void
4514 ia64_print_operand (FILE * file, rtx x, int code)
4515 {
4516 const char *str;
4517
4518 switch (code)
4519 {
4520 case 0:
4521 /* Handled below. */
4522 break;
4523
4524 case 'C':
4525 {
4526 enum rtx_code c = swap_condition (GET_CODE (x));
4527 fputs (GET_RTX_NAME (c), file);
4528 return;
4529 }
4530
4531 case 'D':
4532 switch (GET_CODE (x))
4533 {
4534 case NE:
4535 str = "neq";
4536 break;
4537 case UNORDERED:
4538 str = "unord";
4539 break;
4540 case ORDERED:
4541 str = "ord";
4542 break;
4543 case UNLT:
4544 str = "nge";
4545 break;
4546 case UNLE:
4547 str = "ngt";
4548 break;
4549 case UNGT:
4550 str = "nle";
4551 break;
4552 case UNGE:
4553 str = "nlt";
4554 break;
4555 default:
4556 str = GET_RTX_NAME (GET_CODE (x));
4557 break;
4558 }
4559 fputs (str, file);
4560 return;
4561
4562 case 'E':
4563 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4564 return;
4565
4566 case 'e':
4567 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4568 return;
4569
4570 case 'F':
4571 if (x == CONST0_RTX (GET_MODE (x)))
4572 str = reg_names [FR_REG (0)];
4573 else if (x == CONST1_RTX (GET_MODE (x)))
4574 str = reg_names [FR_REG (1)];
4575 else
4576 {
4577 gcc_assert (GET_CODE (x) == REG);
4578 str = reg_names [REGNO (x)];
4579 }
4580 fputs (str, file);
4581 return;
4582
4583 case 'I':
4584 fputs (reg_names [REGNO (x) + 1], file);
4585 return;
4586
4587 case 'J':
4588 case 'j':
4589 {
4590 unsigned int regno = REGNO (XEXP (x, 0));
4591 if (GET_CODE (x) == EQ)
4592 regno += 1;
4593 if (code == 'j')
4594 regno ^= 1;
4595 fputs (reg_names [regno], file);
4596 }
4597 return;
4598
4599 case 'O':
4600 if (MEM_VOLATILE_P (x))
4601 fputs(".acq", file);
4602 return;
4603
4604 case 'P':
4605 {
4606 HOST_WIDE_INT value;
4607
4608 switch (GET_CODE (XEXP (x, 0)))
4609 {
4610 default:
4611 return;
4612
4613 case POST_MODIFY:
4614 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4615 if (GET_CODE (x) == CONST_INT)
4616 value = INTVAL (x);
4617 else
4618 {
4619 gcc_assert (GET_CODE (x) == REG);
4620 fprintf (file, ", %s", reg_names[REGNO (x)]);
4621 return;
4622 }
4623 break;
4624
4625 case POST_INC:
4626 value = GET_MODE_SIZE (GET_MODE (x));
4627 break;
4628
4629 case POST_DEC:
4630 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4631 break;
4632 }
4633
4634 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4635 return;
4636 }
4637
4638 case 'Q':
4639 if (MEM_VOLATILE_P (x))
4640 fputs(".rel", file);
4641 return;
4642
4643 case 'R':
4644 if (x == CONST0_RTX (GET_MODE (x)))
4645 fputs(".s", file);
4646 else if (x == CONST1_RTX (GET_MODE (x)))
4647 fputs(".d", file);
4648 else if (x == CONST2_RTX (GET_MODE (x)))
4649 ;
4650 else
4651 output_operand_lossage ("invalid %%R value");
4652 return;
4653
4654 case 'S':
4655 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4656 return;
4657
4658 case 'T':
4659 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4660 {
4661 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4662 return;
4663 }
4664 break;
4665
4666 case 'U':
4667 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4668 {
4669 const char *prefix = "0x";
4670 if (INTVAL (x) & 0x80000000)
4671 {
4672 fprintf (file, "0xffffffff");
4673 prefix = "";
4674 }
4675 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4676 return;
4677 }
4678 break;
4679
4680 case 'X':
4681 {
4682 unsigned int regno = REGNO (x);
4683 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4684 }
4685 return;
4686
4687 case 'r':
4688 /* If this operand is the constant zero, write it as register zero.
4689 Any register, zero, or CONST_INT value is OK here. */
4690 if (GET_CODE (x) == REG)
4691 fputs (reg_names[REGNO (x)], file);
4692 else if (x == CONST0_RTX (GET_MODE (x)))
4693 fputs ("r0", file);
4694 else if (GET_CODE (x) == CONST_INT)
4695 output_addr_const (file, x);
4696 else
4697 output_operand_lossage ("invalid %%r value");
4698 return;
4699
4700 case 'v':
4701 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4702 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4703 break;
4704
4705 case '+':
4706 {
4707 const char *which;
4708
4709 /* For conditional branches, returns or calls, substitute
4710 sptk, dptk, dpnt, or spnt for %s. */
4711 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4712 if (x)
4713 {
4714 int pred_val = INTVAL (XEXP (x, 0));
4715
4716 /* Guess top and bottom 10% statically predicted. */
4717 if (pred_val < REG_BR_PROB_BASE / 50
4718 && br_prob_note_reliable_p (x))
4719 which = ".spnt";
4720 else if (pred_val < REG_BR_PROB_BASE / 2)
4721 which = ".dpnt";
4722 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4723 || !br_prob_note_reliable_p (x))
4724 which = ".dptk";
4725 else
4726 which = ".sptk";
4727 }
4728 else if (GET_CODE (current_output_insn) == CALL_INSN)
4729 which = ".sptk";
4730 else
4731 which = ".dptk";
4732
4733 fputs (which, file);
4734 return;
4735 }
4736
4737 case ',':
4738 x = current_insn_predicate;
4739 if (x)
4740 {
4741 unsigned int regno = REGNO (XEXP (x, 0));
4742 if (GET_CODE (x) == EQ)
4743 regno += 1;
4744 fprintf (file, "(%s) ", reg_names [regno]);
4745 }
4746 return;
4747
4748 default:
4749 output_operand_lossage ("ia64_print_operand: unknown code");
4750 return;
4751 }
4752
4753 switch (GET_CODE (x))
4754 {
4755 /* This happens for the spill/restore instructions. */
4756 case POST_INC:
4757 case POST_DEC:
4758 case POST_MODIFY:
4759 x = XEXP (x, 0);
4760 /* ... fall through ... */
4761
4762 case REG:
4763 fputs (reg_names [REGNO (x)], file);
4764 break;
4765
4766 case MEM:
4767 {
4768 rtx addr = XEXP (x, 0);
4769 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4770 addr = XEXP (addr, 0);
4771 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4772 break;
4773 }
4774
4775 default:
4776 output_addr_const (file, x);
4777 break;
4778 }
4779
4780 return;
4781 }
4782 \f
4783 /* Compute a (partial) cost for rtx X. Return true if the complete
4784 cost has been computed, and false if subexpressions should be
4785 scanned. In either case, *TOTAL contains the cost result. */
4786 /* ??? This is incomplete. */
4787
4788 static bool
4789 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4790 {
4791 switch (code)
4792 {
4793 case CONST_INT:
4794 switch (outer_code)
4795 {
4796 case SET:
4797 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4798 return true;
4799 case PLUS:
4800 if (satisfies_constraint_I (x))
4801 *total = 0;
4802 else if (satisfies_constraint_J (x))
4803 *total = 1;
4804 else
4805 *total = COSTS_N_INSNS (1);
4806 return true;
4807 default:
4808 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4809 *total = 0;
4810 else
4811 *total = COSTS_N_INSNS (1);
4812 return true;
4813 }
4814
4815 case CONST_DOUBLE:
4816 *total = COSTS_N_INSNS (1);
4817 return true;
4818
4819 case CONST:
4820 case SYMBOL_REF:
4821 case LABEL_REF:
4822 *total = COSTS_N_INSNS (3);
4823 return true;
4824
4825 case MULT:
4826 /* For multiplies wider than HImode, we have to go to the FPU,
4827 which normally involves copies. Plus there's the latency
4828 of the multiply itself, and the latency of the instructions to
4829 transfer integer regs to FP regs. */
4830 /* ??? Check for FP mode. */
4831 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4832 *total = COSTS_N_INSNS (10);
4833 else
4834 *total = COSTS_N_INSNS (2);
4835 return true;
4836
4837 case PLUS:
4838 case MINUS:
4839 case ASHIFT:
4840 case ASHIFTRT:
4841 case LSHIFTRT:
4842 *total = COSTS_N_INSNS (1);
4843 return true;
4844
4845 case DIV:
4846 case UDIV:
4847 case MOD:
4848 case UMOD:
4849 /* We make divide expensive, so that divide-by-constant will be
4850 optimized to a multiply. */
4851 *total = COSTS_N_INSNS (60);
4852 return true;
4853
4854 default:
4855 return false;
4856 }
4857 }
4858
4859 /* Calculate the cost of moving data from a register in class FROM to
4860 one in class TO, using MODE. */
4861
4862 int
4863 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4864 enum reg_class to)
4865 {
4866 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4867 if (to == ADDL_REGS)
4868 to = GR_REGS;
4869 if (from == ADDL_REGS)
4870 from = GR_REGS;
4871
4872 /* All costs are symmetric, so reduce cases by putting the
4873 lower-numbered class as the destination.  */
4874 if (from < to)
4875 {
4876 enum reg_class tmp = to;
4877 to = from, from = tmp;
4878 }
4879
4880 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4881 so that we get secondary memory reloads. Between FR_REGS,
4882 we have to make this at least as expensive as MEMORY_MOVE_COST
4883 to avoid spectacularly poor register class preferencing. */
4884 if (mode == XFmode || mode == RFmode)
4885 {
4886 if (to != GR_REGS || from != GR_REGS)
4887 return MEMORY_MOVE_COST (mode, to, 0);
4888 else
4889 return 3;
4890 }
4891
4892 switch (to)
4893 {
4894 case PR_REGS:
4895 /* Moving between PR registers takes two insns. */
4896 if (from == PR_REGS)
4897 return 3;
4898 /* Moving between PR and anything but GR is impossible. */
4899 if (from != GR_REGS)
4900 return MEMORY_MOVE_COST (mode, to, 0);
4901 break;
4902
4903 case BR_REGS:
4904 /* Moving between BR and anything but GR is impossible. */
4905 if (from != GR_REGS && from != GR_AND_BR_REGS)
4906 return MEMORY_MOVE_COST (mode, to, 0);
4907 break;
4908
4909 case AR_I_REGS:
4910 case AR_M_REGS:
4911 /* Moving between AR and anything but GR is impossible. */
4912 if (from != GR_REGS)
4913 return MEMORY_MOVE_COST (mode, to, 0);
4914 break;
4915
4916 case GR_REGS:
4917 case FR_REGS:
4918 case FP_REGS:
4919 case GR_AND_FR_REGS:
4920 case GR_AND_BR_REGS:
4921 case ALL_REGS:
4922 break;
4923
4924 default:
4925 gcc_unreachable ();
4926 }
4927
4928 return 2;
4929 }
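
/* Usage sketch: ia64_register_move_cost (XFmode, FR_REGS, GR_REGS) returns
   MEMORY_MOVE_COST for the mode, which is greater than 2 and therefore
   steers reload toward a secondary memory copy, while an ordinary DImode
   GR<->GR or GR<->FR move stays at the default cost of 2.  */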
4930
4931 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4932 to use when copying X into that class. */
4933
4934 enum reg_class
4935 ia64_preferred_reload_class (rtx x, enum reg_class class)
4936 {
4937 switch (class)
4938 {
4939 case FR_REGS:
4940 case FP_REGS:
4941 /* Don't allow volatile mem reloads into floating point registers.
4942 This is defined to force reload to choose the r/m case instead
4943 of the f/f case when reloading (set (reg fX) (mem/v)). */
4944 if (MEM_P (x) && MEM_VOLATILE_P (x))
4945 return NO_REGS;
4946
4947 /* Force all unrecognized constants into the constant pool. */
4948 if (CONSTANT_P (x))
4949 return NO_REGS;
4950 break;
4951
4952 case AR_M_REGS:
4953 case AR_I_REGS:
4954 if (!OBJECT_P (x))
4955 return NO_REGS;
4956 break;
4957
4958 default:
4959 break;
4960 }
4961
4962 return class;
4963 }
4964
4965 /* This function returns the register class required for a secondary
4966 register when copying between one of the registers in CLASS, and X,
4967 using MODE. A return value of NO_REGS means that no secondary register
4968 is required. */
4969
4970 enum reg_class
4971 ia64_secondary_reload_class (enum reg_class class,
4972 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4973 {
4974 int regno = -1;
4975
4976 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4977 regno = true_regnum (x);
4978
4979 switch (class)
4980 {
4981 case BR_REGS:
4982 case AR_M_REGS:
4983 case AR_I_REGS:
4984 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4985 interaction.  We end up with two pseudos with overlapping lifetimes,
4986 both of which are equiv to the same constant and both of which need
4987 to be in BR_REGS.  This seems to be a cse bug.  cse_basic_block_end
4988 changes depending on the path length, which means the qty_first_reg
4989 check in make_regs_eqv can give different answers at different times.
4990 At some point I'll probably need a reload_indi pattern to handle
4991 this.
4992
4993 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4994 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4995 non-general registers for good measure. */
4996 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4997 return GR_REGS;
4998
4999 /* This is needed if a pseudo used as a call_operand gets spilled to a
5000 stack slot. */
5001 if (GET_CODE (x) == MEM)
5002 return GR_REGS;
5003 break;
5004
5005 case FR_REGS:
5006 case FP_REGS:
5007 /* Need to go through general registers to get to other class regs. */
5008 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5009 return GR_REGS;
5010
5011 /* This can happen when a paradoxical subreg is an operand to the
5012 muldi3 pattern. */
5013 /* ??? This shouldn't be necessary after instruction scheduling is
5014 enabled, because paradoxical subregs are not accepted by
5015 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5016 stop the paradoxical subreg stupidity in the *_operand functions
5017 in recog.c. */
5018 if (GET_CODE (x) == MEM
5019 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5020 || GET_MODE (x) == QImode))
5021 return GR_REGS;
5022
5023 /* This can happen because of the ior/and/etc patterns that accept FP
5024 registers as operands. If the third operand is a constant, then it
5025 needs to be reloaded into a FP register. */
5026 if (GET_CODE (x) == CONST_INT)
5027 return GR_REGS;
5028
5029 /* This can happen because of register elimination in a muldi3 insn.
5030 E.g. `26107 * (unsigned long)&u'. */
5031 if (GET_CODE (x) == PLUS)
5032 return GR_REGS;
5033 break;
5034
5035 case PR_REGS:
5036 /* ??? This happens if we cse/gcse a BImode value across a call,
5037 and the function has a nonlocal goto. This is because global
5038 does not allocate call crossing pseudos to hard registers when
5039 current_function_has_nonlocal_goto is true. This is relatively
5040 common for C++ programs that use exceptions. To reproduce,
5041 return NO_REGS and compile libstdc++. */
5042 if (GET_CODE (x) == MEM)
5043 return GR_REGS;
5044
5045 /* This can happen when we take a BImode subreg of a DImode value,
5046 and that DImode value winds up in some non-GR register. */
5047 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5048 return GR_REGS;
5049 break;
5050
5051 default:
5052 break;
5053 }
5054
5055 return NO_REGS;
5056 }
5057
5058 \f
5059 /* Parse the -mfixed-range= option string. */
5060
5061 static void
5062 fix_range (const char *const_str)
5063 {
5064 int i, first, last;
5065 char *str, *dash, *comma;
5066
5067 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5068 REG2 are either register names or register numbers. The effect
5069 of this option is to mark the registers in the range from REG1 to
5070 REG2 as ``fixed'' so they won't be used by the compiler. This is
5071 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5072
5073 i = strlen (const_str);
5074 str = (char *) alloca (i + 1);
5075 memcpy (str, const_str, i + 1);
5076
5077 while (1)
5078 {
5079 dash = strchr (str, '-');
5080 if (!dash)
5081 {
5082 warning (0, "value of -mfixed-range must have form REG1-REG2");
5083 return;
5084 }
5085 *dash = '\0';
5086
5087 comma = strchr (dash + 1, ',');
5088 if (comma)
5089 *comma = '\0';
5090
5091 first = decode_reg_name (str);
5092 if (first < 0)
5093 {
5094 warning (0, "unknown register name: %s", str);
5095 return;
5096 }
5097
5098 last = decode_reg_name (dash + 1);
5099 if (last < 0)
5100 {
5101 warning (0, "unknown register name: %s", dash + 1);
5102 return;
5103 }
5104
5105 *dash = '-';
5106
5107 if (first > last)
5108 {
5109 warning (0, "%s-%s is an empty range", str, dash + 1);
5110 return;
5111 }
5112
5113 for (i = first; i <= last; ++i)
5114 fixed_regs[i] = call_used_regs[i] = 1;
5115
5116 if (!comma)
5117 break;
5118
5119 *comma = ',';
5120 str = comma + 1;
5121 }
5122 }
5123
5124 /* Implement TARGET_HANDLE_OPTION. */
5125
5126 static bool
5127 ia64_handle_option (size_t code, const char *arg, int value)
5128 {
5129 switch (code)
5130 {
5131 case OPT_mfixed_range_:
5132 fix_range (arg);
5133 return true;
5134
5135 case OPT_mtls_size_:
5136 if (value != 14 && value != 22 && value != 64)
5137 error ("bad value %<%s%> for -mtls-size= switch", arg);
5138 return true;
5139
5140 case OPT_mtune_:
5141 {
5142 static struct pta
5143 {
5144 const char *name; /* processor name or nickname. */
5145 enum processor_type processor;
5146 }
5147 const processor_alias_table[] =
5148 {
5149 {"itanium", PROCESSOR_ITANIUM},
5150 {"itanium1", PROCESSOR_ITANIUM},
5151 {"merced", PROCESSOR_ITANIUM},
5152 {"itanium2", PROCESSOR_ITANIUM2},
5153 {"mckinley", PROCESSOR_ITANIUM2},
5154 };
5155 int const pta_size = ARRAY_SIZE (processor_alias_table);
5156 int i;
5157
5158 for (i = 0; i < pta_size; i++)
5159 if (!strcmp (arg, processor_alias_table[i].name))
5160 {
5161 ia64_tune = processor_alias_table[i].processor;
5162 break;
5163 }
5164 if (i == pta_size)
5165 error ("bad value %<%s%> for -mtune= switch", arg);
5166 return true;
5167 }
5168
5169 default:
5170 return true;
5171 }
5172 }
5173
5174 /* Implement OVERRIDE_OPTIONS. */
5175
5176 void
5177 ia64_override_options (void)
5178 {
5179 if (TARGET_AUTO_PIC)
5180 target_flags |= MASK_CONST_GP;
5181
5182 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5183 {
5184 warning (0, "not yet implemented: latency-optimized inline square root");
5185 TARGET_INLINE_SQRT = INL_MAX_THR;
5186 }
5187
5188 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5189 flag_schedule_insns_after_reload = 0;
5190
5191 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5192
5193 init_machine_status = ia64_init_machine_status;
5194 }
5195
5196 /* Initialize the record of emitted frame related registers. */
5197
5198 void ia64_init_expanders (void)
5199 {
5200 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5201 }
5202
5203 static struct machine_function *
5204 ia64_init_machine_status (void)
5205 {
5206 return ggc_alloc_cleared (sizeof (struct machine_function));
5207 }
5208 \f
5209 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5210 static enum attr_type ia64_safe_type (rtx);
5211
5212 static enum attr_itanium_class
5213 ia64_safe_itanium_class (rtx insn)
5214 {
5215 if (recog_memoized (insn) >= 0)
5216 return get_attr_itanium_class (insn);
5217 else
5218 return ITANIUM_CLASS_UNKNOWN;
5219 }
5220
5221 static enum attr_type
5222 ia64_safe_type (rtx insn)
5223 {
5224 if (recog_memoized (insn) >= 0)
5225 return get_attr_type (insn);
5226 else
5227 return TYPE_UNKNOWN;
5228 }
5229 \f
5230 /* The following collection of routines emit instruction group stop bits as
5231 necessary to avoid dependencies. */
5232
5233 /* Need to track some additional registers as far as serialization is
5234 concerned so we can properly handle br.call and br.ret. We could
5235 make these registers visible to gcc, but since these registers are
5236 never explicitly used in gcc generated code, it seems wasteful to
5237 do so (plus it would make the call and return patterns needlessly
5238 complex). */
5239 #define REG_RP (BR_REG (0))
5240 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5241 /* This is used for volatile asms which may require a stop bit immediately
5242 before and after them. */
5243 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5244 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5245 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5246
5247 /* For each register, we keep track of how it has been written in the
5248 current instruction group.
5249
5250 If a register is written unconditionally (no qualifying predicate),
5251 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5252
5253 If a register is written if its qualifying predicate P is true, we
5254 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5255 may be written again by the complement of P (P^1) and when this happens,
5256 WRITE_COUNT gets set to 2.
5257
5258 The result of this is that whenever an insn attempts to write a register
5259 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5260
5261 If a predicate register is written by a floating-point insn, we set
5262 WRITTEN_BY_FP to true.
5263
5264 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5265 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
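
/* A small worked example of the bookkeeping above, assuming p6 and p7 are a
   complementary pair set by a compare in an earlier insn group:

       (p6) mov r10 = 1     // r10: WRITE_COUNT 1, FIRST_PRED p6
       (p7) mov r10 = 2     // complement of p6: WRITE_COUNT becomes 2
            mov r10 = 3     // r10 already at 2: needs a stop bit first

   so group_barrier_needed requests an insn group barrier before the third
   write.  */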
5266
5267 struct reg_write_state
5268 {
5269 unsigned int write_count : 2;
5270 unsigned int first_pred : 16;
5271 unsigned int written_by_fp : 1;
5272 unsigned int written_by_and : 1;
5273 unsigned int written_by_or : 1;
5274 };
5275
5276 /* Cumulative info for the current instruction group. */
5277 struct reg_write_state rws_sum[NUM_REGS];
5278 /* Info for the current instruction. This gets copied to rws_sum after a
5279 stop bit is emitted. */
5280 struct reg_write_state rws_insn[NUM_REGS];
5281
5282 /* Indicates whether this is the first instruction after a stop bit,
5283 in which case we don't need another stop bit. Without this,
5284 ia64_variable_issue will die when scheduling an alloc. */
5285 static int first_instruction;
5286
5287 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5288 RTL for one instruction. */
5289 struct reg_flags
5290 {
5291 unsigned int is_write : 1; /* Is register being written? */
5292 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5293 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5294 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5295 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5296 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5297 };
5298
5299 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5300 static int rws_access_regno (int, struct reg_flags, int);
5301 static int rws_access_reg (rtx, struct reg_flags, int);
5302 static void update_set_flags (rtx, struct reg_flags *);
5303 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5304 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5305 static void init_insn_group_barriers (void);
5306 static int group_barrier_needed (rtx);
5307 static int safe_group_barrier_needed (rtx);
5308
5309 /* Update *RWS for REGNO, which is being written by the current instruction,
5310 with predicate PRED, and associated register flags in FLAGS. */
5311
5312 static void
5313 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5314 {
5315 if (pred)
5316 rws[regno].write_count++;
5317 else
5318 rws[regno].write_count = 2;
5319 rws[regno].written_by_fp |= flags.is_fp;
5320 /* ??? Not tracking and/or across differing predicates. */
5321 rws[regno].written_by_and = flags.is_and;
5322 rws[regno].written_by_or = flags.is_or;
5323 rws[regno].first_pred = pred;
5324 }
5325
5326 /* Handle an access to register REGNO of type FLAGS using predicate register
5327 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5328 a dependency with an earlier instruction in the same group. */
5329
5330 static int
5331 rws_access_regno (int regno, struct reg_flags flags, int pred)
5332 {
5333 int need_barrier = 0;
5334
5335 gcc_assert (regno < NUM_REGS);
5336
5337 if (! PR_REGNO_P (regno))
5338 flags.is_and = flags.is_or = 0;
5339
5340 if (flags.is_write)
5341 {
5342 int write_count;
5343
5344 /* One insn writes the same reg multiple times?  */
5345 gcc_assert (!rws_insn[regno].write_count);
5346
5347 /* Update info for current instruction. */
5348 rws_update (rws_insn, regno, flags, pred);
5349 write_count = rws_sum[regno].write_count;
5350
5351 switch (write_count)
5352 {
5353 case 0:
5354 /* The register has not been written yet. */
5355 rws_update (rws_sum, regno, flags, pred);
5356 break;
5357
5358 case 1:
5359 /* The register has been written via a predicate. If this is
5360 not a complementary predicate, then we need a barrier. */
5361 /* ??? This assumes that P and P+1 are always complementary
5362 predicates for P even. */
5363 if (flags.is_and && rws_sum[regno].written_by_and)
5364 ;
5365 else if (flags.is_or && rws_sum[regno].written_by_or)
5366 ;
5367 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5368 need_barrier = 1;
5369 rws_update (rws_sum, regno, flags, pred);
5370 break;
5371
5372 case 2:
5373 /* The register has been unconditionally written already. We
5374 need a barrier. */
5375 if (flags.is_and && rws_sum[regno].written_by_and)
5376 ;
5377 else if (flags.is_or && rws_sum[regno].written_by_or)
5378 ;
5379 else
5380 need_barrier = 1;
5381 rws_sum[regno].written_by_and = flags.is_and;
5382 rws_sum[regno].written_by_or = flags.is_or;
5383 break;
5384
5385 default:
5386 gcc_unreachable ();
5387 }
5388 }
5389 else
5390 {
5391 if (flags.is_branch)
5392 {
5393 /* Branches have several RAW exceptions that allow us to avoid
5394 barriers. */
5395
5396 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5397 /* RAW dependencies on branch regs are permissible as long
5398 as the writer is a non-branch instruction. Since we
5399 never generate code that uses a branch register written
5400 by a branch instruction, handling this case is
5401 easy. */
5402 return 0;
5403
5404 if (REGNO_REG_CLASS (regno) == PR_REGS
5405 && ! rws_sum[regno].written_by_fp)
5406 /* The predicates of a branch are available within the
5407 same insn group as long as the predicate was written by
5408 something other than a floating-point instruction. */
5409 return 0;
5410 }
5411
5412 if (flags.is_and && rws_sum[regno].written_by_and)
5413 return 0;
5414 if (flags.is_or && rws_sum[regno].written_by_or)
5415 return 0;
5416
5417 switch (rws_sum[regno].write_count)
5418 {
5419 case 0:
5420 /* The register has not been written yet. */
5421 break;
5422
5423 case 1:
5424 /* The register has been written via a predicate. If this is
5425 not a complementary predicate, then we need a barrier. */
5426 /* ??? This assumes that P and P+1 are always complementary
5427 predicates for P even. */
5428 if ((rws_sum[regno].first_pred ^ 1) != pred)
5429 need_barrier = 1;
5430 break;
5431
5432 case 2:
5433 /* The register has been unconditionally written already. We
5434 need a barrier. */
5435 need_barrier = 1;
5436 break;
5437
5438 default:
5439 gcc_unreachable ();
5440 }
5441 }
5442
5443 return need_barrier;
5444 }
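#if 0
/* Standalone illustrative sketch, not part of the original ia64.c (kept
   under #if 0 so it never compiles into the backend): a minimal model of
   the complementary-predicate test used above, under the same assumption
   the code makes, namely that predicate registers come in pairs (P, P+1)
   with P even, so P ^ 1 yields the complement.  The predicate numbers in
   the usage note are hypothetical.  */

static int
preds_are_complementary (int first_pred, int pred)
{
  /* Mirrors the "(rws_sum[regno].first_pred ^ 1) != pred" test with the
     sense inverted: nonzero means the two predicates form a (P, P+1)
     pair, so the second predicated write needs no barrier.  */
  return (first_pred ^ 1) == pred;
}

/* preds_are_complementary (6, 7) == 1   p6/p7 pair, no barrier needed
   preds_are_complementary (6, 8) == 0   unrelated predicates, barrier.  */
#endif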
5445
5446 static int
5447 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5448 {
5449 int regno = REGNO (reg);
5450 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5451
5452 if (n == 1)
5453 return rws_access_regno (regno, flags, pred);
5454 else
5455 {
5456 int need_barrier = 0;
5457 while (--n >= 0)
5458 need_barrier |= rws_access_regno (regno + n, flags, pred);
5459 return need_barrier;
5460 }
5461 }
5462
5463 /* Examine X, which is a SET rtx, and update the flags stored in
5464 *PFLAGS according to the source of the SET. */
5465
5466 static void
5467 update_set_flags (rtx x, struct reg_flags *pflags)
5468 {
5469 rtx src = SET_SRC (x);
5470
5471 switch (GET_CODE (src))
5472 {
5473 case CALL:
5474 return;
5475
5476 case IF_THEN_ELSE:
5477 /* There are four cases here:
5478 (1) The destination is (pc), in which case this is a branch,
5479 nothing here applies.
5480 (2) The destination is ar.lc, in which case this is a
5481 doloop_end_internal,
5482 (3) The destination is an fp register, in which case this is
5483 an fselect instruction.
5484 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5485 this is a check load.
5486 In all cases, nothing we do in this function applies. */
5487 return;
5488
5489 default:
5490 if (COMPARISON_P (src)
5491 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5492 /* Set pflags->is_fp to 1 so that we know we're dealing
5493 with a floating point comparison when processing the
5494 destination of the SET. */
5495 pflags->is_fp = 1;
5496
5497 /* Discover if this is a parallel comparison. We only handle
5498 and.orcm and or.andcm at present, since we must retain a
5499 strict inverse on the predicate pair. */
5500 else if (GET_CODE (src) == AND)
5501 pflags->is_and = 1;
5502 else if (GET_CODE (src) == IOR)
5503 pflags->is_or = 1;
5504
5505 break;
5506 }
5507 }
5508
5509 /* Subroutine of rtx_needs_barrier; this function determines whether the
5510 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5511 are as in rtx_needs_barrier. */
5513
5514 static int
5515 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5516 {
5517 int need_barrier = 0;
5518 rtx dst;
5519 rtx src = SET_SRC (x);
5520
5521 if (GET_CODE (src) == CALL)
5522 /* We don't need to worry about the result registers that
5523 get written by a subroutine call. */
5524 return rtx_needs_barrier (src, flags, pred);
5525 else if (SET_DEST (x) == pc_rtx)
5526 {
5527 /* X is a conditional branch. */
5528 /* ??? This seems redundant, as the caller sets this bit for
5529 all JUMP_INSNs. */
5530 if (!ia64_spec_check_src_p (src))
5531 flags.is_branch = 1;
5532 return rtx_needs_barrier (src, flags, pred);
5533 }
5534
5535 if (ia64_spec_check_src_p (src))
5536 /* Avoid checking one register twice (in condition
5537 and in 'then' section) for ldc pattern. */
5538 {
5539 gcc_assert (REG_P (XEXP (src, 2)));
5540 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5541
5542 /* We process MEM below. */
5543 src = XEXP (src, 1);
5544 }
5545
5546 need_barrier |= rtx_needs_barrier (src, flags, pred);
5547
5548 dst = SET_DEST (x);
5549 if (GET_CODE (dst) == ZERO_EXTRACT)
5550 {
5551 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5552 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5553 }
5554 return need_barrier;
5555 }
5556
5557 /* Handle an access to rtx X of type FLAGS using predicate register
5558 PRED. Return 1 if this access creates a dependency with an earlier
5559 instruction in the same group. */
5560
5561 static int
5562 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5563 {
5564 int i, j;
5565 int is_complemented = 0;
5566 int need_barrier = 0;
5567 const char *format_ptr;
5568 struct reg_flags new_flags;
5569 rtx cond;
5570
5571 if (! x)
5572 return 0;
5573
5574 new_flags = flags;
5575
5576 switch (GET_CODE (x))
5577 {
5578 case SET:
5579 update_set_flags (x, &new_flags);
5580 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5581 if (GET_CODE (SET_SRC (x)) != CALL)
5582 {
5583 new_flags.is_write = 1;
5584 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5585 }
5586 break;
5587
5588 case CALL:
5589 new_flags.is_write = 0;
5590 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5591
5592 /* Avoid multiple register writes, in case this is a pattern with
5593 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5594 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5595 {
5596 new_flags.is_write = 1;
5597 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5598 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5599 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5600 }
5601 break;
5602
5603 case COND_EXEC:
5604 /* X is a predicated instruction. */
5605
5606 cond = COND_EXEC_TEST (x);
5607 gcc_assert (!pred);
5608 need_barrier = rtx_needs_barrier (cond, flags, 0);
5609
5610 if (GET_CODE (cond) == EQ)
5611 is_complemented = 1;
5612 cond = XEXP (cond, 0);
5613 gcc_assert (GET_CODE (cond) == REG
5614 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5615 pred = REGNO (cond);
5616 if (is_complemented)
5617 ++pred;
5618
5619 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5620 return need_barrier;
5621
5622 case CLOBBER:
5623 case USE:
5624 /* Clobber & use are for earlier compiler phases only. */
5625 break;
5626
5627 case ASM_OPERANDS:
5628 case ASM_INPUT:
5629 /* We always emit stop bits for traditional asms. We emit stop bits
5630 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5631 if (GET_CODE (x) != ASM_OPERANDS
5632 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5633 {
5634 /* Avoid writing the register multiple times if we have multiple
5635 asm outputs. This avoids a failure in rws_access_reg. */
5636 if (! rws_insn[REG_VOLATILE].write_count)
5637 {
5638 new_flags.is_write = 1;
5639 rws_access_regno (REG_VOLATILE, new_flags, pred);
5640 }
5641 return 1;
5642 }
5643
5644 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5645 We cannot just fall through here since then we would be confused
5646 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
5647 usage, does not indicate a traditional asm. */
5648
5649 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5650 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5651 need_barrier = 1;
5652 break;
5653
5654 case PARALLEL:
5655 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5656 {
5657 rtx pat = XVECEXP (x, 0, i);
5658 switch (GET_CODE (pat))
5659 {
5660 case SET:
5661 update_set_flags (pat, &new_flags);
5662 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5663 break;
5664
5665 case USE:
5666 case CALL:
5667 case ASM_OPERANDS:
5668 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5669 break;
5670
5671 case CLOBBER:
5672 case RETURN:
5673 break;
5674
5675 default:
5676 gcc_unreachable ();
5677 }
5678 }
5679 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5680 {
5681 rtx pat = XVECEXP (x, 0, i);
5682 if (GET_CODE (pat) == SET)
5683 {
5684 if (GET_CODE (SET_SRC (pat)) != CALL)
5685 {
5686 new_flags.is_write = 1;
5687 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5688 pred);
5689 }
5690 }
5691 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5692 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5693 }
5694 break;
5695
5696 case SUBREG:
5697 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5698 break;
5699 case REG:
5700 if (REGNO (x) == AR_UNAT_REGNUM)
5701 {
5702 for (i = 0; i < 64; ++i)
5703 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5704 }
5705 else
5706 need_barrier = rws_access_reg (x, flags, pred);
5707 break;
5708
5709 case MEM:
5710 /* Find the regs used in memory address computation. */
5711 new_flags.is_write = 0;
5712 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5713 break;
5714
5715 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5716 case SYMBOL_REF: case LABEL_REF: case CONST:
5717 break;
5718
5719 /* Operators with side-effects. */
5720 case POST_INC: case POST_DEC:
5721 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5722
5723 new_flags.is_write = 0;
5724 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5725 new_flags.is_write = 1;
5726 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5727 break;
5728
5729 case POST_MODIFY:
5730 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5731
5732 new_flags.is_write = 0;
5733 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5734 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5735 new_flags.is_write = 1;
5736 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5737 break;
5738
5739 /* Handle common unary and binary ops for efficiency. */
5740 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5741 case MOD: case UDIV: case UMOD: case AND: case IOR:
5742 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5743 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5744 case NE: case EQ: case GE: case GT: case LE:
5745 case LT: case GEU: case GTU: case LEU: case LTU:
5746 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5747 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5748 break;
5749
5750 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5751 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5752 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5753 case SQRT: case FFS: case POPCOUNT:
5754 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5755 break;
5756
5757 case VEC_SELECT:
5758 /* VEC_SELECT's second argument is a PARALLEL with integers that
5759 describe the elements selected. On ia64, those integers are
5760 always constants. Avoid walking the PARALLEL so that we don't
5761 get confused with "normal" parallels and then die. */
5762 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5763 break;
5764
5765 case UNSPEC:
5766 switch (XINT (x, 1))
5767 {
5768 case UNSPEC_LTOFF_DTPMOD:
5769 case UNSPEC_LTOFF_DTPREL:
5770 case UNSPEC_DTPREL:
5771 case UNSPEC_LTOFF_TPREL:
5772 case UNSPEC_TPREL:
5773 case UNSPEC_PRED_REL_MUTEX:
5774 case UNSPEC_PIC_CALL:
5775 case UNSPEC_MF:
5776 case UNSPEC_FETCHADD_ACQ:
5777 case UNSPEC_BSP_VALUE:
5778 case UNSPEC_FLUSHRS:
5779 case UNSPEC_BUNDLE_SELECTOR:
5780 break;
5781
5782 case UNSPEC_GR_SPILL:
5783 case UNSPEC_GR_RESTORE:
5784 {
5785 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5786 HOST_WIDE_INT bit = (offset >> 3) & 63;
5787
5788 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5789 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5790 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5791 new_flags, pred);
5792 break;
5793 }
5794
5795 case UNSPEC_FR_SPILL:
5796 case UNSPEC_FR_RESTORE:
5797 case UNSPEC_GETF_EXP:
5798 case UNSPEC_SETF_EXP:
5799 case UNSPEC_ADDP4:
5800 case UNSPEC_FR_SQRT_RECIP_APPROX:
5801 case UNSPEC_LDA:
5802 case UNSPEC_LDS:
5803 case UNSPEC_LDSA:
5804 case UNSPEC_CHKACLR:
5805 case UNSPEC_CHKS:
5806 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5807 break;
5808
5809 case UNSPEC_FR_RECIP_APPROX:
5810 case UNSPEC_SHRP:
5811 case UNSPEC_COPYSIGN:
5812 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5813 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5814 break;
5815
5816 case UNSPEC_CMPXCHG_ACQ:
5817 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5818 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5819 break;
5820
5821 default:
5822 gcc_unreachable ();
5823 }
5824 break;
5825
5826 case UNSPEC_VOLATILE:
5827 switch (XINT (x, 1))
5828 {
5829 case UNSPECV_ALLOC:
5830 /* Alloc must always be the first instruction of a group.
5831 We force this by always returning true. */
5832 /* ??? We might get better scheduling if we explicitly check for
5833 input/local/output register dependencies, and modify the
5834 scheduler so that alloc is always reordered to the start of
5835 the current group. We could then eliminate all of the
5836 first_instruction code. */
5837 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5838
5839 new_flags.is_write = 1;
5840 rws_access_regno (REG_AR_CFM, new_flags, pred);
5841 return 1;
5842
5843 case UNSPECV_SET_BSP:
5844 need_barrier = 1;
5845 break;
5846
5847 case UNSPECV_BLOCKAGE:
5848 case UNSPECV_INSN_GROUP_BARRIER:
5849 case UNSPECV_BREAK:
5850 case UNSPECV_PSAC_ALL:
5851 case UNSPECV_PSAC_NORMAL:
5852 return 0;
5853
5854 default:
5855 gcc_unreachable ();
5856 }
5857 break;
5858
5859 case RETURN:
5860 new_flags.is_write = 0;
5861 need_barrier = rws_access_regno (REG_RP, flags, pred);
5862 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5863
5864 new_flags.is_write = 1;
5865 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5866 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5867 break;
5868
5869 default:
5870 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5871 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5872 switch (format_ptr[i])
5873 {
5874 case '0': /* unused field */
5875 case 'i': /* integer */
5876 case 'n': /* note */
5877 case 'w': /* wide integer */
5878 case 's': /* pointer to string */
5879 case 'S': /* optional pointer to string */
5880 break;
5881
5882 case 'e':
5883 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5884 need_barrier = 1;
5885 break;
5886
5887 case 'E':
5888 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5889 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5890 need_barrier = 1;
5891 break;
5892
5893 default:
5894 gcc_unreachable ();
5895 }
5896 break;
5897 }
5898 return need_barrier;
5899 }
5900
5901 /* Clear out the state for group_barrier_needed at the start of a
5902 sequence of insns. */
5903
5904 static void
5905 init_insn_group_barriers (void)
5906 {
5907 memset (rws_sum, 0, sizeof (rws_sum));
5908 first_instruction = 1;
5909 }
5910
5911 /* Given the current state, determine whether a group barrier (a stop bit) is
5912 necessary before INSN. Return nonzero if so. This modifies the state to
5913 include the effects of INSN as a side-effect. */
5914
5915 static int
5916 group_barrier_needed (rtx insn)
5917 {
5918 rtx pat;
5919 int need_barrier = 0;
5920 struct reg_flags flags;
5921
5922 memset (&flags, 0, sizeof (flags));
5923 switch (GET_CODE (insn))
5924 {
5925 case NOTE:
5926 break;
5927
5928 case BARRIER:
5929 /* A barrier doesn't imply an instruction group boundary. */
5930 break;
5931
5932 case CODE_LABEL:
5933 memset (rws_insn, 0, sizeof (rws_insn));
5934 return 1;
5935
5936 case CALL_INSN:
5937 flags.is_branch = 1;
5938 flags.is_sibcall = SIBLING_CALL_P (insn);
5939 memset (rws_insn, 0, sizeof (rws_insn));
5940
5941 /* Don't bundle a call following another call. */
5942 if ((pat = prev_active_insn (insn))
5943 && GET_CODE (pat) == CALL_INSN)
5944 {
5945 need_barrier = 1;
5946 break;
5947 }
5948
5949 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5950 break;
5951
5952 case JUMP_INSN:
5953 if (!ia64_spec_check_p (insn))
5954 flags.is_branch = 1;
5955
5956 /* Don't bundle a jump following a call. */
5957 if ((pat = prev_active_insn (insn))
5958 && GET_CODE (pat) == CALL_INSN)
5959 {
5960 need_barrier = 1;
5961 break;
5962 }
5963 /* FALLTHRU */
5964
5965 case INSN:
5966 if (GET_CODE (PATTERN (insn)) == USE
5967 || GET_CODE (PATTERN (insn)) == CLOBBER)
5968 /* Don't care about USE and CLOBBER "insns"---those are used to
5969 indicate to the optimizer that it shouldn't get rid of
5970 certain operations. */
5971 break;
5972
5973 pat = PATTERN (insn);
5974
5975 /* Ug. Hack hacks hacked elsewhere. */
5976 switch (recog_memoized (insn))
5977 {
5978 /* We play dependency tricks with the epilogue in order
5979 to get proper schedules. Undo this for dv analysis. */
5980 case CODE_FOR_epilogue_deallocate_stack:
5981 case CODE_FOR_prologue_allocate_stack:
5982 pat = XVECEXP (pat, 0, 0);
5983 break;
5984
5985 /* The pattern we use for br.cloop confuses the code above.
5986 The second element of the vector is representative. */
5987 case CODE_FOR_doloop_end_internal:
5988 pat = XVECEXP (pat, 0, 1);
5989 break;
5990
5991 /* Doesn't generate code. */
5992 case CODE_FOR_pred_rel_mutex:
5993 case CODE_FOR_prologue_use:
5994 return 0;
5995
5996 default:
5997 break;
5998 }
5999
6000 memset (rws_insn, 0, sizeof (rws_insn));
6001 need_barrier = rtx_needs_barrier (pat, flags, 0);
6002
6003 /* Check to see if the previous instruction was a volatile
6004 asm. */
6005 if (! need_barrier)
6006 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6007 break;
6008
6009 default:
6010 gcc_unreachable ();
6011 }
6012
6013 if (first_instruction && INSN_P (insn)
6014 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6015 && GET_CODE (PATTERN (insn)) != USE
6016 && GET_CODE (PATTERN (insn)) != CLOBBER)
6017 {
6018 need_barrier = 0;
6019 first_instruction = 0;
6020 }
6021
6022 return need_barrier;
6023 }
6024
6025 /* Like group_barrier_needed, but do not clobber the current state. */
6026
6027 static int
6028 safe_group_barrier_needed (rtx insn)
6029 {
6030 struct reg_write_state rws_saved[NUM_REGS];
6031 int saved_first_instruction;
6032 int t;
6033
6034 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
6035 saved_first_instruction = first_instruction;
6036
6037 t = group_barrier_needed (insn);
6038
6039 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
6040 first_instruction = saved_first_instruction;
6041
6042 return t;
6043 }
6044
6045 /* Scan the current function and insert stop bits as necessary to
6046 eliminate dependencies. This function assumes that a final
6047 instruction scheduling pass has been run which has already
6048 inserted most of the necessary stop bits. This function only
6049 inserts new ones at basic block boundaries, since these are
6050 invisible to the scheduler. */
6051
6052 static void
6053 emit_insn_group_barriers (FILE *dump)
6054 {
6055 rtx insn;
6056 rtx last_label = 0;
6057 int insns_since_last_label = 0;
6058
6059 init_insn_group_barriers ();
6060
6061 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6062 {
6063 if (GET_CODE (insn) == CODE_LABEL)
6064 {
6065 if (insns_since_last_label)
6066 last_label = insn;
6067 insns_since_last_label = 0;
6068 }
6069 else if (GET_CODE (insn) == NOTE
6070 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6071 {
6072 if (insns_since_last_label)
6073 last_label = insn;
6074 insns_since_last_label = 0;
6075 }
6076 else if (GET_CODE (insn) == INSN
6077 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6078 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6079 {
6080 init_insn_group_barriers ();
6081 last_label = 0;
6082 }
6083 else if (INSN_P (insn))
6084 {
6085 insns_since_last_label = 1;
6086
6087 if (group_barrier_needed (insn))
6088 {
6089 if (last_label)
6090 {
6091 if (dump)
6092 fprintf (dump, "Emitting stop before label %d\n",
6093 INSN_UID (last_label));
6094 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6095 insn = last_label;
6096
6097 init_insn_group_barriers ();
6098 last_label = 0;
6099 }
6100 }
6101 }
6102 }
6103 }
6104
6105 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6106 This function has to emit all necessary group barriers. */
6107
6108 static void
6109 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6110 {
6111 rtx insn;
6112
6113 init_insn_group_barriers ();
6114
6115 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6116 {
6117 if (GET_CODE (insn) == BARRIER)
6118 {
6119 rtx last = prev_active_insn (insn);
6120
6121 if (! last)
6122 continue;
6123 if (GET_CODE (last) == JUMP_INSN
6124 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6125 last = prev_active_insn (last);
6126 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6127 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6128
6129 init_insn_group_barriers ();
6130 }
6131 else if (INSN_P (insn))
6132 {
6133 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6134 init_insn_group_barriers ();
6135 else if (group_barrier_needed (insn))
6136 {
6137 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6138 init_insn_group_barriers ();
6139 group_barrier_needed (insn);
6140 }
6141 }
6142 }
6143 }
6144
6145 \f
6146
6147 /* Instruction scheduling support. */
6148
6149 #define NR_BUNDLES 10
6150
6151 /* A list of names of all available bundles. */
6152
6153 static const char *bundle_name [NR_BUNDLES] =
6154 {
6155 ".mii",
6156 ".mmi",
6157 ".mfi",
6158 ".mmf",
6159 #if NR_BUNDLES == 10
6160 ".bbb",
6161 ".mbb",
6162 #endif
6163 ".mib",
6164 ".mmb",
6165 ".mfb",
6166 ".mlx"
6167 };
6168
6169 /* Nonzero if we should insert stop bits into the schedule. */
6170
6171 int ia64_final_schedule = 0;
6172
6173 /* Codes of the corresponding queried units: */
6174
6175 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6176 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6177
6178 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6179 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6180
6181 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6182
6183 /* The following variable value is an insn group barrier. */
6184
6185 static rtx dfa_stop_insn;
6186
6187 /* The following variable value is the last issued insn. */
6188
6189 static rtx last_scheduled_insn;
6190
6191 /* The following variable value is the size of the DFA state. */
6192
6193 static size_t dfa_state_size;
6194
6195 /* The following variable value is a pointer to a DFA state used as
6196 a temporary variable. */
6197
6198 static state_t temp_dfa_state = NULL;
6199
6200 /* The following variable value is DFA state after issuing the last
6201 insn. */
6202
6203 static state_t prev_cycle_state = NULL;
6204
6205 /* The following array element values are TRUE if the corresponding
6206 insn requires stop bits to be added before it. */
6207
6208 static char *stops_p = NULL;
6209
6210 /* The following array element values are ZERO for non-speculative
6211 instructions and hold the corresponding speculation check number for
6212 speculative instructions. */
6213 static int *spec_check_no = NULL;
6214
6215 /* Size of spec_check_no array. */
6216 static int max_uid = 0;
6217
6218 /* The following variable is used to set up the array mentioned above. */
6219
6220 static int stop_before_p = 0;
6221
6222 /* The following variable value is the length of the arrays `clocks' and
6223 `add_cycles'. */
6224
6225 static int clocks_length;
6226
6227 /* The following array element values are cycles on which the
6228 corresponding insn will be issued. The array is used only for
6229 Itanium1. */
6230
6231 static int *clocks;
6232
6233 /* The following array element values are the numbers of cycles that should
6234 be added to improve insn scheduling for MM_insns for Itanium1. */
6235
6236 static int *add_cycles;
6237
6238 /* The following variable value is the number of data speculations in progress. */
6239 static int pending_data_specs = 0;
6240
6241 static rtx ia64_single_set (rtx);
6242 static void ia64_emit_insn_before (rtx, rtx);
6243
6244 /* Map a bundle number to its pseudo-op. */
6245
6246 const char *
6247 get_bundle_name (int b)
6248 {
6249 return bundle_name[b];
6250 }
6251
6252
6253 /* Return the maximum number of instructions a cpu can issue. */
6254
6255 static int
6256 ia64_issue_rate (void)
6257 {
6258 return 6;
6259 }
6260
6261 /* Helper function - like single_set, but look inside COND_EXEC. */
6262
6263 static rtx
6264 ia64_single_set (rtx insn)
6265 {
6266 rtx x = PATTERN (insn), ret;
6267 if (GET_CODE (x) == COND_EXEC)
6268 x = COND_EXEC_CODE (x);
6269 if (GET_CODE (x) == SET)
6270 return x;
6271
6272 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6273 Although they are not classical single set, the second set is there just
6274 to protect it from moving past FP-relative stack accesses. */
6275 switch (recog_memoized (insn))
6276 {
6277 case CODE_FOR_prologue_allocate_stack:
6278 case CODE_FOR_epilogue_deallocate_stack:
6279 ret = XVECEXP (x, 0, 0);
6280 break;
6281
6282 default:
6283 ret = single_set_2 (insn, x);
6284 break;
6285 }
6286
6287 return ret;
6288 }
6289
6290 /* Adjust the cost of a scheduling dependency. Return the new cost of
6291 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6292
6293 static int
6294 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6295 {
6296 enum attr_itanium_class dep_class;
6297 enum attr_itanium_class insn_class;
6298
6299 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6300 return cost;
6301
6302 insn_class = ia64_safe_itanium_class (insn);
6303 dep_class = ia64_safe_itanium_class (dep_insn);
6304 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6305 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6306 return 0;
6307
6308 return cost;
6309 }
6310
6311 /* Like emit_insn_before, but skip cycle_display notes.
6312 ??? When cycle display notes are implemented, update this. */
6313
6314 static void
6315 ia64_emit_insn_before (rtx insn, rtx before)
6316 {
6317 emit_insn_before (insn, before);
6318 }
6319
6320 /* The following function marks insns that produce addresses for load
6321 and store insns. Such insns will be placed into M slots because that
6322 decreases latency time for Itanium1 (see function
6323 `ia64_produce_address_p' and the DFA descriptions). */
6324
6325 static void
6326 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6327 {
6328 rtx insn, next, next_tail;
6329
6330 /* Before reload, which_alternative is not set, which means that
6331 ia64_safe_itanium_class will produce wrong results for (at least)
6332 move instructions. */
6333 if (!reload_completed)
6334 return;
6335
6336 next_tail = NEXT_INSN (tail);
6337 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6338 if (INSN_P (insn))
6339 insn->call = 0;
6340 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6341 if (INSN_P (insn)
6342 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6343 {
6344 sd_iterator_def sd_it;
6345 dep_t dep;
6346 bool has_mem_op_consumer_p = false;
6347
6348 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6349 {
6350 enum attr_itanium_class c;
6351
6352 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6353 continue;
6354
6355 next = DEP_CON (dep);
6356 c = ia64_safe_itanium_class (next);
6357 if ((c == ITANIUM_CLASS_ST
6358 || c == ITANIUM_CLASS_STF)
6359 && ia64_st_address_bypass_p (insn, next))
6360 {
6361 has_mem_op_consumer_p = true;
6362 break;
6363 }
6364 else if ((c == ITANIUM_CLASS_LD
6365 || c == ITANIUM_CLASS_FLD
6366 || c == ITANIUM_CLASS_FLDP)
6367 && ia64_ld_address_bypass_p (insn, next))
6368 {
6369 has_mem_op_consumer_p = true;
6370 break;
6371 }
6372 }
6373
6374 insn->call = has_mem_op_consumer_p;
6375 }
6376 }
6377
6378 /* We're beginning a new block. Initialize data structures as necessary. */
6379
6380 static void
6381 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6382 int sched_verbose ATTRIBUTE_UNUSED,
6383 int max_ready ATTRIBUTE_UNUSED)
6384 {
6385 #ifdef ENABLE_CHECKING
6386 rtx insn;
6387
6388 if (reload_completed)
6389 for (insn = NEXT_INSN (current_sched_info->prev_head);
6390 insn != current_sched_info->next_tail;
6391 insn = NEXT_INSN (insn))
6392 gcc_assert (!SCHED_GROUP_P (insn));
6393 #endif
6394 last_scheduled_insn = NULL_RTX;
6395 init_insn_group_barriers ();
6396 }
6397
6398 /* We're beginning a scheduling pass. Check assertion. */
6399
6400 static void
6401 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6402 int sched_verbose ATTRIBUTE_UNUSED,
6403 int max_ready ATTRIBUTE_UNUSED)
6404 {
6405 gcc_assert (!pending_data_specs);
6406 }
6407
6408 /* The scheduling pass is now finished. Free/reset static variables. */
6409 static void
6410 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6411 int sched_verbose ATTRIBUTE_UNUSED)
6412 {
6413 free (spec_check_no);
6414 spec_check_no = 0;
6415 max_uid = 0;
6416 }
6417
6418 /* We are about to begin issuing insns for this clock cycle.
6419 Override the default sort algorithm to better slot instructions. */
6420
6421 static int
6422 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6423 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6424 int reorder_type)
6425 {
6426 int n_asms;
6427 int n_ready = *pn_ready;
6428 rtx *e_ready = ready + n_ready;
6429 rtx *insnp;
6430
6431 if (sched_verbose)
6432 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6433
6434 if (reorder_type == 0)
6435 {
6436 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6437 n_asms = 0;
6438 for (insnp = ready; insnp < e_ready; insnp++)
6439 if (insnp < e_ready)
6440 {
6441 rtx insn = *insnp;
6442 enum attr_type t = ia64_safe_type (insn);
6443 if (t == TYPE_UNKNOWN)
6444 {
6445 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6446 || asm_noperands (PATTERN (insn)) >= 0)
6447 {
6448 rtx lowest = ready[n_asms];
6449 ready[n_asms] = insn;
6450 *insnp = lowest;
6451 n_asms++;
6452 }
6453 else
6454 {
6455 rtx highest = ready[n_ready - 1];
6456 ready[n_ready - 1] = insn;
6457 *insnp = highest;
6458 return 1;
6459 }
6460 }
6461 }
6462
6463 if (n_asms < n_ready)
6464 {
6465 /* Some normal insns to process. Skip the asms. */
6466 ready += n_asms;
6467 n_ready -= n_asms;
6468 }
6469 else if (n_ready > 0)
6470 return 1;
6471 }
6472
6473 if (ia64_final_schedule)
6474 {
6475 int deleted = 0;
6476 int nr_need_stop = 0;
6477
6478 for (insnp = ready; insnp < e_ready; insnp++)
6479 if (safe_group_barrier_needed (*insnp))
6480 nr_need_stop++;
6481
6482 if (reorder_type == 1 && n_ready == nr_need_stop)
6483 return 0;
6484 if (reorder_type == 0)
6485 return 1;
6486 insnp = e_ready;
6487 /* Move down everything that needs a stop bit, preserving
6488 relative order. */
6489 while (insnp-- > ready + deleted)
6490 while (insnp >= ready + deleted)
6491 {
6492 rtx insn = *insnp;
6493 if (! safe_group_barrier_needed (insn))
6494 break;
6495 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6496 *ready = insn;
6497 deleted++;
6498 }
6499 n_ready -= deleted;
6500 ready += deleted;
6501 }
6502
6503 return 1;
6504 }
6505
6506 /* We are about to begin issuing insns for this clock cycle. Override
6507 the default sort algorithm to better slot instructions. */
6508
6509 static int
6510 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6511 int clock_var)
6512 {
6513 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6514 pn_ready, clock_var, 0);
6515 }
6516
6517 /* Like ia64_sched_reorder, but called after issuing each insn.
6518 Override the default sort algorithm to better slot instructions. */
6519
6520 static int
6521 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6522 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6523 int *pn_ready, int clock_var)
6524 {
6525 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6526 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6527 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6528 clock_var, 1);
6529 }
6530
6531 /* We are about to issue INSN. Return the number of insns left on the
6532 ready queue that can be issued this cycle. */
6533
6534 static int
6535 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6536 int sched_verbose ATTRIBUTE_UNUSED,
6537 rtx insn ATTRIBUTE_UNUSED,
6538 int can_issue_more ATTRIBUTE_UNUSED)
6539 {
6540 if (current_sched_info->flags & DO_SPECULATION)
6541 /* Modulo scheduling does not extend h_i_d when emitting
6542 new instructions. Deal with it. */
6543 {
6544 if (DONE_SPEC (insn) & BEGIN_DATA)
6545 pending_data_specs++;
6546 if (CHECK_SPEC (insn) & BEGIN_DATA)
6547 pending_data_specs--;
6548 }
6549
6550 last_scheduled_insn = insn;
6551 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6552 if (reload_completed)
6553 {
6554 int needed = group_barrier_needed (insn);
6555
6556 gcc_assert (!needed);
6557 if (GET_CODE (insn) == CALL_INSN)
6558 init_insn_group_barriers ();
6559 stops_p [INSN_UID (insn)] = stop_before_p;
6560 stop_before_p = 0;
6561 }
6562 return 1;
6563 }
6564
6565 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6566 can be chosen. */
6567
6568 static int
6569 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6570 {
6571 gcc_assert (insn && INSN_P (insn));
6572 return ((!reload_completed
6573 || !safe_group_barrier_needed (insn))
6574 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6575 }
6576
6577 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6578 can be chosen. */
6579
6580 static bool
6581 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx insn)
6582 {
6583 gcc_assert (insn && INSN_P (insn));
6584 /* The size of the ALAT is 32. Since we perform conservative data
6585 speculation, we keep the ALAT half empty. */
6586 return (pending_data_specs < 16
6587 || !(TODO_SPEC (insn) & BEGIN_DATA));
6588 }
6589
6590 /* The following variable value is a pseudo-insn used by the DFA insn
6591 scheduler to change the DFA state when the simulated clock is
6592 increased. */
6593
6594 static rtx dfa_pre_cycle_insn;
6595
6596 /* We are about to issue INSN. Return nonzero if we cannot
6597 issue it on the given cycle CLOCK and return zero if we should not sort
6598 the ready queue on the next clock start. */
6599
6600 static int
6601 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6602 int clock, int *sort_p)
6603 {
6604 int setup_clocks_p = FALSE;
6605
6606 gcc_assert (insn && INSN_P (insn));
6607 if ((reload_completed && safe_group_barrier_needed (insn))
6608 || (last_scheduled_insn
6609 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6610 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6611 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6612 {
6613 init_insn_group_barriers ();
6614 if (verbose && dump)
6615 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6616 last_clock == clock ? " + cycle advance" : "");
6617 stop_before_p = 1;
6618 if (last_clock == clock)
6619 {
6620 state_transition (curr_state, dfa_stop_insn);
6621 if (TARGET_EARLY_STOP_BITS)
6622 *sort_p = (last_scheduled_insn == NULL_RTX
6623 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6624 else
6625 *sort_p = 0;
6626 return 1;
6627 }
6628 else if (reload_completed)
6629 setup_clocks_p = TRUE;
6630 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6631 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6632 state_reset (curr_state);
6633 else
6634 {
6635 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6636 state_transition (curr_state, dfa_stop_insn);
6637 state_transition (curr_state, dfa_pre_cycle_insn);
6638 state_transition (curr_state, NULL);
6639 }
6640 }
6641 else if (reload_completed)
6642 setup_clocks_p = TRUE;
6643 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6644 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6645 && asm_noperands (PATTERN (insn)) < 0)
6646 {
6647 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6648
6649 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6650 {
6651 sd_iterator_def sd_it;
6652 dep_t dep;
6653 int d = -1;
6654
6655 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6656 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6657 {
6658 enum attr_itanium_class dep_class;
6659 rtx dep_insn = DEP_PRO (dep);
6660
6661 dep_class = ia64_safe_itanium_class (dep_insn);
6662 if ((dep_class == ITANIUM_CLASS_MMMUL
6663 || dep_class == ITANIUM_CLASS_MMSHF)
6664 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6665 && (d < 0
6666 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6667 d = last_clock - clocks [INSN_UID (dep_insn)];
6668 }
6669 if (d >= 0)
6670 add_cycles [INSN_UID (insn)] = 3 - d;
6671 }
6672 }
6673 return 0;
6674 }
6675
6676 /* Implement targetm.sched.h_i_d_extended hook.
6677 Extend internal data structures. */
6678 static void
6679 ia64_h_i_d_extended (void)
6680 {
6681 if (current_sched_info->flags & DO_SPECULATION)
6682 {
6683 int new_max_uid = get_max_uid () + 1;
6684
6685 spec_check_no = xrecalloc (spec_check_no, new_max_uid,
6686 max_uid, sizeof (*spec_check_no));
6687 max_uid = new_max_uid;
6688 }
6689
6690 if (stops_p != NULL)
6691 {
6692 int new_clocks_length = get_max_uid () + 1;
6693
6694 stops_p = xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6695
6696 if (ia64_tune == PROCESSOR_ITANIUM)
6697 {
6698 clocks = xrecalloc (clocks, new_clocks_length, clocks_length,
6699 sizeof (int));
6700 add_cycles = xrecalloc (add_cycles, new_clocks_length, clocks_length,
6701 sizeof (int));
6702 }
6703
6704 clocks_length = new_clocks_length;
6705 }
6706 }
6707
6708 /* Constants that help map 'enum machine_mode' to int. */
6709 enum SPEC_MODES
6710 {
6711 SPEC_MODE_INVALID = -1,
6712 SPEC_MODE_FIRST = 0,
6713 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6714 SPEC_MODE_FOR_EXTEND_LAST = 3,
6715 SPEC_MODE_LAST = 8
6716 };
6717
6718 /* Return the index of MODE. */
6719 static int
6720 ia64_mode_to_int (enum machine_mode mode)
6721 {
6722 switch (mode)
6723 {
6724 case BImode: return 0; /* SPEC_MODE_FIRST */
6725 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6726 case HImode: return 2;
6727 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6728 case DImode: return 4;
6729 case SFmode: return 5;
6730 case DFmode: return 6;
6731 case XFmode: return 7;
6732 case TImode:
6733 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6734 mentioned in itanium[12].md. Predicate fp_register_operand also
6735 needs to be defined. Bottom line: better disable for now. */
6736 return SPEC_MODE_INVALID;
6737 default: return SPEC_MODE_INVALID;
6738 }
6739 }
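/* Illustrative note, not part of the original ia64.c: with the mapping
   above, a DImode access yields index 4 and a QImode access yields index 1.
   Only indices 1..3 (QImode, HImode, SImode) fall inside the range
   [SPEC_MODE_FOR_EXTEND_FIRST, SPEC_MODE_FOR_EXTEND_LAST], which is why
   ia64_speculate_insn below rejects a ZERO_EXTEND wrapped around a load of
   any other mode.  */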
6740
6741 /* Provide information about speculation capabilities. */
6742 static void
6743 ia64_set_sched_flags (spec_info_t spec_info)
6744 {
6745 unsigned int *flags = &(current_sched_info->flags);
6746
6747 if (*flags & SCHED_RGN
6748 || *flags & SCHED_EBB)
6749 {
6750 int mask = 0;
6751
6752 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6753 || (mflag_sched_ar_data_spec && reload_completed))
6754 {
6755 mask |= BEGIN_DATA;
6756
6757 if ((mflag_sched_br_in_data_spec && !reload_completed)
6758 || (mflag_sched_ar_in_data_spec && reload_completed))
6759 mask |= BE_IN_DATA;
6760 }
6761
6762 if (mflag_sched_control_spec)
6763 {
6764 mask |= BEGIN_CONTROL;
6765
6766 if (mflag_sched_in_control_spec)
6767 mask |= BE_IN_CONTROL;
6768 }
6769
6770 if (mask)
6771 {
6772 *flags |= USE_DEPS_LIST | DO_SPECULATION;
6773
6774 if (mask & BE_IN_SPEC)
6775 *flags |= NEW_BBS;
6776
6777 spec_info->mask = mask;
6778 spec_info->flags = 0;
6779
6780 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6781 spec_info->flags |= PREFER_NON_DATA_SPEC;
6782
6783 if ((mask & CONTROL_SPEC)
6784 && mflag_sched_prefer_non_control_spec_insns)
6785 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6786
6787 if (mflag_sched_spec_verbose)
6788 {
6789 if (sched_verbose >= 1)
6790 spec_info->dump = sched_dump;
6791 else
6792 spec_info->dump = stderr;
6793 }
6794 else
6795 spec_info->dump = 0;
6796
6797 if (mflag_sched_count_spec_in_critical_path)
6798 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6799 }
6800 }
6801 }
6802
6803 /* Implement targetm.sched.speculate_insn hook.
6804 Check whether INSN can be speculated with speculation type TS.
6805 If not, return -1.
6806 If so, generate the speculative pattern in *NEW_PAT and return 1.
6807 If the current pattern of INSN already provides TS speculation, return 0. */
6808 static int
6809 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6810 {
6811 rtx pat, reg, mem, mem_reg;
6812 int mode_no, gen_p = 1;
6813 bool extend_p;
6814
6815 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6816
6817 pat = PATTERN (insn);
6818
6819 if (GET_CODE (pat) == COND_EXEC)
6820 pat = COND_EXEC_CODE (pat);
6821
6822 /* This should be a SET ... */
6823 if (GET_CODE (pat) != SET)
6824 return -1;
6825
6826 reg = SET_DEST (pat);
6827 /* ... to the general/fp register ... */
6828 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6829 return -1;
6830
6831 /* ... from the mem ... */
6832 mem = SET_SRC (pat);
6833
6834 /* ... that can, possibly, be a zero_extend ... */
6835 if (GET_CODE (mem) == ZERO_EXTEND)
6836 {
6837 mem = XEXP (mem, 0);
6838 extend_p = true;
6839 }
6840 else
6841 extend_p = false;
6842
6843 /* ... or a speculative load. */
6844 if (GET_CODE (mem) == UNSPEC)
6845 {
6846 int code;
6847
6848 code = XINT (mem, 1);
6849 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6850 return -1;
6851
6852 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6853 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6854 || code == UNSPEC_LDSA)
6855 gen_p = 0;
6856
6857 mem = XVECEXP (mem, 0, 0);
6858 gcc_assert (MEM_P (mem));
6859 }
6860
6861 /* Source should be a mem ... */
6862 if (!MEM_P (mem))
6863 return -1;
6864
6865 /* ... addressed by a register. */
6866 mem_reg = XEXP (mem, 0);
6867 if (!REG_P (mem_reg))
6868 return -1;
6869
6870 /* We should use MEM's mode since REG's mode in the presence of ZERO_EXTEND
6871 will always be DImode. */
6872 mode_no = ia64_mode_to_int (GET_MODE (mem));
6873
6874 if (mode_no == SPEC_MODE_INVALID
6875 || (extend_p
6876 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6877 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6878 return -1;
6879
6880 extract_insn_cached (insn);
6881 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6882
6883 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6884
6885 return gen_p;
6886 }
6887
6888 enum
6889 {
6890 /* Offset to reach ZERO_EXTEND patterns. */
6891 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6892 /* Number of patterns for each speculation mode. */
6893 SPEC_N = (SPEC_MODE_LAST
6894 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6895 };
6896
6897 enum SPEC_GEN_LD_MAP
6898 {
6899 /* Offset to ld.a patterns. */
6900 SPEC_GEN_A = 0 * SPEC_N,
6901 /* Offset to ld.s patterns. */
6902 SPEC_GEN_S = 1 * SPEC_N,
6903 /* Offset to ld.sa patterns. */
6904 SPEC_GEN_SA = 2 * SPEC_N,
6905 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c
6906 will mutate to chk.s. */
6907 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6908 };
6909
6910 /* These offsets are used to get (4 * SPEC_N). */
6911 enum SPEC_GEN_CHECK_OFFSET
6912 {
6913 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
6914 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
6915 };
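/* Illustrative note, not part of the original ia64.c: with the enum values
   above, the index arithmetic works out to

     SPEC_GEN_EXTEND_OFFSET = 8 - 1 + 1 = 8
     SPEC_N                 = 8 + 3 - 1 + 2 = 12
     SPEC_GEN_A = 0,  SPEC_GEN_S = 12,  SPEC_GEN_SA = 24,  SPEC_GEN_SA_FOR_S = 36
     SPEC_GEN_CHKA_FOR_A_OFFSET  = 4 * 12 - 0  = 48
     SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * 12 - 24 = 24

   so each group of 12 entries in the gen_load and gen_check tables below
   covers one speculation variant, and adding a SPEC_GEN_CHKA_FOR_* offset
   moves an ld.a or ld.sa index into the chk.a group that starts at
   4 * SPEC_N.  */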
6916
6917 /* If GEN_P is true, calculate the index of the needed speculation check and
6918 return the speculative pattern for INSN with speculation type TS, machine
6919 mode MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
6920 If GEN_P is false, just calculate the index of the needed speculation check. */
6921 static rtx
6922 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
6923 {
6924 rtx pat, new_pat;
6925 int load_no;
6926 int shift = 0;
6927
6928 static rtx (* const gen_load[]) (rtx, rtx) = {
6929 gen_movbi_advanced,
6930 gen_movqi_advanced,
6931 gen_movhi_advanced,
6932 gen_movsi_advanced,
6933 gen_movdi_advanced,
6934 gen_movsf_advanced,
6935 gen_movdf_advanced,
6936 gen_movxf_advanced,
6937 gen_movti_advanced,
6938 gen_zero_extendqidi2_advanced,
6939 gen_zero_extendhidi2_advanced,
6940 gen_zero_extendsidi2_advanced,
6941
6942 gen_movbi_speculative,
6943 gen_movqi_speculative,
6944 gen_movhi_speculative,
6945 gen_movsi_speculative,
6946 gen_movdi_speculative,
6947 gen_movsf_speculative,
6948 gen_movdf_speculative,
6949 gen_movxf_speculative,
6950 gen_movti_speculative,
6951 gen_zero_extendqidi2_speculative,
6952 gen_zero_extendhidi2_speculative,
6953 gen_zero_extendsidi2_speculative,
6954
6955 gen_movbi_speculative_advanced,
6956 gen_movqi_speculative_advanced,
6957 gen_movhi_speculative_advanced,
6958 gen_movsi_speculative_advanced,
6959 gen_movdi_speculative_advanced,
6960 gen_movsf_speculative_advanced,
6961 gen_movdf_speculative_advanced,
6962 gen_movxf_speculative_advanced,
6963 gen_movti_speculative_advanced,
6964 gen_zero_extendqidi2_speculative_advanced,
6965 gen_zero_extendhidi2_speculative_advanced,
6966 gen_zero_extendsidi2_speculative_advanced,
6967
6968 gen_movbi_speculative_advanced,
6969 gen_movqi_speculative_advanced,
6970 gen_movhi_speculative_advanced,
6971 gen_movsi_speculative_advanced,
6972 gen_movdi_speculative_advanced,
6973 gen_movsf_speculative_advanced,
6974 gen_movdf_speculative_advanced,
6975 gen_movxf_speculative_advanced,
6976 gen_movti_speculative_advanced,
6977 gen_zero_extendqidi2_speculative_advanced,
6978 gen_zero_extendhidi2_speculative_advanced,
6979 gen_zero_extendsidi2_speculative_advanced
6980 };
6981
6982 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
6983
6984 if (ts & BEGIN_DATA)
6985 {
6986 /* We don't need recovery because even if this is ld.sa, an
6987 ALAT entry will be allocated only if the NAT bit is set to zero.
6988 So it is enough to use ld.c here. */
6989
6990 if (ts & BEGIN_CONTROL)
6991 {
6992 load_no += SPEC_GEN_SA;
6993
6994 if (!mflag_sched_ldc)
6995 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
6996 }
6997 else
6998 {
6999 load_no += SPEC_GEN_A;
7000
7001 if (!mflag_sched_ldc)
7002 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
7003 }
7004 }
7005 else if (ts & BEGIN_CONTROL)
7006 {
7007 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
7008 if (!mflag_control_ldc)
7009 load_no += SPEC_GEN_S;
7010 else
7011 {
7012 gcc_assert (mflag_sched_ldc);
7013 load_no += SPEC_GEN_SA_FOR_S;
7014 }
7015 }
7016 else
7017 gcc_unreachable ();
7018
7019 /* Set the desired check index. We add '1' because a zero element in this
7020 array means that the instruction with that uid is non-speculative. */
7021 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
7022
7023 if (!gen_p)
7024 return 0;
7025
7026 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7027 copy_rtx (recog_data.operand[1]));
7028
7029 pat = PATTERN (insn);
7030 if (GET_CODE (pat) == COND_EXEC)
7031 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7032 (COND_EXEC_TEST (pat)), new_pat);
7033
7034 return new_pat;
7035 }
7036
7037 /* Offset to branchy checks. */
7038 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
7039
7040 /* Return nonzero if INSN needs a branchy recovery check. */
7041 static bool
7042 ia64_needs_block_p (rtx insn)
7043 {
7044 int check_no;
7045
7046 check_no = spec_check_no[INSN_UID(insn)] - 1;
7047 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7048
7049 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7050 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7051 }
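/* Illustrative note, not part of the original ia64.c: with SPEC_N == 12,
   the test above flags check indices 12..23 (the chk.s checks used for
   ld.s) and 48..59 (the chk.a checks reached through the
   SPEC_GEN_CHKA_FOR_* offsets) as needing a branchy recovery block; the
   ld.c-style checks in the remaining groups are simple checks.  */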
7052
7053 /* Generate (or regenerate, if MUTATE_P) a recovery check for INSN.
7054 If LABEL is nonzero or MUTATE_P is true, generate a branchy recovery check.
7055 Otherwise, generate a simple check. */
7056 static rtx
7057 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7058 {
7059 rtx op1, pat, check_pat;
7060
7061 static rtx (* const gen_check[]) (rtx, rtx) = {
7062 gen_movbi_clr,
7063 gen_movqi_clr,
7064 gen_movhi_clr,
7065 gen_movsi_clr,
7066 gen_movdi_clr,
7067 gen_movsf_clr,
7068 gen_movdf_clr,
7069 gen_movxf_clr,
7070 gen_movti_clr,
7071 gen_zero_extendqidi2_clr,
7072 gen_zero_extendhidi2_clr,
7073 gen_zero_extendsidi2_clr,
7074
7075 gen_speculation_check_bi,
7076 gen_speculation_check_qi,
7077 gen_speculation_check_hi,
7078 gen_speculation_check_si,
7079 gen_speculation_check_di,
7080 gen_speculation_check_sf,
7081 gen_speculation_check_df,
7082 gen_speculation_check_xf,
7083 gen_speculation_check_ti,
7084 gen_speculation_check_di,
7085 gen_speculation_check_di,
7086 gen_speculation_check_di,
7087
7088 gen_movbi_clr,
7089 gen_movqi_clr,
7090 gen_movhi_clr,
7091 gen_movsi_clr,
7092 gen_movdi_clr,
7093 gen_movsf_clr,
7094 gen_movdf_clr,
7095 gen_movxf_clr,
7096 gen_movti_clr,
7097 gen_zero_extendqidi2_clr,
7098 gen_zero_extendhidi2_clr,
7099 gen_zero_extendsidi2_clr,
7100
7101 gen_movbi_clr,
7102 gen_movqi_clr,
7103 gen_movhi_clr,
7104 gen_movsi_clr,
7105 gen_movdi_clr,
7106 gen_movsf_clr,
7107 gen_movdf_clr,
7108 gen_movxf_clr,
7109 gen_movti_clr,
7110 gen_zero_extendqidi2_clr,
7111 gen_zero_extendhidi2_clr,
7112 gen_zero_extendsidi2_clr,
7113
7114 gen_advanced_load_check_clr_bi,
7115 gen_advanced_load_check_clr_qi,
7116 gen_advanced_load_check_clr_hi,
7117 gen_advanced_load_check_clr_si,
7118 gen_advanced_load_check_clr_di,
7119 gen_advanced_load_check_clr_sf,
7120 gen_advanced_load_check_clr_df,
7121 gen_advanced_load_check_clr_xf,
7122 gen_advanced_load_check_clr_ti,
7123 gen_advanced_load_check_clr_di,
7124 gen_advanced_load_check_clr_di,
7125 gen_advanced_load_check_clr_di,
7126
7127 /* The following checks are generated during mutation. */
7128 gen_advanced_load_check_clr_bi,
7129 gen_advanced_load_check_clr_qi,
7130 gen_advanced_load_check_clr_hi,
7131 gen_advanced_load_check_clr_si,
7132 gen_advanced_load_check_clr_di,
7133 gen_advanced_load_check_clr_sf,
7134 gen_advanced_load_check_clr_df,
7135 gen_advanced_load_check_clr_xf,
7136 gen_advanced_load_check_clr_ti,
7137 gen_advanced_load_check_clr_di,
7138 gen_advanced_load_check_clr_di,
7139 gen_advanced_load_check_clr_di,
7140
7141 0,0,0,0,0,0,0,0,0,0,0,0,
7142
7143 gen_advanced_load_check_clr_bi,
7144 gen_advanced_load_check_clr_qi,
7145 gen_advanced_load_check_clr_hi,
7146 gen_advanced_load_check_clr_si,
7147 gen_advanced_load_check_clr_di,
7148 gen_advanced_load_check_clr_sf,
7149 gen_advanced_load_check_clr_df,
7150 gen_advanced_load_check_clr_xf,
7151 gen_advanced_load_check_clr_ti,
7152 gen_advanced_load_check_clr_di,
7153 gen_advanced_load_check_clr_di,
7154 gen_advanced_load_check_clr_di,
7155
7156 gen_speculation_check_bi,
7157 gen_speculation_check_qi,
7158 gen_speculation_check_hi,
7159 gen_speculation_check_si,
7160 gen_speculation_check_di,
7161 gen_speculation_check_sf,
7162 gen_speculation_check_df,
7163 gen_speculation_check_xf,
7164 gen_speculation_check_ti,
7165 gen_speculation_check_di,
7166 gen_speculation_check_di,
7167 gen_speculation_check_di
7168 };
7169
7170 extract_insn_cached (insn);
7171
7172 if (label)
7173 {
7174 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7175 op1 = label;
7176 }
7177 else
7178 {
7179 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7180 op1 = copy_rtx (recog_data.operand[1]);
7181 }
7182
7183 if (mutate_p)
7184 /* INSN is ld.c.
7185 Find the speculation check number by searching for original
7186 speculative load in the RESOLVED_DEPS list of INSN.
7187 As long as patterns are unique for each instruction, this can be
7188 accomplished by matching ORIG_PAT fields. */
7189 {
7190 sd_iterator_def sd_it;
7191 dep_t dep;
7192 int check_no = 0;
7193 rtx orig_pat = ORIG_PAT (insn);
7194
7195 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7196 {
7197 rtx x = DEP_PRO (dep);
7198
7199 if (ORIG_PAT (x) == orig_pat)
7200 check_no = spec_check_no[INSN_UID (x)];
7201 }
7202 gcc_assert (check_no);
7203
7204 spec_check_no[INSN_UID (insn)] = (check_no
7205 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7206 }
7207
7208 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7209 (copy_rtx (recog_data.operand[0]), op1));
7210
7211 pat = PATTERN (insn);
7212 if (GET_CODE (pat) == COND_EXEC)
7213 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7214 check_pat);
7215
7216 return check_pat;
7217 }
7218
7219 /* Return nonzero if X is a branchy recovery check. */
7220 static int
7221 ia64_spec_check_p (rtx x)
7222 {
7223 x = PATTERN (x);
7224 if (GET_CODE (x) == COND_EXEC)
7225 x = COND_EXEC_CODE (x);
7226 if (GET_CODE (x) == SET)
7227 return ia64_spec_check_src_p (SET_SRC (x));
7228 return 0;
7229 }
7230
7231 /* Return nonzero if SRC belongs to a recovery check. */
7232 static int
7233 ia64_spec_check_src_p (rtx src)
7234 {
7235 if (GET_CODE (src) == IF_THEN_ELSE)
7236 {
7237 rtx t;
7238
7239 t = XEXP (src, 0);
7240 if (GET_CODE (t) == NE)
7241 {
7242 t = XEXP (t, 0);
7243
7244 if (GET_CODE (t) == UNSPEC)
7245 {
7246 int code;
7247
7248 code = XINT (t, 1);
7249
7250 if (code == UNSPEC_CHKACLR
7251 || code == UNSPEC_CHKS
7252 || code == UNSPEC_LDCCLR)
7253 {
7254 gcc_assert (code != 0);
7255 return code;
7256 }
7257 }
7258 }
7259 }
7260 return 0;
7261 }
7262 \f
7263
7264 /* The following page contains abstract data `bundle states' which are
7265 used for bundling insns (inserting nops and template generation). */
7266
7267 /* The following describes the state of insn bundling. */
7268
7269 struct bundle_state
7270 {
7271 /* Unique bundle state number to identify them in the debugging
7272 output */
7273 int unique_num;
7274 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7275 /* number of nops before and after the insn */
7276 short before_nops_num, after_nops_num;
7277 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7278 insn) */
7279 int cost; /* cost of the state in cycles */
7280 int accumulated_insns_num; /* number of all previous insns including
7281 nops. L is considered as 2 insns */
7282 int branch_deviation; /* deviation of previous branches from 3rd slots */
7283 struct bundle_state *next; /* next state with the same insn_num */
7284 struct bundle_state *originator; /* originator (previous insn state) */
7285 /* All bundle states are in the following chain. */
7286 struct bundle_state *allocated_states_chain;
7287 /* The DFA State after issuing the insn and the nops. */
7288 state_t dfa_state;
7289 };
7290
7291 /* The following maps an insn number to the corresponding bundle state. */
7292
7293 static struct bundle_state **index_to_bundle_states;
7294
7295 /* The unique number of the next bundle state. */
7296
7297 static int bundle_states_num;
7298
7299 /* All allocated bundle states are in the following chain. */
7300
7301 static struct bundle_state *allocated_bundle_states_chain;
7302
7303 /* All allocated but not used bundle states are in the following
7304 chain. */
7305
7306 static struct bundle_state *free_bundle_state_chain;
7307
7308
7309 /* The following function returns a free bundle state. */
7310
7311 static struct bundle_state *
7312 get_free_bundle_state (void)
7313 {
7314 struct bundle_state *result;
7315
7316 if (free_bundle_state_chain != NULL)
7317 {
7318 result = free_bundle_state_chain;
7319 free_bundle_state_chain = result->next;
7320 }
7321 else
7322 {
7323 result = xmalloc (sizeof (struct bundle_state));
7324 result->dfa_state = xmalloc (dfa_state_size);
7325 result->allocated_states_chain = allocated_bundle_states_chain;
7326 allocated_bundle_states_chain = result;
7327 }
7328 result->unique_num = bundle_states_num++;
7329 return result;
7330
7331 }
7332
7333 /* The following function frees the given bundle state. */
7334
7335 static void
7336 free_bundle_state (struct bundle_state *state)
7337 {
7338 state->next = free_bundle_state_chain;
7339 free_bundle_state_chain = state;
7340 }
7341
7342 /* Start work with abstract data `bundle states'. */
7343
7344 static void
7345 initiate_bundle_states (void)
7346 {
7347 bundle_states_num = 0;
7348 free_bundle_state_chain = NULL;
7349 allocated_bundle_states_chain = NULL;
7350 }
7351
7352 /* Finish work with abstract data `bundle states'. */
7353
7354 static void
7355 finish_bundle_states (void)
7356 {
7357 struct bundle_state *curr_state, *next_state;
7358
7359 for (curr_state = allocated_bundle_states_chain;
7360 curr_state != NULL;
7361 curr_state = next_state)
7362 {
7363 next_state = curr_state->allocated_states_chain;
7364 free (curr_state->dfa_state);
7365 free (curr_state);
7366 }
7367 }
7368
7369 /* Hash table of the bundle states. The key is dfa_state and insn_num
7370 of the bundle states. */
7371
7372 static htab_t bundle_state_table;
7373
7374 /* The function returns hash of BUNDLE_STATE. */
7375
7376 static unsigned
7377 bundle_state_hash (const void *bundle_state)
7378 {
7379 const struct bundle_state *const state
7380 = (const struct bundle_state *) bundle_state;
7381 unsigned result, i;
7382
7383 for (result = i = 0; i < dfa_state_size; i++)
7384 result += (((unsigned char *) state->dfa_state) [i]
7385 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7386 return result + state->insn_num;
7387 }
7388
7389 /* The function returns nonzero if the bundle state keys are equal. */
7390
7391 static int
7392 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7393 {
7394 const struct bundle_state *const state1
7395 = (const struct bundle_state *) bundle_state_1;
7396 const struct bundle_state *const state2
7397 = (const struct bundle_state *) bundle_state_2;
7398
7399 return (state1->insn_num == state2->insn_num
7400 && memcmp (state1->dfa_state, state2->dfa_state,
7401 dfa_state_size) == 0);
7402 }
7403
7404 /* The function inserts the BUNDLE_STATE into the hash table. The
7405 function returns nonzero if the bundle has been inserted into the
7406 table. The table contains the best bundle state with a given key. */
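/* When two states collide in the table, the better one is kept: lower
   cost wins, ties go to the state with fewer accumulated insns (nops
   included), and remaining ties to the one with smaller branch
   deviation -- exactly the comparison performed below. */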
7407
7408 static int
7409 insert_bundle_state (struct bundle_state *bundle_state)
7410 {
7411 void **entry_ptr;
7412
7413 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7414 if (*entry_ptr == NULL)
7415 {
7416 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7417 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7418 *entry_ptr = (void *) bundle_state;
7419 return TRUE;
7420 }
7421 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7422 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7423 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7424 > bundle_state->accumulated_insns_num
7425 || (((struct bundle_state *)
7426 *entry_ptr)->accumulated_insns_num
7427 == bundle_state->accumulated_insns_num
7428 && ((struct bundle_state *)
7429 *entry_ptr)->branch_deviation
7430 > bundle_state->branch_deviation))))
7431
7432 {
7433 struct bundle_state temp;
7434
7435 temp = *(struct bundle_state *) *entry_ptr;
7436 *(struct bundle_state *) *entry_ptr = *bundle_state;
7437 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7438 *bundle_state = temp;
7439 }
7440 return FALSE;
7441 }
7442
7443 /* Start work with the hash table. */
7444
7445 static void
7446 initiate_bundle_state_table (void)
7447 {
7448 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7449 (htab_del) 0);
7450 }
7451
7452 /* Finish work with the hash table. */
7453
7454 static void
7455 finish_bundle_state_table (void)
7456 {
7457 htab_delete (bundle_state_table);
7458 }
7459
7460 \f
7461
7462 /* The following variable is an insn `nop' used to check bundle states
7463 with different numbers of inserted nops. */
7464
7465 static rtx ia64_nop;
7466
7467 /* The following function tries to issue NOPS_NUM nops for the current
7468 state without advancing the processor cycle. If it fails, the
7469 function returns FALSE and frees the current state. */
7470
7471 static int
7472 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7473 {
7474 int i;
7475
7476 for (i = 0; i < nops_num; i++)
7477 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7478 {
7479 free_bundle_state (curr_state);
7480 return FALSE;
7481 }
7482 return TRUE;
7483 }
7484
7485 /* The following function tries to issue INSN for the current
7486 state without advancing the processor cycle. If it fails, the
7487 function returns FALSE and frees the current state. */
7488
7489 static int
7490 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7491 {
7492 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7493 {
7494 free_bundle_state (curr_state);
7495 return FALSE;
7496 }
7497 return TRUE;
7498 }
7499
7500 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7501 starting with ORIGINATOR without advancing the processor cycle. If
7502 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7503 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7504 If it is successful, the function creates a new bundle state and
7505 inserts it into the hash table and into `index_to_bundle_states'. */
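/* For example, for an ordinary (non-TImode, non-barrier) INSN with
   BEFORE_NOPS_NUM == 1 the new state models issuing `nop; INSN' after
   ORIGINATOR: accumulated_insns_num grows by 2 (3 if INSN is of type L)
   and the DFA state advances accordingly. */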
7506
7507 static void
7508 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7509 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7510 {
7511 struct bundle_state *curr_state;
7512
7513 curr_state = get_free_bundle_state ();
7514 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7515 curr_state->insn = insn;
7516 curr_state->insn_num = originator->insn_num + 1;
7517 curr_state->cost = originator->cost;
7518 curr_state->originator = originator;
7519 curr_state->before_nops_num = before_nops_num;
7520 curr_state->after_nops_num = 0;
7521 curr_state->accumulated_insns_num
7522 = originator->accumulated_insns_num + before_nops_num;
7523 curr_state->branch_deviation = originator->branch_deviation;
7524 gcc_assert (insn);
7525 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7526 {
7527 gcc_assert (GET_MODE (insn) != TImode);
7528 if (!try_issue_nops (curr_state, before_nops_num))
7529 return;
7530 if (!try_issue_insn (curr_state, insn))
7531 return;
7532 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7533 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7534 && curr_state->accumulated_insns_num % 3 != 0)
7535 {
7536 free_bundle_state (curr_state);
7537 return;
7538 }
7539 }
7540 else if (GET_MODE (insn) != TImode)
7541 {
7542 if (!try_issue_nops (curr_state, before_nops_num))
7543 return;
7544 if (!try_issue_insn (curr_state, insn))
7545 return;
7546 curr_state->accumulated_insns_num++;
7547 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7548 && asm_noperands (PATTERN (insn)) < 0);
7549
7550 if (ia64_safe_type (insn) == TYPE_L)
7551 curr_state->accumulated_insns_num++;
7552 }
7553 else
7554 {
7555 /* If this is an insn that must be first in a group, then don't allow
7556 nops to be emitted before it. Currently, alloc is the only such
7557 supported instruction. */
7558 /* ??? The bundling automatons should handle this for us, but they do
7559 not yet have support for the first_insn attribute. */
7560 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7561 {
7562 free_bundle_state (curr_state);
7563 return;
7564 }
7565
7566 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7567 state_transition (curr_state->dfa_state, NULL);
7568 curr_state->cost++;
7569 if (!try_issue_nops (curr_state, before_nops_num))
7570 return;
7571 if (!try_issue_insn (curr_state, insn))
7572 return;
7573 curr_state->accumulated_insns_num++;
7574 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7575 || asm_noperands (PATTERN (insn)) >= 0)
7576 {
7577 /* Finish bundle containing asm insn. */
7578 curr_state->after_nops_num
7579 = 3 - curr_state->accumulated_insns_num % 3;
7580 curr_state->accumulated_insns_num
7581 += 3 - curr_state->accumulated_insns_num % 3;
7582 }
7583 else if (ia64_safe_type (insn) == TYPE_L)
7584 curr_state->accumulated_insns_num++;
7585 }
7586 if (ia64_safe_type (insn) == TYPE_B)
7587 curr_state->branch_deviation
7588 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7589 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7590 {
7591 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7592 {
7593 state_t dfa_state;
7594 struct bundle_state *curr_state1;
7595 struct bundle_state *allocated_states_chain;
7596
7597 curr_state1 = get_free_bundle_state ();
7598 dfa_state = curr_state1->dfa_state;
7599 allocated_states_chain = curr_state1->allocated_states_chain;
7600 *curr_state1 = *curr_state;
7601 curr_state1->dfa_state = dfa_state;
7602 curr_state1->allocated_states_chain = allocated_states_chain;
7603 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7604 dfa_state_size);
7605 curr_state = curr_state1;
7606 }
7607 if (!try_issue_nops (curr_state,
7608 3 - curr_state->accumulated_insns_num % 3))
7609 return;
7610 curr_state->after_nops_num
7611 = 3 - curr_state->accumulated_insns_num % 3;
7612 curr_state->accumulated_insns_num
7613 += 3 - curr_state->accumulated_insns_num % 3;
7614 }
7615 if (!insert_bundle_state (curr_state))
7616 free_bundle_state (curr_state);
7617 return;
7618 }
7619
7620 /* The following function returns the position in the two-bundle
7621 window for the given STATE. */
7622
7623 static int
7624 get_max_pos (state_t state)
7625 {
7626 if (cpu_unit_reservation_p (state, pos_6))
7627 return 6;
7628 else if (cpu_unit_reservation_p (state, pos_5))
7629 return 5;
7630 else if (cpu_unit_reservation_p (state, pos_4))
7631 return 4;
7632 else if (cpu_unit_reservation_p (state, pos_3))
7633 return 3;
7634 else if (cpu_unit_reservation_p (state, pos_2))
7635 return 2;
7636 else if (cpu_unit_reservation_p (state, pos_1))
7637 return 1;
7638 else
7639 return 0;
7640 }
7641
7642 /* The function returns the code of a possible template for the given
7643 position and state. The function should be called only with the two
7644 position values 3 or 6. We avoid generating F NOPs by putting
7645 templates containing F insns at the end of the template search,
7646 because of an undocumented anomaly in McKinley-derived cores which can
7647 cause stalls if an F-unit insn (including a NOP) is issued within a
7648 six-cycle window after reading certain application registers (such
7649 as ar.bsp). Furthermore, power considerations also argue against
7650 the use of F-unit instructions unless they are really needed. */
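/* For reference (derived from the cpu-unit names tested below and from
   the bundle_selector uses elsewhere in this file): the returned codes
   correspond to 0 = .mii, 1 = .mmi, 2 = .mfi, 3 = .mmf, 4 = .bbb,
   5 = .mbb, 6 = .mib, 7 = .mmb, 8 = .mfb, 9 = .mlx. */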
7651
7652 static int
7653 get_template (state_t state, int pos)
7654 {
7655 switch (pos)
7656 {
7657 case 3:
7658 if (cpu_unit_reservation_p (state, _0mmi_))
7659 return 1;
7660 else if (cpu_unit_reservation_p (state, _0mii_))
7661 return 0;
7662 else if (cpu_unit_reservation_p (state, _0mmb_))
7663 return 7;
7664 else if (cpu_unit_reservation_p (state, _0mib_))
7665 return 6;
7666 else if (cpu_unit_reservation_p (state, _0mbb_))
7667 return 5;
7668 else if (cpu_unit_reservation_p (state, _0bbb_))
7669 return 4;
7670 else if (cpu_unit_reservation_p (state, _0mmf_))
7671 return 3;
7672 else if (cpu_unit_reservation_p (state, _0mfi_))
7673 return 2;
7674 else if (cpu_unit_reservation_p (state, _0mfb_))
7675 return 8;
7676 else if (cpu_unit_reservation_p (state, _0mlx_))
7677 return 9;
7678 else
7679 gcc_unreachable ();
7680 case 6:
7681 if (cpu_unit_reservation_p (state, _1mmi_))
7682 return 1;
7683 else if (cpu_unit_reservation_p (state, _1mii_))
7684 return 0;
7685 else if (cpu_unit_reservation_p (state, _1mmb_))
7686 return 7;
7687 else if (cpu_unit_reservation_p (state, _1mib_))
7688 return 6;
7689 else if (cpu_unit_reservation_p (state, _1mbb_))
7690 return 5;
7691 else if (cpu_unit_reservation_p (state, _1bbb_))
7692 return 4;
7693 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7694 return 3;
7695 else if (cpu_unit_reservation_p (state, _1mfi_))
7696 return 2;
7697 else if (cpu_unit_reservation_p (state, _1mfb_))
7698 return 8;
7699 else if (cpu_unit_reservation_p (state, _1mlx_))
7700 return 9;
7701 else
7702 gcc_unreachable ();
7703 default:
7704 gcc_unreachable ();
7705 }
7706 }
7707
7708 /* The following function returns the first insn important for insn
7709 bundling, starting at INSN and stopping before TAIL. */
7710
7711 static rtx
7712 get_next_important_insn (rtx insn, rtx tail)
7713 {
7714 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7715 if (INSN_P (insn)
7716 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7717 && GET_CODE (PATTERN (insn)) != USE
7718 && GET_CODE (PATTERN (insn)) != CLOBBER)
7719 return insn;
7720 return NULL_RTX;
7721 }
7722
7723 /* Add a bundle selector TEMPLATE0 before INSN. */
7724
7725 static void
7726 ia64_add_bundle_selector_before (int template0, rtx insn)
7727 {
7728 rtx b = gen_bundle_selector (GEN_INT (template0));
7729
7730 ia64_emit_insn_before (b, insn);
7731 #if NR_BUNDLES == 10
7732 if ((template0 == 4 || template0 == 5)
7733 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7734 {
7735 int i;
7736 rtx note = NULL_RTX;
7737
7738 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
7739 first or second slot. If it is and has a REG_EH_REGION note, copy it
7740 to the following nops, as br.call sets rp to the address of the following
7741 bundle and therefore an EH region end must be on a bundle
7742 boundary. */
7743 insn = PREV_INSN (insn);
7744 for (i = 0; i < 3; i++)
7745 {
7746 do
7747 insn = next_active_insn (insn);
7748 while (GET_CODE (insn) == INSN
7749 && get_attr_empty (insn) == EMPTY_YES);
7750 if (GET_CODE (insn) == CALL_INSN)
7751 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7752 else if (note)
7753 {
7754 int code;
7755
7756 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7757 || code == CODE_FOR_nop_b);
7758 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7759 note = NULL_RTX;
7760 else
7761 REG_NOTES (insn)
7762 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7763 REG_NOTES (insn));
7764 }
7765 }
7766 }
7767 #endif
7768 }
7769
7770 /* The following function does insn bundling. Bundling means
7771 inserting templates and nop insns to fit insn groups into permitted
7772 templates. Instruction scheduling uses an NDFA (non-deterministic
7773 finite automaton) encoding information about the templates and the
7774 inserted nops. The nondeterminism of the automaton permits following
7775 all possible insn sequences very quickly.
7776
7777 Unfortunately it is not possible to get information about inserted
7778 nop insns and used templates from the automaton states. The
7779 automaton only says that we can issue an insn, possibly inserting
7780 some nops before it and using some template. Therefore insn
7781 bundling in this function is implemented by using a DFA
7782 (deterministic finite automaton). We follow all possible insn
7783 sequences by inserting 0-2 nops (that is what the NDFA describes for
7784 insn scheduling) before/after each insn being bundled. We know the
7785 start of a simulated processor cycle from insn scheduling (an insn
7786 starting a new cycle has TImode).
7787
7788 A simple implementation of insn bundling would create an enormous
7789 number of possible insn sequences satisfying the information about new
7790 cycle ticks taken from the insn scheduling. To make the algorithm
7791 practical we use dynamic programming. Each decision (about
7792 inserting nops and implicitly about previous decisions) is described
7793 by the structure bundle_state (see above). If we generate the same
7794 bundle state (the key is the automaton state after issuing the insns and
7795 nops for it), we reuse the already generated one. As a consequence we
7796 reject some decisions which cannot improve the solution and
7797 reduce the memory used by the algorithm.
7798
7799 When we reach the end of the EBB (extended basic block), we choose the
7800 best sequence and then, moving back through the EBB, insert templates for
7801 the best alternative. The templates are taken by querying the
7802 automaton state for each insn in the chosen bundle states.
7803
7804 So the algorithm makes two (forward and backward) passes through the
7805 EBB. There is an additional forward pass through the EBB for the Itanium1
7806 processor. This pass inserts more nops to make the dependency between
7807 a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
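/* In outline: for each important insn, every bundle state reached after
   the previous insn is extended by issuing the insn with 0, 1 or 2 nops
   in front of it (see issue_nops_and_insn above); states with the same
   (dfa_state, insn_num) key keep only the best representative in
   insert_bundle_state, and the backward pass follows the `originator'
   links of the chosen final state to emit the nops and templates. */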
7808
7809 static void
7810 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7811 {
7812 struct bundle_state *curr_state, *next_state, *best_state;
7813 rtx insn, next_insn;
7814 int insn_num;
7815 int i, bundle_end_p, only_bundle_end_p, asm_p;
7816 int pos = 0, max_pos, template0, template1;
7817 rtx b;
7818 rtx nop;
7819 enum attr_type type;
7820
7821 insn_num = 0;
7822 /* Count insns in the EBB. */
7823 for (insn = NEXT_INSN (prev_head_insn);
7824 insn && insn != tail;
7825 insn = NEXT_INSN (insn))
7826 if (INSN_P (insn))
7827 insn_num++;
7828 if (insn_num == 0)
7829 return;
7830 bundling_p = 1;
7831 dfa_clean_insn_cache ();
7832 initiate_bundle_state_table ();
7833 index_to_bundle_states = xmalloc ((insn_num + 2)
7834 * sizeof (struct bundle_state *));
7835 /* First (forward) pass -- generation of bundle states. */
7836 curr_state = get_free_bundle_state ();
7837 curr_state->insn = NULL;
7838 curr_state->before_nops_num = 0;
7839 curr_state->after_nops_num = 0;
7840 curr_state->insn_num = 0;
7841 curr_state->cost = 0;
7842 curr_state->accumulated_insns_num = 0;
7843 curr_state->branch_deviation = 0;
7844 curr_state->next = NULL;
7845 curr_state->originator = NULL;
7846 state_reset (curr_state->dfa_state);
7847 index_to_bundle_states [0] = curr_state;
7848 insn_num = 0;
7849 /* Shift the cycle mark if it is put on an insn which could be ignored. */
7850 for (insn = NEXT_INSN (prev_head_insn);
7851 insn != tail;
7852 insn = NEXT_INSN (insn))
7853 if (INSN_P (insn)
7854 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7855 || GET_CODE (PATTERN (insn)) == USE
7856 || GET_CODE (PATTERN (insn)) == CLOBBER)
7857 && GET_MODE (insn) == TImode)
7858 {
7859 PUT_MODE (insn, VOIDmode);
7860 for (next_insn = NEXT_INSN (insn);
7861 next_insn != tail;
7862 next_insn = NEXT_INSN (next_insn))
7863 if (INSN_P (next_insn)
7864 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7865 && GET_CODE (PATTERN (next_insn)) != USE
7866 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7867 {
7868 PUT_MODE (next_insn, TImode);
7869 break;
7870 }
7871 }
7872 /* Forward pass: generation of bundle states. */
7873 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7874 insn != NULL_RTX;
7875 insn = next_insn)
7876 {
7877 gcc_assert (INSN_P (insn)
7878 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7879 && GET_CODE (PATTERN (insn)) != USE
7880 && GET_CODE (PATTERN (insn)) != CLOBBER);
7881 type = ia64_safe_type (insn);
7882 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7883 insn_num++;
7884 index_to_bundle_states [insn_num] = NULL;
7885 for (curr_state = index_to_bundle_states [insn_num - 1];
7886 curr_state != NULL;
7887 curr_state = next_state)
7888 {
7889 pos = curr_state->accumulated_insns_num % 3;
7890 next_state = curr_state->next;
7891 /* We must fill up the current bundle in order to start a
7892 subsequent asm insn in a new bundle. Asm insn is always
7893 placed in a separate bundle. */
7894 only_bundle_end_p
7895 = (next_insn != NULL_RTX
7896 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7897 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7898 /* We may fill up the current bundle if it is the cycle end
7899 without a group barrier. */
7900 bundle_end_p
7901 = (only_bundle_end_p || next_insn == NULL_RTX
7902 || (GET_MODE (next_insn) == TImode
7903 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7904 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7905 || type == TYPE_S
7906 /* We need to insert 2 nops for cases like M_MII. To
7907 guarantee issuing all insns on the same cycle for
7908 Itanium 1, we need to issue 2 nops after the first M
7909 insn (MnnMII where n is a nop insn). */
7910 || ((type == TYPE_M || type == TYPE_A)
7911 && ia64_tune == PROCESSOR_ITANIUM
7912 && !bundle_end_p && pos == 1))
7913 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7914 only_bundle_end_p);
7915 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7916 only_bundle_end_p);
7917 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7918 only_bundle_end_p);
7919 }
7920 gcc_assert (index_to_bundle_states [insn_num]);
7921 for (curr_state = index_to_bundle_states [insn_num];
7922 curr_state != NULL;
7923 curr_state = curr_state->next)
7924 if (verbose >= 2 && dump)
7925 {
7926 /* This structure is taken from generated code of the
7927 pipeline hazard recognizer (see file insn-attrtab.c).
7928 Please don't forget to change the structure if a new
7929 automaton is added to .md file. */
7930 struct DFA_chip
7931 {
7932 unsigned short one_automaton_state;
7933 unsigned short oneb_automaton_state;
7934 unsigned short two_automaton_state;
7935 unsigned short twob_automaton_state;
7936 };
7937
7938 fprintf
7939 (dump,
7940 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7941 curr_state->unique_num,
7942 (curr_state->originator == NULL
7943 ? -1 : curr_state->originator->unique_num),
7944 curr_state->cost,
7945 curr_state->before_nops_num, curr_state->after_nops_num,
7946 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7947 (ia64_tune == PROCESSOR_ITANIUM
7948 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7949 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7950 INSN_UID (insn));
7951 }
7952 }
7953
7954 /* We should find a solution because the 2nd insn scheduling has
7955 found one. */
7956 gcc_assert (index_to_bundle_states [insn_num]);
7957 /* Find a state corresponding to the best insn sequence. */
7958 best_state = NULL;
7959 for (curr_state = index_to_bundle_states [insn_num];
7960 curr_state != NULL;
7961 curr_state = curr_state->next)
7962 /* We are only looking at the states with a fully filled up last
7963 bundle. First we prefer insn sequences with minimal cost,
7964 then those with the fewest inserted nops, and finally those with
7965 branch insns placed in 3rd slots. */
7966 if (curr_state->accumulated_insns_num % 3 == 0
7967 && (best_state == NULL || best_state->cost > curr_state->cost
7968 || (best_state->cost == curr_state->cost
7969 && (curr_state->accumulated_insns_num
7970 < best_state->accumulated_insns_num
7971 || (curr_state->accumulated_insns_num
7972 == best_state->accumulated_insns_num
7973 && curr_state->branch_deviation
7974 < best_state->branch_deviation)))))
7975 best_state = curr_state;
7976 /* Second (backward) pass: adding nops and templates. */
7977 insn_num = best_state->before_nops_num;
7978 template0 = template1 = -1;
7979 for (curr_state = best_state;
7980 curr_state->originator != NULL;
7981 curr_state = curr_state->originator)
7982 {
7983 insn = curr_state->insn;
7984 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7985 || asm_noperands (PATTERN (insn)) >= 0);
7986 insn_num++;
7987 if (verbose >= 2 && dump)
7988 {
7989 struct DFA_chip
7990 {
7991 unsigned short one_automaton_state;
7992 unsigned short oneb_automaton_state;
7993 unsigned short two_automaton_state;
7994 unsigned short twob_automaton_state;
7995 };
7996
7997 fprintf
7998 (dump,
7999 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8000 curr_state->unique_num,
8001 (curr_state->originator == NULL
8002 ? -1 : curr_state->originator->unique_num),
8003 curr_state->cost,
8004 curr_state->before_nops_num, curr_state->after_nops_num,
8005 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8006 (ia64_tune == PROCESSOR_ITANIUM
8007 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8008 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8009 INSN_UID (insn));
8010 }
8011 /* Find the position in the current bundle window. The window can
8012 contain at most two bundles. A two-bundle window means that
8013 the processor will make two bundle rotations. */
8014 max_pos = get_max_pos (curr_state->dfa_state);
8015 if (max_pos == 6
8016 /* The following (negative template number) means that the
8017 processor did one bundle rotation. */
8018 || (max_pos == 3 && template0 < 0))
8019 {
8020 /* We are at the end of the window -- find template(s) for
8021 its bundle(s). */
8022 pos = max_pos;
8023 if (max_pos == 3)
8024 template0 = get_template (curr_state->dfa_state, 3);
8025 else
8026 {
8027 template1 = get_template (curr_state->dfa_state, 3);
8028 template0 = get_template (curr_state->dfa_state, 6);
8029 }
8030 }
8031 if (max_pos > 3 && template1 < 0)
8032 /* This may happen when we have a stop inside a bundle. */
8033 {
8034 gcc_assert (pos <= 3);
8035 template1 = get_template (curr_state->dfa_state, 3);
8036 pos += 3;
8037 }
8038 if (!asm_p)
8039 /* Emit nops after the current insn. */
8040 for (i = 0; i < curr_state->after_nops_num; i++)
8041 {
8042 nop = gen_nop ();
8043 emit_insn_after (nop, insn);
8044 pos--;
8045 gcc_assert (pos >= 0);
8046 if (pos % 3 == 0)
8047 {
8048 /* We are at the start of a bundle: emit the template
8049 (it should be defined). */
8050 gcc_assert (template0 >= 0);
8051 ia64_add_bundle_selector_before (template0, nop);
8052 /* If we have a two-bundle window, we make one bundle
8053 rotation. Otherwise template0 will be undefined
8054 (a negative value). */
8055 template0 = template1;
8056 template1 = -1;
8057 }
8058 }
8059 /* Move the position backward in the window. A group barrier has
8060 no slot. An asm insn takes a whole bundle. */
8061 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8062 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8063 && asm_noperands (PATTERN (insn)) < 0)
8064 pos--;
8065 /* Long insn takes 2 slots. */
8066 if (ia64_safe_type (insn) == TYPE_L)
8067 pos--;
8068 gcc_assert (pos >= 0);
8069 if (pos % 3 == 0
8070 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8071 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8072 && asm_noperands (PATTERN (insn)) < 0)
8073 {
8074 /* The current insn is at the bundle start: emit the
8075 template. */
8076 gcc_assert (template0 >= 0);
8077 ia64_add_bundle_selector_before (template0, insn);
8078 b = PREV_INSN (insn);
8079 insn = b;
8080 /* See comment above in analogous place for emitting nops
8081 after the insn. */
8082 template0 = template1;
8083 template1 = -1;
8084 }
8085 /* Emit nops before the current insn. */
8086 for (i = 0; i < curr_state->before_nops_num; i++)
8087 {
8088 nop = gen_nop ();
8089 ia64_emit_insn_before (nop, insn);
8090 nop = PREV_INSN (insn);
8091 insn = nop;
8092 pos--;
8093 gcc_assert (pos >= 0);
8094 if (pos % 3 == 0)
8095 {
8096 /* See comment above in analogous place for emitting nops
8097 after the insn. */
8098 gcc_assert (template0 >= 0);
8099 ia64_add_bundle_selector_before (template0, insn);
8100 b = PREV_INSN (insn);
8101 insn = b;
8102 template0 = template1;
8103 template1 = -1;
8104 }
8105 }
8106 }
8107 if (ia64_tune == PROCESSOR_ITANIUM)
8108 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8109 Itanium1 has a strange design: if the distance between an insn
8110 and a dependent MM-insn is less than 4 cycles, we get an additional
8111 6-cycle stall. So we make the distance equal to 4 cycles if it
8112 is less. */
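/* For instance, if the producer and the dependent MM-insn ended up only
   one or two cycles apart after bundling, the code below pads the gap
   with extra bundles of nops and stop bits until the 4-cycle distance
   is reached. */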
8113 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8114 insn != NULL_RTX;
8115 insn = next_insn)
8116 {
8117 gcc_assert (INSN_P (insn)
8118 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8119 && GET_CODE (PATTERN (insn)) != USE
8120 && GET_CODE (PATTERN (insn)) != CLOBBER);
8121 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8122 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8123 /* We found a MM-insn which needs additional cycles. */
8124 {
8125 rtx last;
8126 int i, j, n;
8127 int pred_stop_p;
8128
8129 /* Now we are searching for a template of the bundle in
8130 which the MM-insn is placed and the position of the
8131 insn in the bundle (0, 1, 2). We also check whether
8132 there is a stop before the insn. */
8133 last = prev_active_insn (insn);
8134 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8135 if (pred_stop_p)
8136 last = prev_active_insn (last);
8137 n = 0;
8138 for (;; last = prev_active_insn (last))
8139 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8140 {
8141 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8142 if (template0 == 9)
8143 /* The insn is in an MLX bundle. Change the template
8144 to MFI because we will add nops before the
8145 insn. This simplifies the subsequent code a lot. */
8146 PATTERN (last)
8147 = gen_bundle_selector (const2_rtx); /* -> MFI */
8148 break;
8149 }
8150 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8151 && (ia64_safe_itanium_class (last)
8152 != ITANIUM_CLASS_IGNORE))
8153 n++;
8154 /* Some correctness checks: the stop is not at the
8155 bundle start, there are no more than 3 insns in the bundle,
8156 and the MM-insn is not at the start of a bundle with
8157 template MLX. */
8158 gcc_assert ((!pred_stop_p || n)
8159 && n <= 2
8160 && (template0 != 9 || !n));
8161 /* Fill the rest of the original bundle with nops; the MM-insn itself will be placed into a new bundle below. */
8162 for (j = 3 - n; j > 0; j --)
8163 ia64_emit_insn_before (gen_nop (), insn);
8164 /* This takes into account that we will add N more nops
8165 before the insn later -- see the code below. */
8166 add_cycles [INSN_UID (insn)]--;
8167 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8168 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8169 insn);
8170 if (pred_stop_p)
8171 add_cycles [INSN_UID (insn)]--;
8172 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8173 {
8174 /* Insert "MII;" template. */
8175 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8176 insn);
8177 ia64_emit_insn_before (gen_nop (), insn);
8178 ia64_emit_insn_before (gen_nop (), insn);
8179 if (i > 1)
8180 {
8181 /* To decrease code size, we use "MI;I;"
8182 template. */
8183 ia64_emit_insn_before
8184 (gen_insn_group_barrier (GEN_INT (3)), insn);
8185 i--;
8186 }
8187 ia64_emit_insn_before (gen_nop (), insn);
8188 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8189 insn);
8190 }
8191 /* Put the MM-insn in the same slot of a bundle with the
8192 same template as the original one. */
8193 ia64_add_bundle_selector_before (template0, insn);
8194 /* To put the insn in the same slot, add necessary number
8195 of nops. */
8196 for (j = n; j > 0; j --)
8197 ia64_emit_insn_before (gen_nop (), insn);
8198 /* Put the stop if the original bundle had it. */
8199 if (pred_stop_p)
8200 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8201 insn);
8202 }
8203 }
8204 free (index_to_bundle_states);
8205 finish_bundle_state_table ();
8206 bundling_p = 0;
8207 dfa_clean_insn_cache ();
8208 }
8209
8210 /* The following function is called at the end of scheduling BB or
8211 EBB. After reload, it inserts stop bits and does insn bundling. */
8212
8213 static void
8214 ia64_sched_finish (FILE *dump, int sched_verbose)
8215 {
8216 if (sched_verbose)
8217 fprintf (dump, "// Finishing schedule.\n");
8218 if (!reload_completed)
8219 return;
8220 if (reload_completed)
8221 {
8222 final_emit_insn_group_barriers (dump);
8223 bundling (dump, sched_verbose, current_sched_info->prev_head,
8224 current_sched_info->next_tail);
8225 if (sched_verbose && dump)
8226 fprintf (dump, "// finishing %d-%d\n",
8227 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8228 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8229
8230 return;
8231 }
8232 }
8233
8234 /* The following function inserts stop bits in scheduled BB or EBB. */
8235
8236 static void
8237 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8238 {
8239 rtx insn;
8240 int need_barrier_p = 0;
8241 rtx prev_insn = NULL_RTX;
8242
8243 init_insn_group_barriers ();
8244
8245 for (insn = NEXT_INSN (current_sched_info->prev_head);
8246 insn != current_sched_info->next_tail;
8247 insn = NEXT_INSN (insn))
8248 {
8249 if (GET_CODE (insn) == BARRIER)
8250 {
8251 rtx last = prev_active_insn (insn);
8252
8253 if (! last)
8254 continue;
8255 if (GET_CODE (last) == JUMP_INSN
8256 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8257 last = prev_active_insn (last);
8258 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8259 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8260
8261 init_insn_group_barriers ();
8262 need_barrier_p = 0;
8263 prev_insn = NULL_RTX;
8264 }
8265 else if (INSN_P (insn))
8266 {
8267 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8268 {
8269 init_insn_group_barriers ();
8270 need_barrier_p = 0;
8271 prev_insn = NULL_RTX;
8272 }
8273 else if (need_barrier_p || group_barrier_needed (insn))
8274 {
8275 if (TARGET_EARLY_STOP_BITS)
8276 {
8277 rtx last;
8278
8279 for (last = insn;
8280 last != current_sched_info->prev_head;
8281 last = PREV_INSN (last))
8282 if (INSN_P (last) && GET_MODE (last) == TImode
8283 && stops_p [INSN_UID (last)])
8284 break;
8285 if (last == current_sched_info->prev_head)
8286 last = insn;
8287 last = prev_active_insn (last);
8288 if (last
8289 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8290 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8291 last);
8292 init_insn_group_barriers ();
8293 for (last = NEXT_INSN (last);
8294 last != insn;
8295 last = NEXT_INSN (last))
8296 if (INSN_P (last))
8297 group_barrier_needed (last);
8298 }
8299 else
8300 {
8301 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8302 insn);
8303 init_insn_group_barriers ();
8304 }
8305 group_barrier_needed (insn);
8306 prev_insn = NULL_RTX;
8307 }
8308 else if (recog_memoized (insn) >= 0)
8309 prev_insn = insn;
8310 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8311 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8312 || asm_noperands (PATTERN (insn)) >= 0);
8313 }
8314 }
8315 }
8316
8317 \f
8318
8319 /* The following function returns the lookahead depth used by the
8320 first-cycle multipass DFA scheduling; a nonzero value enables it. */
8321
8322 static int
8323 ia64_first_cycle_multipass_dfa_lookahead (void)
8324 {
8325 return (reload_completed ? 6 : 4);
8326 }
8327
8328 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8329
8330 static void
8331 ia64_init_dfa_pre_cycle_insn (void)
8332 {
8333 if (temp_dfa_state == NULL)
8334 {
8335 dfa_state_size = state_size ();
8336 temp_dfa_state = xmalloc (dfa_state_size);
8337 prev_cycle_state = xmalloc (dfa_state_size);
8338 }
8339 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8340 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8341 recog_memoized (dfa_pre_cycle_insn);
8342 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8343 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8344 recog_memoized (dfa_stop_insn);
8345 }
8346
8347 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8348 used by the DFA insn scheduler. */
8349
8350 static rtx
8351 ia64_dfa_pre_cycle_insn (void)
8352 {
8353 return dfa_pre_cycle_insn;
8354 }
8355
8356 /* The following function returns TRUE if PRODUCER (of type ilog or
8357 ld) produces the address for CONSUMER (of type st or stf). */
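/* As an illustration (an assumed example, not taken from this file): an
   address computation such as `adds r14 = 16, r32' feeding
   `st8 [r14] = r33' is the kind of producer/consumer pair this bypass
   describes. */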
8358
8359 int
8360 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8361 {
8362 rtx dest, reg, mem;
8363
8364 gcc_assert (producer && consumer);
8365 dest = ia64_single_set (producer);
8366 gcc_assert (dest);
8367 reg = SET_DEST (dest);
8368 gcc_assert (reg);
8369 if (GET_CODE (reg) == SUBREG)
8370 reg = SUBREG_REG (reg);
8371 gcc_assert (GET_CODE (reg) == REG);
8372
8373 dest = ia64_single_set (consumer);
8374 gcc_assert (dest);
8375 mem = SET_DEST (dest);
8376 gcc_assert (mem && GET_CODE (mem) == MEM);
8377 return reg_mentioned_p (reg, mem);
8378 }
8379
8380 /* The following function returns TRUE if PRODUCER (of type ilog or
8381 ld) produces the address for CONSUMER (of type ld or fld). */
8382
8383 int
8384 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8385 {
8386 rtx dest, src, reg, mem;
8387
8388 gcc_assert (producer && consumer);
8389 dest = ia64_single_set (producer);
8390 gcc_assert (dest);
8391 reg = SET_DEST (dest);
8392 gcc_assert (reg);
8393 if (GET_CODE (reg) == SUBREG)
8394 reg = SUBREG_REG (reg);
8395 gcc_assert (GET_CODE (reg) == REG);
8396
8397 src = ia64_single_set (consumer);
8398 gcc_assert (src);
8399 mem = SET_SRC (src);
8400 gcc_assert (mem);
8401
8402 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8403 mem = XVECEXP (mem, 0, 0);
8404 else if (GET_CODE (mem) == IF_THEN_ELSE)
8405 /* ??? Is this bypass necessary for ld.c? */
8406 {
8407 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8408 mem = XEXP (mem, 1);
8409 }
8410
8411 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8412 mem = XEXP (mem, 0);
8413
8414 if (GET_CODE (mem) == UNSPEC)
8415 {
8416 int c = XINT (mem, 1);
8417
8418 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8419 mem = XVECEXP (mem, 0, 0);
8420 }
8421
8422 /* Note that LO_SUM is used for GOT loads. */
8423 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8424
8425 return reg_mentioned_p (reg, mem);
8426 }
8427
8428 /* The following function returns TRUE if INSN produces the address for a
8429 load/store insn. We will place such insns into an M slot because that
8430 decreases their latency. */
8431
8432 int
8433 ia64_produce_address_p (rtx insn)
8434 {
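  /* The CALL flag of the insn rtx is reused here as a marker; it is
     presumably set elsewhere by the ia64 scheduler hooks when INSN is
     found to feed a load/store address. */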
8435 return insn->call;
8436 }
8437
8438 \f
8439 /* Emit pseudo-ops for the assembler to describe predicate relations.
8440 At present this assumes that we only consider predicate pairs to
8441 be mutex, and that the assembler can deduce proper values from
8442 straight-line code. */
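/* The patterns emitted below presumably expand to `.pred.rel.mutex' and
   `.pred.safe_across_calls' style annotations; the exact directives come
   from the corresponding patterns in ia64.md, not from this file. */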
8443
8444 static void
8445 emit_predicate_relation_info (void)
8446 {
8447 basic_block bb;
8448
8449 FOR_EACH_BB_REVERSE (bb)
8450 {
8451 int r;
8452 rtx head = BB_HEAD (bb);
8453
8454 /* We only need such notes at code labels. */
8455 if (GET_CODE (head) != CODE_LABEL)
8456 continue;
8457 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8458 head = NEXT_INSN (head);
8459
8460 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8461 grabbing the entire block of predicate registers. */
8462 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8463 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
8464 {
8465 rtx p = gen_rtx_REG (BImode, r);
8466 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8467 if (head == BB_END (bb))
8468 BB_END (bb) = n;
8469 head = n;
8470 }
8471 }
8472
8473 /* Look for conditional calls that do not return, and protect predicate
8474 relations around them. Otherwise the assembler will assume the call
8475 returns, and complain about uses of call-clobbered predicates after
8476 the call. */
8477 FOR_EACH_BB_REVERSE (bb)
8478 {
8479 rtx insn = BB_HEAD (bb);
8480
8481 while (1)
8482 {
8483 if (GET_CODE (insn) == CALL_INSN
8484 && GET_CODE (PATTERN (insn)) == COND_EXEC
8485 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8486 {
8487 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8488 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8489 if (BB_HEAD (bb) == insn)
8490 BB_HEAD (bb) = b;
8491 if (BB_END (bb) == insn)
8492 BB_END (bb) = a;
8493 }
8494
8495 if (insn == BB_END (bb))
8496 break;
8497 insn = NEXT_INSN (insn);
8498 }
8499 }
8500 }
8501
8502 /* Perform machine dependent operations on the rtl chain INSNS. */
8503
8504 static void
8505 ia64_reorg (void)
8506 {
8507 /* We are freeing block_for_insn in the toplev to keep compatibility
8508 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8509 compute_bb_for_insn ();
8510
8511 /* If optimizing, we'll have split before scheduling. */
8512 if (optimize == 0)
8513 split_all_insns ();
8514
8515 if (optimize && ia64_flag_schedule_insns2 && dbg_cnt (ia64_sched2))
8516 {
8517 timevar_push (TV_SCHED2);
8518 ia64_final_schedule = 1;
8519
8520 initiate_bundle_states ();
8521 ia64_nop = make_insn_raw (gen_nop ());
8522 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8523 recog_memoized (ia64_nop);
8524 clocks_length = get_max_uid () + 1;
8525 stops_p = xcalloc (1, clocks_length);
8526 if (ia64_tune == PROCESSOR_ITANIUM)
8527 {
8528 clocks = xcalloc (clocks_length, sizeof (int));
8529 add_cycles = xcalloc (clocks_length, sizeof (int));
8530 }
8531 if (ia64_tune == PROCESSOR_ITANIUM2)
8532 {
8533 pos_1 = get_cpu_unit_code ("2_1");
8534 pos_2 = get_cpu_unit_code ("2_2");
8535 pos_3 = get_cpu_unit_code ("2_3");
8536 pos_4 = get_cpu_unit_code ("2_4");
8537 pos_5 = get_cpu_unit_code ("2_5");
8538 pos_6 = get_cpu_unit_code ("2_6");
8539 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8540 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8541 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8542 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8543 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8544 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8545 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8546 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8547 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8548 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8549 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8550 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8551 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8552 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8553 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8554 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8555 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8556 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8557 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8558 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8559 }
8560 else
8561 {
8562 pos_1 = get_cpu_unit_code ("1_1");
8563 pos_2 = get_cpu_unit_code ("1_2");
8564 pos_3 = get_cpu_unit_code ("1_3");
8565 pos_4 = get_cpu_unit_code ("1_4");
8566 pos_5 = get_cpu_unit_code ("1_5");
8567 pos_6 = get_cpu_unit_code ("1_6");
8568 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8569 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8570 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8571 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8572 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8573 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8574 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8575 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8576 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8577 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8578 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8579 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8580 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8581 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8582 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8583 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8584 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8585 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8586 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8587 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8588 }
8589 schedule_ebbs ();
8590 /* We cannot reuse this one because it has been corrupted by the
8591 evil glat. */
8592 finish_bundle_states ();
8593 if (ia64_tune == PROCESSOR_ITANIUM)
8594 {
8595 free (add_cycles);
8596 free (clocks);
8597 }
8598 free (stops_p);
8599 stops_p = NULL;
8600 emit_insn_group_barriers (dump_file);
8601
8602 ia64_final_schedule = 0;
8603 timevar_pop (TV_SCHED2);
8604 }
8605 else
8606 emit_all_insn_group_barriers (dump_file);
8607
8608 df_analyze ();
8609
8610 /* A call must not be the last instruction in a function, so that the
8611 return address is still within the function, so that unwinding works
8612 properly. Note that IA-64 differs from dwarf2 on this point. */
8613 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8614 {
8615 rtx insn;
8616 int saw_stop = 0;
8617
8618 insn = get_last_insn ();
8619 if (! INSN_P (insn))
8620 insn = prev_active_insn (insn);
8621 /* Skip over insns that expand to nothing. */
8622 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8623 {
8624 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8625 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8626 saw_stop = 1;
8627 insn = prev_active_insn (insn);
8628 }
8629 if (GET_CODE (insn) == CALL_INSN)
8630 {
8631 if (! saw_stop)
8632 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8633 emit_insn (gen_break_f ());
8634 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8635 }
8636 }
8637
8638 emit_predicate_relation_info ();
8639
8640 if (ia64_flag_var_tracking)
8641 {
8642 timevar_push (TV_VAR_TRACKING);
8643 variable_tracking_main ();
8644 timevar_pop (TV_VAR_TRACKING);
8645 }
8646 df_finish_pass (false);
8647 }
8648 \f
8649 /* Return true if REGNO is used by the epilogue. */
8650
8651 int
8652 ia64_epilogue_uses (int regno)
8653 {
8654 switch (regno)
8655 {
8656 case R_GR (1):
8657 /* With a call to a function in another module, we will write a new
8658 value to "gp". After returning from such a call, we need to make
8659 sure the function restores the original gp-value, even if the
8660 function itself does not use the gp anymore. */
8661 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8662
8663 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8664 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8665 /* For functions defined with the syscall_linkage attribute, all
8666 input registers are marked as live at all function exits. This
8667 prevents the register allocator from using the input registers,
8668 which in turn makes it possible to restart a system call after
8669 an interrupt without having to save/restore the input registers.
8670 This also prevents kernel data from leaking to application code. */
8671 return lookup_attribute ("syscall_linkage",
8672 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8673
8674 case R_BR (0):
8675 /* Conditional return patterns can't represent the use of `b0' as
8676 the return address, so we force the value live this way. */
8677 return 1;
8678
8679 case AR_PFS_REGNUM:
8680 /* Likewise for ar.pfs, which is used by br.ret. */
8681 return 1;
8682
8683 default:
8684 return 0;
8685 }
8686 }
8687
8688 /* Return true if REGNO is used by the frame unwinder. */
8689
8690 int
8691 ia64_eh_uses (int regno)
8692 {
8693 enum ia64_frame_regs r;
8694
8695 if (! reload_completed)
8696 return 0;
8697
8698 if (regno == 0)
8699 return 0;
8700
8701 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
8702 if (regno == current_frame_info.r[r]
8703 || regno == emitted_frame_related_regs[r])
8704 return 1;
8705
8706 return 0;
8707 }
8708 \f
8709 /* Return true if this goes in small data/bss. */
8710
8711 /* ??? We could also support our own long-data section here, generating
8712 movl/add/ld8 instead of addl,ld8/ld8. This makes the code bigger, but
8713 should make the code faster because there is one less load. This would
8714 also cover incomplete types which can't go in sdata/sbss. */
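/* For example, a scalar global whose size is at or below
   ia64_section_threshold lands in .sdata/.sbss and can be reached with a
   gp-relative addl plus a single load, avoiding the extra load of the
   addl,ld8/ld8 sequence mentioned above. */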
8715
8716 static bool
8717 ia64_in_small_data_p (tree exp)
8718 {
8719 if (TARGET_NO_SDATA)
8720 return false;
8721
8722 /* We want to merge strings, so we never consider them small data. */
8723 if (TREE_CODE (exp) == STRING_CST)
8724 return false;
8725
8726 /* Functions are never small data. */
8727 if (TREE_CODE (exp) == FUNCTION_DECL)
8728 return false;
8729
8730 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8731 {
8732 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8733
8734 if (strcmp (section, ".sdata") == 0
8735 || strncmp (section, ".sdata.", 7) == 0
8736 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8737 || strcmp (section, ".sbss") == 0
8738 || strncmp (section, ".sbss.", 6) == 0
8739 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8740 return true;
8741 }
8742 else
8743 {
8744 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8745
8746 /* If this is an incomplete type with size 0, then we can't put it
8747 in sdata because it might be too big when completed. */
8748 if (size > 0 && size <= ia64_section_threshold)
8749 return true;
8750 }
8751
8752 return false;
8753 }
8754 \f
8755 /* Output assembly directives for prologue regions. */
8756
8757 /* True if the current basic block is the last one in the function. */
8758
8759 static bool last_block;
8760
8761 /* True if we need a copy_state command at the start of the next block. */
8762
8763 static bool need_copy_state;
8764
8765 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8766 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8767 #endif
8768
8769 /* Emit a debugging label after a call-frame-related insn. We'd
8770 rather output the label right away, but we'd have to output it
8771 after, not before, the instruction, and the instruction has not
8772 been output yet. So we emit the label after the insn, delete it to
8773 avoid introducing basic blocks, and mark it as preserved, such that
8774 it is still output, given that it is referenced in debug info. */
8775
8776 static const char *
8777 ia64_emit_deleted_label_after_insn (rtx insn)
8778 {
8779 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8780 rtx lb = gen_label_rtx ();
8781 rtx label_insn = emit_label_after (lb, insn);
8782
8783 LABEL_PRESERVE_P (lb) = 1;
8784
8785 delete_insn (label_insn);
8786
8787 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8788
8789 return xstrdup (label);
8790 }
8791
8792 /* Define the CFA after INSN with the steady-state definition. */
8793
8794 static void
8795 ia64_dwarf2out_def_steady_cfa (rtx insn)
8796 {
8797 rtx fp = frame_pointer_needed
8798 ? hard_frame_pointer_rtx
8799 : stack_pointer_rtx;
8800
8801 dwarf2out_def_cfa
8802 (ia64_emit_deleted_label_after_insn (insn),
8803 REGNO (fp),
8804 ia64_initial_elimination_offset
8805 (REGNO (arg_pointer_rtx), REGNO (fp))
8806 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8807 }
8808
8809 /* The generic dwarf2 frame debug info generator does not define a
8810 separate region for the very end of the epilogue, so refrain from
8811 doing so in the IA64-specific code as well. */
8812
8813 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8814
8815 /* The function emits unwind directives for the start of an epilogue. */
8816
8817 static void
8818 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8819 {
8820 /* If this isn't the last block of the function, then we need to label the
8821 current state, and copy it back in at the start of the next block. */
8822
8823 if (!last_block)
8824 {
8825 if (unwind)
8826 fprintf (asm_out_file, "\t.label_state %d\n",
8827 ++cfun->machine->state_num);
8828 need_copy_state = true;
8829 }
8830
8831 if (unwind)
8832 fprintf (asm_out_file, "\t.restore sp\n");
8833 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8834 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8835 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8836 }
8837
8838 /* This function processes a SET pattern looking for specific patterns
8839 which result in emitting an assembly directive required for unwinding. */
8840
8841 static int
8842 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8843 {
8844 rtx src = SET_SRC (pat);
8845 rtx dest = SET_DEST (pat);
8846 int src_regno, dest_regno;
8847
8848 /* Look for the ALLOC insn. */
8849 if (GET_CODE (src) == UNSPEC_VOLATILE
8850 && XINT (src, 1) == UNSPECV_ALLOC
8851 && GET_CODE (dest) == REG)
8852 {
8853 dest_regno = REGNO (dest);
8854
8855 /* If this is the final destination for ar.pfs, then this must
8856 be the alloc in the prologue. */
8857 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
8858 {
8859 if (unwind)
8860 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8861 ia64_dbx_register_number (dest_regno));
8862 }
8863 else
8864 {
8865 /* This must be an alloc before a sibcall. We must drop the
8866 old frame info. The easiest way to drop the old frame
8867 info is to ensure we had a ".restore sp" directive
8868 followed by a new prologue. If the procedure doesn't
8869 have a memory-stack frame, we'll issue a dummy ".restore
8870 sp" now. */
8871 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8872 /* If we haven't done process_epilogue () yet, do it now. */
8873 process_epilogue (asm_out_file, insn, unwind, frame);
8874 if (unwind)
8875 fprintf (asm_out_file, "\t.prologue\n");
8876 }
8877 return 1;
8878 }
8879
8880 /* Look for SP = .... */
8881 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8882 {
8883 if (GET_CODE (src) == PLUS)
8884 {
8885 rtx op0 = XEXP (src, 0);
8886 rtx op1 = XEXP (src, 1);
8887
8888 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8889
8890 if (INTVAL (op1) < 0)
8891 {
8892 gcc_assert (!frame_pointer_needed);
8893 if (unwind)
8894 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8895 -INTVAL (op1));
8896 if (frame)
8897 ia64_dwarf2out_def_steady_cfa (insn);
8898 }
8899 else
8900 process_epilogue (asm_out_file, insn, unwind, frame);
8901 }
8902 else
8903 {
8904 gcc_assert (GET_CODE (src) == REG
8905 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8906 process_epilogue (asm_out_file, insn, unwind, frame);
8907 }
8908
8909 return 1;
8910 }
8911
8912 /* Register move we need to look at. */
8913 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8914 {
8915 src_regno = REGNO (src);
8916 dest_regno = REGNO (dest);
8917
8918 switch (src_regno)
8919 {
8920 case BR_REG (0):
8921 /* Saving return address pointer. */
8922 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
8923 if (unwind)
8924 fprintf (asm_out_file, "\t.save rp, r%d\n",
8925 ia64_dbx_register_number (dest_regno));
8926 return 1;
8927
8928 case PR_REG (0):
8929 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
8930 if (unwind)
8931 fprintf (asm_out_file, "\t.save pr, r%d\n",
8932 ia64_dbx_register_number (dest_regno));
8933 return 1;
8934
8935 case AR_UNAT_REGNUM:
8936 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
8937 if (unwind)
8938 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8939 ia64_dbx_register_number (dest_regno));
8940 return 1;
8941
8942 case AR_LC_REGNUM:
8943 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
8944 if (unwind)
8945 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8946 ia64_dbx_register_number (dest_regno));
8947 return 1;
8948
8949 case STACK_POINTER_REGNUM:
8950 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8951 && frame_pointer_needed);
8952 if (unwind)
8953 fprintf (asm_out_file, "\t.vframe r%d\n",
8954 ia64_dbx_register_number (dest_regno));
8955 if (frame)
8956 ia64_dwarf2out_def_steady_cfa (insn);
8957 return 1;
8958
8959 default:
8960 /* Everything else should indicate being stored to memory. */
8961 gcc_unreachable ();
8962 }
8963 }
8964
8965 /* Memory store we need to look at. */
8966 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8967 {
8968 long off;
8969 rtx base;
8970 const char *saveop;
8971
8972 if (GET_CODE (XEXP (dest, 0)) == REG)
8973 {
8974 base = XEXP (dest, 0);
8975 off = 0;
8976 }
8977 else
8978 {
8979 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8980 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8981 base = XEXP (XEXP (dest, 0), 0);
8982 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8983 }
8984
8985 if (base == hard_frame_pointer_rtx)
8986 {
8987 saveop = ".savepsp";
8988 off = - off;
8989 }
8990 else
8991 {
8992 gcc_assert (base == stack_pointer_rtx);
8993 saveop = ".savesp";
8994 }
8995
8996 src_regno = REGNO (src);
8997 switch (src_regno)
8998 {
8999 case BR_REG (0):
9000 gcc_assert (!current_frame_info.r[reg_save_b0]);
9001 if (unwind)
9002 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9003 return 1;
9004
9005 case PR_REG (0):
9006 gcc_assert (!current_frame_info.r[reg_save_pr]);
9007 if (unwind)
9008 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9009 return 1;
9010
9011 case AR_LC_REGNUM:
9012 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9013 if (unwind)
9014 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9015 return 1;
9016
9017 case AR_PFS_REGNUM:
9018 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9019 if (unwind)
9020 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9021 return 1;
9022
9023 case AR_UNAT_REGNUM:
9024 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9025 if (unwind)
9026 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9027 return 1;
9028
9029 case GR_REG (4):
9030 case GR_REG (5):
9031 case GR_REG (6):
9032 case GR_REG (7):
9033 if (unwind)
9034 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9035 1 << (src_regno - GR_REG (4)));
9036 return 1;
9037
9038 case BR_REG (1):
9039 case BR_REG (2):
9040 case BR_REG (3):
9041 case BR_REG (4):
9042 case BR_REG (5):
9043 if (unwind)
9044 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9045 1 << (src_regno - BR_REG (1)));
9046 return 1;
9047
9048 case FR_REG (2):
9049 case FR_REG (3):
9050 case FR_REG (4):
9051 case FR_REG (5):
9052 if (unwind)
9053 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9054 1 << (src_regno - FR_REG (2)));
9055 return 1;
9056
9057 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9058 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9059 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9060 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9061 if (unwind)
9062 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9063 1 << (src_regno - FR_REG (12)));
9064 return 1;
9065
9066 default:
9067 return 0;
9068 }
9069 }
9070
9071 return 0;
9072 }
9073
9074
9075 /* This function looks at a single insn and emits any directives
9076 required to unwind this insn. */
9077 void
9078 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9079 {
9080 bool unwind = (flag_unwind_tables
9081 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9082 bool frame = dwarf2out_do_frame ();
9083
9084 if (unwind || frame)
9085 {
9086 rtx pat;
9087
9088 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9089 {
9090 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9091
9092 /* Restore unwind state from immediately before the epilogue. */
9093 if (need_copy_state)
9094 {
9095 if (unwind)
9096 {
9097 fprintf (asm_out_file, "\t.body\n");
9098 fprintf (asm_out_file, "\t.copy_state %d\n",
9099 cfun->machine->state_num);
9100 }
9101 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9102 ia64_dwarf2out_def_steady_cfa (insn);
9103 need_copy_state = false;
9104 }
9105 }
9106
9107 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9108 return;
9109
9110 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9111 if (pat)
9112 pat = XEXP (pat, 0);
9113 else
9114 pat = PATTERN (insn);
9115
9116 switch (GET_CODE (pat))
9117 {
9118 case SET:
9119 process_set (asm_out_file, pat, insn, unwind, frame);
9120 break;
9121
9122 case PARALLEL:
9123 {
9124 int par_index;
9125 int limit = XVECLEN (pat, 0);
9126 for (par_index = 0; par_index < limit; par_index++)
9127 {
9128 rtx x = XVECEXP (pat, 0, par_index);
9129 if (GET_CODE (x) == SET)
9130 process_set (asm_out_file, x, insn, unwind, frame);
9131 }
9132 break;
9133 }
9134
9135 default:
9136 gcc_unreachable ();
9137 }
9138 }
9139 }
9140
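/* An illustrative sketch (not taken from the original sources): for a
   function that keeps its return pointer and predicates in registers and
   uses a frame pointer, the directives emitted here for the prologue insns
   might look roughly like

     .save rp, r42
     .save pr, r41
     .vframe r33
     .body

   where the particular register numbers are hypothetical and depend on the
   frame layout chosen by ia64_expand_prologue.  */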
9141 \f
9142 enum ia64_builtins
9143 {
9144 IA64_BUILTIN_BSP,
9145 IA64_BUILTIN_FLUSHRS
9146 };
9147
9148 void
9149 ia64_init_builtins (void)
9150 {
9151 tree fpreg_type;
9152 tree float80_type;
9153
9154 /* The __fpreg type. */
9155 fpreg_type = make_node (REAL_TYPE);
9156 TYPE_PRECISION (fpreg_type) = 82;
9157 layout_type (fpreg_type);
9158 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9159
9160 /* The __float80 type. */
9161 float80_type = make_node (REAL_TYPE);
9162 TYPE_PRECISION (float80_type) = 80;
9163 layout_type (float80_type);
9164 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9165
9166 /* The __float128 type. */
9167 if (!TARGET_HPUX)
9168 {
9169 tree float128_type = make_node (REAL_TYPE);
9170 TYPE_PRECISION (float128_type) = 128;
9171 layout_type (float128_type);
9172 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9173 }
9174 else
9175 /* Under HPUX, this is a synonym for "long double". */
9176 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9177 "__float128");
9178
9179 #define def_builtin(name, type, code) \
9180 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9181 NULL, NULL_TREE)
9182
9183 def_builtin ("__builtin_ia64_bsp",
9184 build_function_type (ptr_type_node, void_list_node),
9185 IA64_BUILTIN_BSP);
9186
9187 def_builtin ("__builtin_ia64_flushrs",
9188 build_function_type (void_type_node, void_list_node),
9189 IA64_BUILTIN_FLUSHRS);
9190
9191 #undef def_builtin
9192
9193 if (TARGET_HPUX)
9194 {
9195 if (built_in_decls [BUILT_IN_FINITE])
9196 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9197 "_Isfinite");
9198 if (built_in_decls [BUILT_IN_FINITEF])
9199 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9200 "_Isfinitef");
9201 if (built_in_decls [BUILT_IN_FINITEL])
9202 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9203 "_Isfinitef128");
9204 }
9205 }
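
/* A minimal usage sketch of these builtins (an illustration, not from the
   original sources):

     void *
     current_bsp (void)
     {
       __builtin_ia64_flushrs ();
       return __builtin_ia64_bsp ();
     }

   flushrs spills the dirty stacked registers to the backing store, after
   which bsp yields the current backing store pointer; the prototypes match
   the function types registered above.  */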
9206
9207 rtx
9208 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9209 enum machine_mode mode ATTRIBUTE_UNUSED,
9210 int ignore ATTRIBUTE_UNUSED)
9211 {
9212 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9213 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9214
9215 switch (fcode)
9216 {
9217 case IA64_BUILTIN_BSP:
9218 if (! target || ! register_operand (target, DImode))
9219 target = gen_reg_rtx (DImode);
9220 emit_insn (gen_bsp_value (target));
9221 #ifdef POINTERS_EXTEND_UNSIGNED
9222 target = convert_memory_address (ptr_mode, target);
9223 #endif
9224 return target;
9225
9226 case IA64_BUILTIN_FLUSHRS:
9227 emit_insn (gen_flushrs ());
9228 return const0_rtx;
9229
9230 default:
9231 break;
9232 }
9233
9234 return NULL_RTX;
9235 }
9236
9237 /* On HP-UX IA64, aggregate parameters are passed in the most
9238 significant bits of the stack slot. */
9239
9240 enum direction
9241 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9242 {
9243 /* Exception to normal case for structures/unions/etc. */
9244
9245 if (type && AGGREGATE_TYPE_P (type)
9246 && int_size_in_bytes (type) < UNITS_PER_WORD)
9247 return upward;
9248
9249 /* Fall back to the default. */
9250 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9251 }
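
/* For example (illustrative, assuming UNITS_PER_WORD is 8 here), a
   three-byte aggregate such as

     struct rgb { unsigned char r, g, b; };

   passed by value gets padded upward, i.e. placed in the most significant
   bytes of its stack slot, while scalars keep the default padding.  */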
9252
9253 /* Emit text to declare externally defined variables and functions, because
9254 the Intel assembler does not support undefined externals. */
9255
9256 void
9257 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9258 {
9259 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9260 set in order to avoid putting out names that are never really
9261 used. */
9262 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9263 {
9264 /* maybe_assemble_visibility will return 1 if the assembler
9265 visibility directive is output. */
9266 int need_visibility = ((*targetm.binds_local_p) (decl)
9267 && maybe_assemble_visibility (decl));
9268
9269 /* GNU as does not need anything here, but the HP linker does
9270 need something for external functions. */
9271 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9272 && TREE_CODE (decl) == FUNCTION_DECL)
9273 (*targetm.asm_out.globalize_decl_name) (file, decl);
9274 else if (need_visibility && !TARGET_GNU_AS)
9275 (*targetm.asm_out.globalize_label) (file, name);
9276 }
9277 }
9278
9279 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
9280 modes of word_mode and larger. Rename the TFmode libfuncs using the
9281 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
9282 backward compatibility. */
9283
9284 static void
9285 ia64_init_libfuncs (void)
9286 {
9287 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9288 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9289 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9290 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9291
9292 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9293 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9294 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9295 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9296 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9297
9298 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9299 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9300 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9301 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9302 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9303 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9304
9305 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9306 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9307 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9308 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9309 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9310
9311 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9312 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9313 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9314 /* HP-UX 11.23 libc does not have a function for unsigned
9315 SImode-to-TFmode conversion. */
9316 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
9317 }
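
/* Illustrative consequence (an assumption about generated code, not from
   the original sources): with the names installed above, a TFmode addition

     __float128 add (__float128 a, __float128 b) { return a + b; }

   becomes a call to _U_Qfadd instead of the default __addtf3, and a cast
   of a __float128 value to double calls _U_Qfcnvff_quad_to_dbl.  */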
9318
9319 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9320
9321 static void
9322 ia64_hpux_init_libfuncs (void)
9323 {
9324 ia64_init_libfuncs ();
9325
9326 /* The HP SI millicode division and mod functions expect DI arguments.
9327 By turning them off completely we avoid using both libgcc and the
9328 non-standard millicode routines and use the HP DI millicode routines
9329 instead. */
9330
9331 set_optab_libfunc (sdiv_optab, SImode, 0);
9332 set_optab_libfunc (udiv_optab, SImode, 0);
9333 set_optab_libfunc (smod_optab, SImode, 0);
9334 set_optab_libfunc (umod_optab, SImode, 0);
9335
9336 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9337 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9338 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9339 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9340
9341 /* HP-UX libc has TF min/max/abs routines in it. */
9342 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9343 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9344 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9345
9346 /* ia64_expand_compare uses this. */
9347 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9348
9349 /* These should never be used. */
9350 set_optab_libfunc (eq_optab, TFmode, 0);
9351 set_optab_libfunc (ne_optab, TFmode, 0);
9352 set_optab_libfunc (gt_optab, TFmode, 0);
9353 set_optab_libfunc (ge_optab, TFmode, 0);
9354 set_optab_libfunc (lt_optab, TFmode, 0);
9355 set_optab_libfunc (le_optab, TFmode, 0);
9356 }
9357
9358 /* Rename the division and modulus functions in VMS. */
9359
9360 static void
9361 ia64_vms_init_libfuncs (void)
9362 {
9363 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9364 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9365 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9366 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9367 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9368 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9369 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9370 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9371 }
9372
9373 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9374 the HPUX conventions. */
9375
9376 static void
9377 ia64_sysv4_init_libfuncs (void)
9378 {
9379 ia64_init_libfuncs ();
9380
9381 /* These functions are not part of the HPUX TFmode interface. We
9382 use them instead of _U_Qfcmp, which doesn't work the way we
9383 expect. */
9384 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9385 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9386 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9387 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9388 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9389 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9390
9391 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9392 glibc doesn't have them. */
9393 }
9394 \f
9395 /* For HPUX, it is illegal to have relocations in shared segments. */
9396
9397 static int
9398 ia64_hpux_reloc_rw_mask (void)
9399 {
9400 return 3;
9401 }
9402
9403 /* For others, relax this so that relocations to local data go in
9404 read-only segments, but we still cannot allow global relocations
9405 in read-only segments. */
9406
9407 static int
9408 ia64_reloc_rw_mask (void)
9409 {
9410 return flag_pic ? 3 : 2;
9411 }
9412
9413 /* Return the section to use for X. The only special thing we do here
9414 is to honor small data. */
9415
9416 static section *
9417 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9418 unsigned HOST_WIDE_INT align)
9419 {
9420 if (GET_MODE_SIZE (mode) > 0
9421 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9422 && !TARGET_NO_SDATA)
9423 return sdata_section;
9424 else
9425 return default_elf_select_rtx_section (mode, x, align);
9426 }
9427
9428 static unsigned int
9429 ia64_section_type_flags (tree decl, const char *name, int reloc)
9430 {
9431 unsigned int flags = 0;
9432
9433 if (strcmp (name, ".sdata") == 0
9434 || strncmp (name, ".sdata.", 7) == 0
9435 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9436 || strncmp (name, ".sdata2.", 8) == 0
9437 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9438 || strcmp (name, ".sbss") == 0
9439 || strncmp (name, ".sbss.", 6) == 0
9440 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9441 flags = SECTION_SMALL;
9442
9443 flags |= default_section_type_flags (decl, name, reloc);
9444 return flags;
9445 }
9446
9447 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9448 structure type and that the address of that type should be passed
9449 in out0, rather than in r8. */
9450
9451 static bool
9452 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9453 {
9454 tree ret_type = TREE_TYPE (fntype);
9455
9456 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9457 as the structure return address parameter, if the return value
9458 type has a non-trivial copy constructor or destructor. It is not
9459 clear if this same convention should be used for other
9460 programming languages. Until G++ 3.4, we incorrectly used r8 for
9461 these return values. */
9462 return (abi_version_at_least (2)
9463 && ret_type
9464 && TYPE_MODE (ret_type) == BLKmode
9465 && TREE_ADDRESSABLE (ret_type)
9466 && strcmp (lang_hooks.name, "GNU C++") == 0);
9467 }
9468
9469 /* Output the assembler code for a thunk function. THUNK_DECL is the
9470 declaration for the thunk function itself, FUNCTION is the decl for
9471 the target function. DELTA is an immediate constant offset to be
9472 added to THIS. If VCALL_OFFSET is nonzero, the word at
9473 *(*this + vcall_offset) should be added to THIS. */
9474
9475 static void
9476 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9477 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9478 tree function)
9479 {
9480 rtx this, insn, funexp;
9481 unsigned int this_parmno;
9482 unsigned int this_regno;
9483 rtx delta_rtx;
9484
9485 reload_completed = 1;
9486 epilogue_completed = 1;
9487
9488 /* Set things up as ia64_expand_prologue might. */
9489 last_scratch_gr_reg = 15;
9490
9491 memset (&current_frame_info, 0, sizeof (current_frame_info));
9492 current_frame_info.spill_cfa_off = -16;
9493 current_frame_info.n_input_regs = 1;
9494 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9495
9496 /* Mark the end of the (empty) prologue. */
9497 emit_note (NOTE_INSN_PROLOGUE_END);
9498
9499 /* Figure out whether "this" will be the first parameter (the
9500 typical case) or the second parameter (as happens when the
9501 virtual function returns certain class objects). */
9502 this_parmno
9503 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9504 ? 1 : 0);
9505 this_regno = IN_REG (this_parmno);
9506 if (!TARGET_REG_NAMES)
9507 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9508
9509 this = gen_rtx_REG (Pmode, this_regno);
9510
9511 /* Apply the constant offset, if required. */
9512 delta_rtx = GEN_INT (delta);
9513 if (TARGET_ILP32)
9514 {
9515 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9516 REG_POINTER (tmp) = 1;
9517 if (delta && satisfies_constraint_I (delta_rtx))
9518 {
9519 emit_insn (gen_ptr_extend_plus_imm (this, tmp, delta_rtx));
9520 delta = 0;
9521 }
9522 else
9523 emit_insn (gen_ptr_extend (this, tmp));
9524 }
9525 if (delta)
9526 {
9527 if (!satisfies_constraint_I (delta_rtx))
9528 {
9529 rtx tmp = gen_rtx_REG (Pmode, 2);
9530 emit_move_insn (tmp, delta_rtx);
9531 delta_rtx = tmp;
9532 }
9533 emit_insn (gen_adddi3 (this, this, delta_rtx));
9534 }
9535
9536 /* Apply the offset from the vtable, if required. */
9537 if (vcall_offset)
9538 {
9539 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9540 rtx tmp = gen_rtx_REG (Pmode, 2);
9541
9542 if (TARGET_ILP32)
9543 {
9544 rtx t = gen_rtx_REG (ptr_mode, 2);
9545 REG_POINTER (t) = 1;
9546 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
9547 if (satisfies_constraint_I (vcall_offset_rtx))
9548 {
9549 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9550 vcall_offset = 0;
9551 }
9552 else
9553 emit_insn (gen_ptr_extend (tmp, t));
9554 }
9555 else
9556 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9557
9558 if (vcall_offset)
9559 {
9560 if (!satisfies_constraint_J (vcall_offset_rtx))
9561 {
9562 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9563 emit_move_insn (tmp2, vcall_offset_rtx);
9564 vcall_offset_rtx = tmp2;
9565 }
9566 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9567 }
9568
9569 if (TARGET_ILP32)
9570 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9571 else
9572 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9573
9574 emit_insn (gen_adddi3 (this, this, tmp));
9575 }
9576
9577 /* Generate a tail call to the target function. */
9578 if (! TREE_USED (function))
9579 {
9580 assemble_external (function);
9581 TREE_USED (function) = 1;
9582 }
9583 funexp = XEXP (DECL_RTL (function), 0);
9584 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9585 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9586 insn = get_last_insn ();
9587 SIBLING_CALL_P (insn) = 1;
9588
9589 /* Code generation for calls relies on splitting. */
9590 reload_completed = 1;
9591 epilogue_completed = 1;
9592 try_split (PATTERN (insn), insn, 0);
9593
9594 emit_barrier ();
9595
9596 /* Run just enough of rest_of_compilation to get the insns emitted.
9597 There's not really enough bulk here to make other passes such as
9598 instruction scheduling worthwhile. Note that use_thunk calls
9599 assemble_start_function and assemble_end_function. */
9600
9601 insn_locators_alloc ();
9602 emit_all_insn_group_barriers (NULL);
9603 insn = get_insns ();
9604 shorten_branches (insn);
9605 final_start_function (insn, file, 1);
9606 final (insn, file, 1);
9607 final_end_function ();
9608
9609 reload_completed = 0;
9610 epilogue_completed = 0;
9611 }
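
/* In rough C-like pseudocode (a sketch of the semantics described in the
   comment before ia64_output_mi_thunk, not of the emitted instructions),
   the thunk body is

     this += delta;
     if (vcall_offset)
       this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
     tail-call FUNCTION (this, ...);

   with the pointer loads narrowed to 32 bits under TARGET_ILP32.  */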
9612
9613 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9614
9615 static rtx
9616 ia64_struct_value_rtx (tree fntype,
9617 int incoming ATTRIBUTE_UNUSED)
9618 {
9619 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9620 return NULL_RTX;
9621 return gen_rtx_REG (Pmode, GR_REG (8));
9622 }
9623
9624 static bool
9625 ia64_scalar_mode_supported_p (enum machine_mode mode)
9626 {
9627 switch (mode)
9628 {
9629 case QImode:
9630 case HImode:
9631 case SImode:
9632 case DImode:
9633 case TImode:
9634 return true;
9635
9636 case SFmode:
9637 case DFmode:
9638 case XFmode:
9639 case RFmode:
9640 return true;
9641
9642 case TFmode:
9643 return TARGET_HPUX;
9644
9645 default:
9646 return false;
9647 }
9648 }
9649
9650 static bool
9651 ia64_vector_mode_supported_p (enum machine_mode mode)
9652 {
9653 switch (mode)
9654 {
9655 case V8QImode:
9656 case V4HImode:
9657 case V2SImode:
9658 return true;
9659
9660 case V2SFmode:
9661 return true;
9662
9663 default:
9664 return false;
9665 }
9666 }
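
/* For instance (illustrative typedef names), GCC's generic vector
   extension maps onto the modes accepted above:

     typedef unsigned char v8qi __attribute__ ((vector_size (8)));
     typedef short         v4hi __attribute__ ((vector_size (8)));
     typedef float         v2sf __attribute__ ((vector_size (8)));

   each being an 8-byte vector type that this backend supports natively.  */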
9667
9668 /* Implement the FUNCTION_PROFILER macro. */
9669
9670 void
9671 ia64_output_function_profiler (FILE *file, int labelno)
9672 {
9673 bool indirect_call;
9674
9675 /* If the function needs a static chain and the static chain
9676 register is r15, we use an indirect call so as to bypass
9677 the PLT stub in case the executable is dynamically linked,
9678 because the stub clobbers r15 as per 5.3.6 of the psABI.
9679 We don't need to do that in non-canonical PIC mode. */
9680
9681 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9682 {
9683 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9684 indirect_call = true;
9685 }
9686 else
9687 indirect_call = false;
9688
9689 if (TARGET_GNU_AS)
9690 fputs ("\t.prologue 4, r40\n", file);
9691 else
9692 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9693 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9694
9695 if (NO_PROFILE_COUNTERS)
9696 fputs ("\tmov out3 = r0\n", file);
9697 else
9698 {
9699 char buf[20];
9700 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9701
9702 if (TARGET_AUTO_PIC)
9703 fputs ("\tmovl out3 = @gprel(", file);
9704 else
9705 fputs ("\taddl out3 = @ltoff(", file);
9706 assemble_name (file, buf);
9707 if (TARGET_AUTO_PIC)
9708 fputs (")\n", file);
9709 else
9710 fputs ("), r1\n", file);
9711 }
9712
9713 if (indirect_call)
9714 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9715 fputs ("\t;;\n", file);
9716
9717 fputs ("\t.save rp, r42\n", file);
9718 fputs ("\tmov out2 = b0\n", file);
9719 if (indirect_call)
9720 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9721 fputs ("\t.body\n", file);
9722 fputs ("\tmov out1 = r1\n", file);
9723 if (indirect_call)
9724 {
9725 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9726 fputs ("\tmov b6 = r16\n", file);
9727 fputs ("\tld8 r1 = [r14]\n", file);
9728 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9729 }
9730 else
9731 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
9732 }
9733
9734 static GTY(()) rtx mcount_func_rtx;
9735 static rtx
9736 gen_mcount_func_rtx (void)
9737 {
9738 if (!mcount_func_rtx)
9739 mcount_func_rtx = init_one_libfunc ("_mcount");
9740 return mcount_func_rtx;
9741 }
9742
9743 void
9744 ia64_profile_hook (int labelno)
9745 {
9746 rtx label, ip;
9747
9748 if (NO_PROFILE_COUNTERS)
9749 label = const0_rtx;
9750 else
9751 {
9752 char buf[30];
9753 const char *label_name;
9754 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9755 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9756 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9757 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9758 }
9759 ip = gen_reg_rtx (Pmode);
9760 emit_insn (gen_ip_value (ip));
9761 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9762 VOIDmode, 3,
9763 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9764 ip, Pmode,
9765 label, Pmode);
9766 }
9767
9768 /* Return the mangling of TYPE if it is an extended fundamental type. */
9769
9770 static const char *
9771 ia64_mangle_type (tree type)
9772 {
9773 type = TYPE_MAIN_VARIANT (type);
9774
9775 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
9776 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
9777 return NULL;
9778
9779 /* On HP-UX, "long double" is mangled as "e" so __float128 is
9780 mangled as "e". */
9781 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9782 return "g";
9783 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9784 an extended mangling. Elsewhere, "e" is available since long
9785 double is 80 bits. */
9786 if (TYPE_MODE (type) == XFmode)
9787 return TARGET_HPUX ? "u9__float80" : "e";
9788 if (TYPE_MODE (type) == RFmode)
9789 return "u7__fpreg";
9790 return NULL;
9791 }
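
/* As an illustration (assumed examples following the Itanium C++ ABI
   mangling grammar): a C++ function "void f (__float80)" mangles as
   _Z1fe where long double is the 80-bit type and as _Z1fu9__float80 on
   HP-UX, while "void f (__fpreg)" mangles as _Z1fu7__fpreg.  */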
9792
9793 /* Return the diagnostic message string if conversion from FROMTYPE to
9794 TOTYPE is not allowed, NULL otherwise. */
9795 static const char *
9796 ia64_invalid_conversion (tree fromtype, tree totype)
9797 {
9798 /* Reject nontrivial conversion to or from __fpreg. */
9799 if (TYPE_MODE (fromtype) == RFmode
9800 && TYPE_MODE (totype) != RFmode
9801 && TYPE_MODE (totype) != VOIDmode)
9802 return N_("invalid conversion from %<__fpreg%>");
9803 if (TYPE_MODE (totype) == RFmode
9804 && TYPE_MODE (fromtype) != RFmode)
9805 return N_("invalid conversion to %<__fpreg%>");
9806 return NULL;
9807 }
9808
9809 /* Return the diagnostic message string if the unary operation OP is
9810 not permitted on TYPE, NULL otherwise. */
9811 static const char *
9812 ia64_invalid_unary_op (int op, tree type)
9813 {
9814 /* Reject operations on __fpreg other than unary + or &. */
9815 if (TYPE_MODE (type) == RFmode
9816 && op != CONVERT_EXPR
9817 && op != ADDR_EXPR)
9818 return N_("invalid operation on %<__fpreg%>");
9819 return NULL;
9820 }
9821
9822 /* Return the diagnostic message string if the binary operation OP is
9823 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9824 static const char *
9825 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
9826 {
9827 /* Reject operations on __fpreg. */
9828 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9829 return N_("invalid operation on %<__fpreg%>");
9830 return NULL;
9831 }
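
/* Taken together these hooks make __fpreg essentially opaque.  A sketch of
   what is accepted and rejected (illustrative only):

     __fpreg f, g;
     __fpreg *p = &f;          accepted: unary & is allowed
     __fpreg h = +f;           accepted: unary + is allowed
     double  d = (double) f;   rejected: invalid conversion from __fpreg
     __fpreg s = f + g;        rejected: invalid operation on __fpreg
*/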
9832
9833 /* Implement overriding of the optimization options. */
9834 void
9835 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9836 int size ATTRIBUTE_UNUSED)
9837 {
9838 /* Let the scheduler form additional regions. */
9839 set_param_value ("max-sched-extend-regions-iters", 2);
9840
9841 /* Set the default values for cache-related parameters. */
9842 set_param_value ("simultaneous-prefetches", 6);
9843 set_param_value ("l1-cache-line-size", 32);
9844
9845 }
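
/* The same defaults could be requested explicitly on the command line,
   shown here only for illustration:

     --param max-sched-extend-regions-iters=2
     --param simultaneous-prefetches=6
     --param l1-cache-line-size=32
*/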
9846
9847 /* HP-UX version_id attribute.
9848 For object foo, if the version_id is set to 1234, put out an alias
9849 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
9850 other than an alias statement because it is an illegal symbol name. */
9851
9852 static tree
9853 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9854 tree name ATTRIBUTE_UNUSED,
9855 tree args,
9856 int flags ATTRIBUTE_UNUSED,
9857 bool *no_add_attrs)
9858 {
9859 tree arg = TREE_VALUE (args);
9860
9861 if (TREE_CODE (arg) != STRING_CST)
9862 {
9863 error ("version attribute is not a string");
9864 *no_add_attrs = true;
9865 return NULL_TREE;
9866 }
9867 return NULL_TREE;
9868 }
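
/* Usage sketch (hypothetical declaration): given

     extern int foo (void) __attribute__ ((version_id ("1234")));

   the intent, per the comment above, is that the assembler output contains
   '.alias foo "foo{1234}"'; a non-string argument is rejected here with
   the "version attribute is not a string" error.  */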
9869
9870 /* Target hook for c_mode_for_suffix. */
9871
9872 static enum machine_mode
9873 ia64_c_mode_for_suffix (char suffix)
9874 {
9875 if (suffix == 'q')
9876 return TFmode;
9877 if (suffix == 'w')
9878 return XFmode;
9879
9880 return VOIDmode;
9881 }
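
/* So, in source code, floating constants can select the extended types
   directly (illustrative):

     __float128 q = 1.0q;   suffix 'q' gives a TFmode constant
     __float80  w = 1.0w;   suffix 'w' gives an XFmode constant
*/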
9882
9883 #include "gt-ia64.h"