1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "tree-gimple.h"
54 #include "intl.h"
55 #include "df.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "dbgcnt.h"
59 #include "tm-constrs.h"
60
61 /* This is used for communication between ASM_OUTPUT_LABEL and
62 ASM_OUTPUT_LABELREF. */
63 int ia64_asm_output_label = 0;
64
65 /* Define the information needed to generate branch and scc insns. This is
66 stored from the compare operation. */
67 struct rtx_def * ia64_compare_op0;
68 struct rtx_def * ia64_compare_op1;
69
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
84
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105
106 /* Which CPU we are scheduling for.  */
107 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
108
109 /* Determines whether we run our final scheduling pass or not. We always
110 avoid the normal second scheduling pass. */
111 static int ia64_flag_schedule_insns2;
112
113 /* Determines whether we run variable tracking in machine dependent
114 reorganization. */
115 static int ia64_flag_var_tracking;
116
117 /* Variables which are this size or smaller are put in the sdata/sbss
118 sections. */
119
120 unsigned int ia64_section_threshold;
121
122 /* The following variable is used by the DFA insn scheduler. The value is
123 TRUE if we do insn bundling instead of insn scheduling. */
124 int bundling_p = 0;
125
126 enum ia64_frame_regs
127 {
128 reg_fp,
129 reg_save_b0,
130 reg_save_pr,
131 reg_save_ar_pfs,
132 reg_save_ar_unat,
133 reg_save_ar_lc,
134 reg_save_gp,
135 number_of_ia64_frame_regs
136 };
137
138 /* Structure to be filled in by ia64_compute_frame_size with register
139 save masks and offsets for the current function. */
140
141 struct ia64_frame_info
142 {
143 HOST_WIDE_INT total_size; /* size of the stack frame, not including
144 the caller's scratch area. */
145 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
146 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
147 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
148 HARD_REG_SET mask; /* mask of saved registers. */
149 unsigned int gr_used_mask; /* mask of registers in use as gr spill
150 registers or long-term scratches. */
151 int n_spilled; /* number of spilled registers. */
152 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
153 int n_input_regs; /* number of input registers used. */
154 int n_local_regs; /* number of local registers used. */
155 int n_output_regs; /* number of output registers used. */
156 int n_rotate_regs; /* number of rotating registers used. */
157
158 char need_regstk; /* true if a .regstk directive needed. */
159 char initialized; /* true if the data is finalized. */
160 };
161
162 /* Current frame information calculated by ia64_compute_frame_size. */
163 static struct ia64_frame_info current_frame_info;
164 /* The actual registers that are emitted. */
165 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
166 \f
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static int ia64_mode_to_int (enum machine_mode);
176 static void ia64_set_sched_flags (spec_info_t);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
179 static bool ia64_needs_block_p (const_rtx);
180 static rtx ia64_gen_check (rtx, rtx, bool);
181 static int ia64_spec_check_p (rtx);
182 static int ia64_spec_check_src_p (rtx);
183 static rtx gen_tls_get_addr (void);
184 static rtx gen_thread_pointer (void);
185 static int find_gr_spill (enum ia64_frame_regs, int);
186 static int next_scratch_gr_reg (void);
187 static void mark_reg_gr_used_mask (rtx, void *);
188 static void ia64_compute_frame_size (HOST_WIDE_INT);
189 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
190 static void finish_spill_pointers (void);
191 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
192 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
193 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
194 static rtx gen_movdi_x (rtx, rtx, rtx);
195 static rtx gen_fr_spill_x (rtx, rtx, rtx);
196 static rtx gen_fr_restore_x (rtx, rtx, rtx);
197
198 static enum machine_mode hfa_element_mode (const_tree, bool);
199 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
200 tree, int *, int);
201 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
202 tree, bool);
203 static bool ia64_function_ok_for_sibcall (tree, tree);
204 static bool ia64_return_in_memory (const_tree, const_tree);
205 static bool ia64_rtx_costs (rtx, int, int, int *);
206 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
207 static void fix_range (const char *);
208 static bool ia64_handle_option (size_t, const char *, int);
209 static struct machine_function * ia64_init_machine_status (void);
210 static void emit_insn_group_barriers (FILE *);
211 static void emit_all_insn_group_barriers (FILE *);
212 static void final_emit_insn_group_barriers (FILE *);
213 static void emit_predicate_relation_info (void);
214 static void ia64_reorg (void);
215 static bool ia64_in_small_data_p (const_tree);
216 static void process_epilogue (FILE *, rtx, bool, bool);
217 static int process_set (FILE *, rtx, rtx, bool, bool);
218
219 static bool ia64_assemble_integer (rtx, unsigned int, int);
220 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
221 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
222 static void ia64_output_function_end_prologue (FILE *);
223
224 static int ia64_issue_rate (void);
225 static int ia64_adjust_cost (rtx, rtx, rtx, int);
226 static void ia64_sched_init (FILE *, int, int);
227 static void ia64_sched_init_global (FILE *, int, int);
228 static void ia64_sched_finish_global (FILE *, int);
229 static void ia64_sched_finish (FILE *, int);
230 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
231 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
232 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
233 static int ia64_variable_issue (FILE *, int, rtx, int);
234
235 static struct bundle_state *get_free_bundle_state (void);
236 static void free_bundle_state (struct bundle_state *);
237 static void initiate_bundle_states (void);
238 static void finish_bundle_states (void);
239 static unsigned bundle_state_hash (const void *);
240 static int bundle_state_eq_p (const void *, const void *);
241 static int insert_bundle_state (struct bundle_state *);
242 static void initiate_bundle_state_table (void);
243 static void finish_bundle_state_table (void);
244 static int try_issue_nops (struct bundle_state *, int);
245 static int try_issue_insn (struct bundle_state *, rtx);
246 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
247 static int get_max_pos (state_t);
248 static int get_template (state_t, int);
249
250 static rtx get_next_important_insn (rtx, rtx);
251 static void bundling (FILE *, int, rtx, rtx);
252
253 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
254 HOST_WIDE_INT, tree);
255 static void ia64_file_start (void);
256 static void ia64_globalize_decl_name (FILE *, tree);
257
258 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
259 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
260 static section *ia64_select_rtx_section (enum machine_mode, rtx,
261 unsigned HOST_WIDE_INT);
262 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
263 ATTRIBUTE_UNUSED;
264 static unsigned int ia64_section_type_flags (tree, const char *, int);
265 static void ia64_init_libfuncs (void)
266 ATTRIBUTE_UNUSED;
267 static void ia64_hpux_init_libfuncs (void)
268 ATTRIBUTE_UNUSED;
269 static void ia64_sysv4_init_libfuncs (void)
270 ATTRIBUTE_UNUSED;
271 static void ia64_vms_init_libfuncs (void)
272 ATTRIBUTE_UNUSED;
273
274 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
275 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
281 static bool ia64_cannot_force_const_mem (rtx);
282 static const char *ia64_mangle_type (const_tree);
283 static const char *ia64_invalid_conversion (const_tree, const_tree);
284 static const char *ia64_invalid_unary_op (int, const_tree);
285 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
286 static enum machine_mode ia64_c_mode_for_suffix (char);
287 \f
288 /* Table of valid machine attributes. */
289 static const struct attribute_spec ia64_attribute_table[] =
290 {
291 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
292 { "syscall_linkage", 0, 0, false, true, true, NULL },
293 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
294 { "version_id", 1, 1, true, false, false,
295 ia64_handle_version_id_attribute },
296 { NULL, 0, 0, false, false, false, NULL }
297 };
298
299 /* Initialize the GCC target structure. */
300 #undef TARGET_ATTRIBUTE_TABLE
301 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
302
303 #undef TARGET_INIT_BUILTINS
304 #define TARGET_INIT_BUILTINS ia64_init_builtins
305
306 #undef TARGET_EXPAND_BUILTIN
307 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
308
309 #undef TARGET_ASM_BYTE_OP
310 #define TARGET_ASM_BYTE_OP "\tdata1\t"
311 #undef TARGET_ASM_ALIGNED_HI_OP
312 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
313 #undef TARGET_ASM_ALIGNED_SI_OP
314 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
315 #undef TARGET_ASM_ALIGNED_DI_OP
316 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
317 #undef TARGET_ASM_UNALIGNED_HI_OP
318 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
319 #undef TARGET_ASM_UNALIGNED_SI_OP
320 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
321 #undef TARGET_ASM_UNALIGNED_DI_OP
322 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
323 #undef TARGET_ASM_INTEGER
324 #define TARGET_ASM_INTEGER ia64_assemble_integer
325
326 #undef TARGET_ASM_FUNCTION_PROLOGUE
327 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
328 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
329 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
330 #undef TARGET_ASM_FUNCTION_EPILOGUE
331 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
332
333 #undef TARGET_IN_SMALL_DATA_P
334 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
335
336 #undef TARGET_SCHED_ADJUST_COST
337 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
338 #undef TARGET_SCHED_ISSUE_RATE
339 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
340 #undef TARGET_SCHED_VARIABLE_ISSUE
341 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
342 #undef TARGET_SCHED_INIT
343 #define TARGET_SCHED_INIT ia64_sched_init
344 #undef TARGET_SCHED_FINISH
345 #define TARGET_SCHED_FINISH ia64_sched_finish
346 #undef TARGET_SCHED_INIT_GLOBAL
347 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
348 #undef TARGET_SCHED_FINISH_GLOBAL
349 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
350 #undef TARGET_SCHED_REORDER
351 #define TARGET_SCHED_REORDER ia64_sched_reorder
352 #undef TARGET_SCHED_REORDER2
353 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
354
355 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
356 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
357
358 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
359 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
360
361 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
362 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
363 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
364 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
365
366 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
367 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
368 ia64_first_cycle_multipass_dfa_lookahead_guard
369
370 #undef TARGET_SCHED_DFA_NEW_CYCLE
371 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
372
373 #undef TARGET_SCHED_H_I_D_EXTENDED
374 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
375
376 #undef TARGET_SCHED_SET_SCHED_FLAGS
377 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
378
379 #undef TARGET_SCHED_SPECULATE_INSN
380 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
381
382 #undef TARGET_SCHED_NEEDS_BLOCK_P
383 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
384
385 #undef TARGET_SCHED_GEN_CHECK
386 #define TARGET_SCHED_GEN_CHECK ia64_gen_check
387
388 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
389 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
390 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
391
392 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
393 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
394 #undef TARGET_ARG_PARTIAL_BYTES
395 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
396
397 #undef TARGET_ASM_OUTPUT_MI_THUNK
398 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
399 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
400 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
401
402 #undef TARGET_ASM_FILE_START
403 #define TARGET_ASM_FILE_START ia64_file_start
404
405 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
406 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
407
408 #undef TARGET_RTX_COSTS
409 #define TARGET_RTX_COSTS ia64_rtx_costs
410 #undef TARGET_ADDRESS_COST
411 #define TARGET_ADDRESS_COST hook_int_rtx_0
412
413 #undef TARGET_UNSPEC_MAY_TRAP_P
414 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
415
416 #undef TARGET_MACHINE_DEPENDENT_REORG
417 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
418
419 #undef TARGET_ENCODE_SECTION_INFO
420 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
421
422 #undef TARGET_SECTION_TYPE_FLAGS
423 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
424
425 #ifdef HAVE_AS_TLS
426 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
427 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
428 #endif
429
430 /* ??? ABI doesn't allow us to define this. */
431 #if 0
432 #undef TARGET_PROMOTE_FUNCTION_ARGS
433 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
434 #endif
435
436 /* ??? ABI doesn't allow us to define this. */
437 #if 0
438 #undef TARGET_PROMOTE_FUNCTION_RETURN
439 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
440 #endif
441
442 /* ??? Investigate. */
443 #if 0
444 #undef TARGET_PROMOTE_PROTOTYPES
445 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
446 #endif
447
448 #undef TARGET_STRUCT_VALUE_RTX
449 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
450 #undef TARGET_RETURN_IN_MEMORY
451 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
452 #undef TARGET_SETUP_INCOMING_VARARGS
453 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
454 #undef TARGET_STRICT_ARGUMENT_NAMING
455 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
456 #undef TARGET_MUST_PASS_IN_STACK
457 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
458
459 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
460 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
461
462 #undef TARGET_UNWIND_EMIT
463 #define TARGET_UNWIND_EMIT process_for_unwind_directive
464
465 #undef TARGET_SCALAR_MODE_SUPPORTED_P
466 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
467 #undef TARGET_VECTOR_MODE_SUPPORTED_P
468 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
469
470 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
471 in an order different from the specified program order. */
472 #undef TARGET_RELAXED_ORDERING
473 #define TARGET_RELAXED_ORDERING true
474
475 #undef TARGET_DEFAULT_TARGET_FLAGS
476 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
477 #undef TARGET_HANDLE_OPTION
478 #define TARGET_HANDLE_OPTION ia64_handle_option
479
480 #undef TARGET_CANNOT_FORCE_CONST_MEM
481 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
482
483 #undef TARGET_MANGLE_TYPE
484 #define TARGET_MANGLE_TYPE ia64_mangle_type
485
486 #undef TARGET_INVALID_CONVERSION
487 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
488 #undef TARGET_INVALID_UNARY_OP
489 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
490 #undef TARGET_INVALID_BINARY_OP
491 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
492
493 #undef TARGET_C_MODE_FOR_SUFFIX
494 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
495
496 struct gcc_target targetm = TARGET_INITIALIZER;
497 \f
498 typedef enum
499 {
500 ADDR_AREA_NORMAL, /* normal address area */
501 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
502 }
503 ia64_addr_area;
504
505 static GTY(()) tree small_ident1;
506 static GTY(()) tree small_ident2;
507
508 static void
509 init_idents (void)
510 {
511 if (small_ident1 == 0)
512 {
513 small_ident1 = get_identifier ("small");
514 small_ident2 = get_identifier ("__small__");
515 }
516 }
517
518 /* Retrieve the address area that has been chosen for the given decl. */
519
520 static ia64_addr_area
521 ia64_get_addr_area (tree decl)
522 {
523 tree model_attr;
524
525 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
526 if (model_attr)
527 {
528 tree id;
529
530 init_idents ();
531 id = TREE_VALUE (TREE_VALUE (model_attr));
532 if (id == small_ident1 || id == small_ident2)
533 return ADDR_AREA_SMALL;
534 }
535 return ADDR_AREA_NORMAL;
536 }
537
538 static tree
539 ia64_handle_model_attribute (tree *node, tree name, tree args,
540 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
541 {
542 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
543 ia64_addr_area area;
544 tree arg, decl = *node;
545
546 init_idents ();
547 arg = TREE_VALUE (args);
548 if (arg == small_ident1 || arg == small_ident2)
549 {
550 addr_area = ADDR_AREA_SMALL;
551 }
552 else
553 {
554 warning (OPT_Wattributes, "invalid argument of %qs attribute",
555 IDENTIFIER_POINTER (name));
556 *no_add_attrs = true;
557 }
558
559 switch (TREE_CODE (decl))
560 {
561 case VAR_DECL:
562 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
563 == FUNCTION_DECL)
564 && !TREE_STATIC (decl))
565 {
566 error ("%Jan address area attribute cannot be specified for "
567 "local variables", decl);
568 *no_add_attrs = true;
569 }
570 area = ia64_get_addr_area (decl);
571 if (area != ADDR_AREA_NORMAL && addr_area != area)
572 {
573 error ("address area of %q+D conflicts with previous "
574 "declaration", decl);
575 *no_add_attrs = true;
576 }
577 break;
578
579 case FUNCTION_DECL:
580 error ("%Jaddress area attribute cannot be specified for functions",
581 decl);
582 *no_add_attrs = true;
583 break;
584
585 default:
586 warning (OPT_Wattributes, "%qs attribute ignored",
587 IDENTIFIER_POINTER (name));
588 *no_add_attrs = true;
589 break;
590 }
591
592 return NULL_TREE;
593 }
594
595 static void
596 ia64_encode_addr_area (tree decl, rtx symbol)
597 {
598 int flags;
599
600 flags = SYMBOL_REF_FLAGS (symbol);
601 switch (ia64_get_addr_area (decl))
602 {
603 case ADDR_AREA_NORMAL: break;
604 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
605 default: gcc_unreachable ();
606 }
607 SYMBOL_REF_FLAGS (symbol) = flags;
608 }
609
610 static void
611 ia64_encode_section_info (tree decl, rtx rtl, int first)
612 {
613 default_encode_section_info (decl, rtl, first);
614
615 /* Careful not to prod global register variables. */
616 if (TREE_CODE (decl) == VAR_DECL
617 && GET_CODE (DECL_RTL (decl)) == MEM
618 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
619 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
620 ia64_encode_addr_area (decl, XEXP (rtl, 0));
621 }
622 \f
623 /* Return 1 if the operands of a move are ok. */
624
625 int
626 ia64_move_ok (rtx dst, rtx src)
627 {
628 /* If we're under init_recog_no_volatile, we'll not be able to use
629 memory_operand. So check the code directly and don't worry about
630 the validity of the underlying address, which should have been
631 checked elsewhere anyway. */
632 if (GET_CODE (dst) != MEM)
633 return 1;
634 if (GET_CODE (src) == MEM)
635 return 0;
636 if (register_operand (src, VOIDmode))
637 return 1;
638
639 /* Otherwise, this must be a constant, and that constant must be 0, 0.0 or 1.0. */
640 if (INTEGRAL_MODE_P (GET_MODE (dst)))
641 return src == const0_rtx;
642 else
643 return satisfies_constraint_G (src);
644 }
645
646 /* Return 1 if the operands are ok for a floating point load pair. */
647
648 int
649 ia64_load_pair_ok (rtx dst, rtx src)
650 {
651 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
652 return 0;
653 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
654 return 0;
655 switch (GET_CODE (XEXP (src, 0)))
656 {
657 case REG:
658 case POST_INC:
659 break;
660 case POST_DEC:
661 return 0;
662 case POST_MODIFY:
663 {
664 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
665
666 if (GET_CODE (adjust) != CONST_INT
667 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
668 return 0;
669 }
670 break;
671 default:
672 abort ();
673 }
674 return 1;
675 }
676
677 int
678 addp4_optimize_ok (rtx op1, rtx op2)
679 {
680 return (basereg_operand (op1, GET_MODE(op1)) !=
681 basereg_operand (op2, GET_MODE(op2)));
682 }
683
684 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
685 Return the length of the field, or <= 0 on failure. */
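/* For example, with ROP = 0x3f0 and RSHIFT = 4 the shift below leaves 0x3f,
   and exact_log2 (0x40) gives a field length of 6; a non-contiguous mask
   such as 0x5 yields -1.  */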
686
687 int
688 ia64_depz_field_mask (rtx rop, rtx rshift)
689 {
690 unsigned HOST_WIDE_INT op = INTVAL (rop);
691 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
692
693 /* Get rid of the zero bits we're shifting in. */
694 op >>= shift;
695
696 /* We must now have a solid block of 1's at bit 0. */
697 return exact_log2 (op + 1);
698 }
699
700 /* Return the TLS model to use for ADDR. */
701
702 static enum tls_model
703 tls_symbolic_operand_type (rtx addr)
704 {
705 enum tls_model tls_kind = 0;
706
707 if (GET_CODE (addr) == CONST)
708 {
709 if (GET_CODE (XEXP (addr, 0)) == PLUS
710 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
711 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
712 }
713 else if (GET_CODE (addr) == SYMBOL_REF)
714 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
715
716 return tls_kind;
717 }
718
719 /* Return true if X is a constant that is valid for some immediate
720 field in an instruction. */
721
722 bool
723 ia64_legitimate_constant_p (rtx x)
724 {
725 switch (GET_CODE (x))
726 {
727 case CONST_INT:
728 case LABEL_REF:
729 return true;
730
731 case CONST_DOUBLE:
732 if (GET_MODE (x) == VOIDmode)
733 return true;
734 return satisfies_constraint_G (x);
735
736 case CONST:
737 case SYMBOL_REF:
738 /* ??? Short term workaround for PR 28490. We must make the code here
739 match the code in ia64_expand_move and move_operand, even though they
740 are both technically wrong. */
741 if (tls_symbolic_operand_type (x) == 0)
742 {
743 HOST_WIDE_INT addend = 0;
744 rtx op = x;
745
746 if (GET_CODE (op) == CONST
747 && GET_CODE (XEXP (op, 0)) == PLUS
748 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
749 {
750 addend = INTVAL (XEXP (XEXP (op, 0), 1));
751 op = XEXP (XEXP (op, 0), 0);
752 }
753
754 if (any_offset_symbol_operand (op, GET_MODE (op))
755 || function_operand (op, GET_MODE (op)))
756 return true;
757 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
758 return (addend & 0x3fff) == 0;
759 return false;
760 }
761 return false;
762
763 case CONST_VECTOR:
764 {
765 enum machine_mode mode = GET_MODE (x);
766
767 if (mode == V2SFmode)
768 return satisfies_constraint_Y (x);
769
770 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
771 && GET_MODE_SIZE (mode) <= 8);
772 }
773
774 default:
775 return false;
776 }
777 }
778
779 /* Don't allow TLS addresses to get spilled to memory. */
780
781 static bool
782 ia64_cannot_force_const_mem (rtx x)
783 {
784 return tls_symbolic_operand_type (x) != 0;
785 }
786
787 /* Expand a symbolic constant load. */
788
789 bool
790 ia64_expand_load_address (rtx dest, rtx src)
791 {
792 gcc_assert (GET_CODE (dest) == REG);
793
794 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
795 having to pointer-extend the value afterward. Other forms of address
796 computation below are also more natural to compute as 64-bit quantities.
797 If we've been given an SImode destination register, change it. */
798 if (GET_MODE (dest) != Pmode)
799 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
800 byte_lowpart_offset (Pmode, GET_MODE (dest)));
801
802 if (TARGET_NO_PIC)
803 return false;
804 if (small_addr_symbolic_operand (src, VOIDmode))
805 return false;
806
807 if (TARGET_AUTO_PIC)
808 emit_insn (gen_load_gprel64 (dest, src));
809 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
810 emit_insn (gen_load_fptr (dest, src));
811 else if (sdata_symbolic_operand (src, VOIDmode))
812 emit_insn (gen_load_gprel (dest, src));
813 else
814 {
815 HOST_WIDE_INT addend = 0;
816 rtx tmp;
817
818 /* We did split constant offsets in ia64_expand_move, and we did try
819 to keep them split in move_operand, but we also allowed reload to
820 rematerialize arbitrary constants rather than spill the value to
821 the stack and reload it. So we have to be prepared here to split
822 them apart again. */
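/* The arithmetic below sign-extends the low 14 bits of the offset, so LO
   lands in [-0x2000, 0x1fff] and HI keeps the rest, a multiple of 0x4000.  */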
823 if (GET_CODE (src) == CONST)
824 {
825 HOST_WIDE_INT hi, lo;
826
827 hi = INTVAL (XEXP (XEXP (src, 0), 1));
828 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
829 hi = hi - lo;
830
831 if (lo != 0)
832 {
833 addend = lo;
834 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
835 }
836 }
837
838 tmp = gen_rtx_HIGH (Pmode, src);
839 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
840 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
841
842 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
843 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
844
845 if (addend)
846 {
847 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
848 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
849 }
850 }
851
852 return true;
853 }
854
855 static GTY(()) rtx gen_tls_tga;
856 static rtx
857 gen_tls_get_addr (void)
858 {
859 if (!gen_tls_tga)
860 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
861 return gen_tls_tga;
862 }
863
864 static GTY(()) rtx thread_pointer_rtx;
865 static rtx
866 gen_thread_pointer (void)
867 {
868 if (!thread_pointer_rtx)
869 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
870 return thread_pointer_rtx;
871 }
872
873 static rtx
874 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
875 rtx orig_op1, HOST_WIDE_INT addend)
876 {
877 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
878 rtx orig_op0 = op0;
879 HOST_WIDE_INT addend_lo, addend_hi;
880
881 switch (tls_kind)
882 {
883 case TLS_MODEL_GLOBAL_DYNAMIC:
884 start_sequence ();
885
886 tga_op1 = gen_reg_rtx (Pmode);
887 emit_insn (gen_load_dtpmod (tga_op1, op1));
888
889 tga_op2 = gen_reg_rtx (Pmode);
890 emit_insn (gen_load_dtprel (tga_op2, op1));
891
892 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
893 LCT_CONST, Pmode, 2, tga_op1,
894 Pmode, tga_op2, Pmode);
895
896 insns = get_insns ();
897 end_sequence ();
898
899 if (GET_MODE (op0) != Pmode)
900 op0 = tga_ret;
901 emit_libcall_block (insns, op0, tga_ret, op1);
902 break;
903
904 case TLS_MODEL_LOCAL_DYNAMIC:
905 /* ??? This isn't the completely proper way to do local-dynamic.
906 If the call to __tls_get_addr is used only by a single symbol,
907 then we should (somehow) move the dtprel to the second arg
908 to avoid the extra add. */
909 start_sequence ();
910
911 tga_op1 = gen_reg_rtx (Pmode);
912 emit_insn (gen_load_dtpmod (tga_op1, op1));
913
914 tga_op2 = const0_rtx;
915
916 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
917 LCT_CONST, Pmode, 2, tga_op1,
918 Pmode, tga_op2, Pmode);
919
920 insns = get_insns ();
921 end_sequence ();
922
923 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
924 UNSPEC_LD_BASE);
925 tmp = gen_reg_rtx (Pmode);
926 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
927
928 if (!register_operand (op0, Pmode))
929 op0 = gen_reg_rtx (Pmode);
930 if (TARGET_TLS64)
931 {
932 emit_insn (gen_load_dtprel (op0, op1));
933 emit_insn (gen_adddi3 (op0, tmp, op0));
934 }
935 else
936 emit_insn (gen_add_dtprel (op0, op1, tmp));
937 break;
938
939 case TLS_MODEL_INITIAL_EXEC:
940 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
941 addend_hi = addend - addend_lo;
942
943 op1 = plus_constant (op1, addend_hi);
944 addend = addend_lo;
945
946 tmp = gen_reg_rtx (Pmode);
947 emit_insn (gen_load_tprel (tmp, op1));
948
949 if (!register_operand (op0, Pmode))
950 op0 = gen_reg_rtx (Pmode);
951 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
952 break;
953
954 case TLS_MODEL_LOCAL_EXEC:
955 if (!register_operand (op0, Pmode))
956 op0 = gen_reg_rtx (Pmode);
957
958 op1 = orig_op1;
959 addend = 0;
960 if (TARGET_TLS64)
961 {
962 emit_insn (gen_load_tprel (op0, op1));
963 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
964 }
965 else
966 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
967 break;
968
969 default:
970 gcc_unreachable ();
971 }
972
973 if (addend)
974 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
975 orig_op0, 1, OPTAB_DIRECT);
976 if (orig_op0 == op0)
977 return NULL_RTX;
978 if (GET_MODE (orig_op0) == Pmode)
979 return op0;
980 return gen_lowpart (GET_MODE (orig_op0), op0);
981 }
982
983 rtx
984 ia64_expand_move (rtx op0, rtx op1)
985 {
986 enum machine_mode mode = GET_MODE (op0);
987
988 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
989 op1 = force_reg (mode, op1);
990
991 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
992 {
993 HOST_WIDE_INT addend = 0;
994 enum tls_model tls_kind;
995 rtx sym = op1;
996
997 if (GET_CODE (op1) == CONST
998 && GET_CODE (XEXP (op1, 0)) == PLUS
999 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1000 {
1001 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1002 sym = XEXP (XEXP (op1, 0), 0);
1003 }
1004
1005 tls_kind = tls_symbolic_operand_type (sym);
1006 if (tls_kind)
1007 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1008
1009 if (any_offset_symbol_operand (sym, mode))
1010 addend = 0;
1011 else if (aligned_offset_symbol_operand (sym, mode))
1012 {
1013 HOST_WIDE_INT addend_lo, addend_hi;
1014
1015 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1016 addend_hi = addend - addend_lo;
1017
1018 if (addend_lo != 0)
1019 {
1020 op1 = plus_constant (sym, addend_hi);
1021 addend = addend_lo;
1022 }
1023 else
1024 addend = 0;
1025 }
1026 else
1027 op1 = sym;
1028
1029 if (reload_completed)
1030 {
1031 /* We really should have taken care of this offset earlier. */
1032 gcc_assert (addend == 0);
1033 if (ia64_expand_load_address (op0, op1))
1034 return NULL_RTX;
1035 }
1036
1037 if (addend)
1038 {
1039 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1040
1041 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1042
1043 op1 = expand_simple_binop (mode, PLUS, subtarget,
1044 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1045 if (op0 == op1)
1046 return NULL_RTX;
1047 }
1048 }
1049
1050 return op1;
1051 }
1052
1053 /* Split a move from OP1 to OP0 conditional on COND. */
1054
1055 void
1056 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1057 {
1058 rtx insn, first = get_last_insn ();
1059
1060 emit_move_insn (op0, op1);
1061
1062 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1063 if (INSN_P (insn))
1064 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1065 PATTERN (insn));
1066 }
1067
1068 /* Split a post-reload TImode or TFmode reference into two DImode
1069 components. This is made extra difficult by the fact that we do
1070 not get any scratch registers to work with, because reload cannot
1071 be prevented from giving us a scratch that overlaps the register
1072 pair involved. So instead, when addressing memory, we tweak the
1073 pointer register up and back down with POST_INCs. Or up and not
1074 back down when we can get away with it.
1075
1076 REVERSED is true when the loads must be done in reversed order
1077 (high word first) for correctness. DEAD is true when the pointer
1078 dies with the second insn we generate and therefore the second
1079 address must not carry a postmodify.
1080
1081 May return an insn which is to be emitted after the moves. */
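/* For example, a TImode load from (mem (reg b)) is normally split into a
   DImode load through (post_inc b) followed by one through (post_dec b),
   leaving b unchanged afterward; when DEAD is set, the second load simply
   uses the already-advanced pointer without a postmodify.  */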
1082
1083 static rtx
1084 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1085 {
1086 rtx fixup = 0;
1087
1088 switch (GET_CODE (in))
1089 {
1090 case REG:
1091 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1092 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1093 break;
1094
1095 case CONST_INT:
1096 case CONST_DOUBLE:
1097 /* Cannot occur reversed. */
1098 gcc_assert (!reversed);
1099
1100 if (GET_MODE (in) != TFmode)
1101 split_double (in, &out[0], &out[1]);
1102 else
1103 /* split_double does not understand how to split a TFmode
1104 quantity into a pair of DImode constants. */
1105 {
1106 REAL_VALUE_TYPE r;
1107 unsigned HOST_WIDE_INT p[2];
1108 long l[4]; /* TFmode is 128 bits */
1109
1110 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1111 real_to_target (l, &r, TFmode);
1112
1113 if (FLOAT_WORDS_BIG_ENDIAN)
1114 {
1115 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1116 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1117 }
1118 else
1119 {
1120 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1121 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1122 }
1123 out[0] = GEN_INT (p[0]);
1124 out[1] = GEN_INT (p[1]);
1125 }
1126 break;
1127
1128 case MEM:
1129 {
1130 rtx base = XEXP (in, 0);
1131 rtx offset;
1132
1133 switch (GET_CODE (base))
1134 {
1135 case REG:
1136 if (!reversed)
1137 {
1138 out[0] = adjust_automodify_address
1139 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1140 out[1] = adjust_automodify_address
1141 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1142 }
1143 else
1144 {
1145 /* Reversal requires a pre-increment, which can only
1146 be done as a separate insn. */
1147 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1148 out[0] = adjust_automodify_address
1149 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1150 out[1] = adjust_address (in, DImode, 0);
1151 }
1152 break;
1153
1154 case POST_INC:
1155 gcc_assert (!reversed && !dead);
1156
1157 /* Just do the increment in two steps. */
1158 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1159 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1160 break;
1161
1162 case POST_DEC:
1163 gcc_assert (!reversed && !dead);
1164
1165 /* Add 8, subtract 24. */
1166 base = XEXP (base, 0);
1167 out[0] = adjust_automodify_address
1168 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1169 out[1] = adjust_automodify_address
1170 (in, DImode,
1171 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1172 8);
1173 break;
1174
1175 case POST_MODIFY:
1176 gcc_assert (!reversed && !dead);
1177
1178 /* Extract and adjust the modification. This case is
1179 trickier than the others, because we might have an
1180 index register, or we might have a combined offset that
1181 doesn't fit a signed 9-bit displacement field. We can
1182 assume the incoming expression is already legitimate. */
1183 offset = XEXP (base, 1);
1184 base = XEXP (base, 0);
1185
1186 out[0] = adjust_automodify_address
1187 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1188
1189 if (GET_CODE (XEXP (offset, 1)) == REG)
1190 {
1191 /* Can't adjust the postmodify to match. Emit the
1192 original, then a separate addition insn. */
1193 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1194 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1195 }
1196 else
1197 {
1198 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1199 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1200 {
1201 /* Again the postmodify cannot be made to match,
1202 but in this case it's more efficient to get rid
1203 of the postmodify entirely and fix up with an
1204 add insn. */
1205 out[1] = adjust_automodify_address (in, DImode, base, 8);
1206 fixup = gen_adddi3
1207 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1208 }
1209 else
1210 {
1211 /* Combined offset still fits in the displacement field.
1212 (We cannot overflow it at the high end.) */
1213 out[1] = adjust_automodify_address
1214 (in, DImode, gen_rtx_POST_MODIFY
1215 (Pmode, base, gen_rtx_PLUS
1216 (Pmode, base,
1217 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1218 8);
1219 }
1220 }
1221 break;
1222
1223 default:
1224 gcc_unreachable ();
1225 }
1226 break;
1227 }
1228
1229 default:
1230 gcc_unreachable ();
1231 }
1232
1233 return fixup;
1234 }
1235
1236 /* Split a TImode or TFmode move instruction after reload.
1237 This is used by *movtf_internal and *movti_internal. */
1238 void
1239 ia64_split_tmode_move (rtx operands[])
1240 {
1241 rtx in[2], out[2], insn;
1242 rtx fixup[2];
1243 bool dead = false;
1244 bool reversed = false;
1245
1246 /* It is possible for reload to decide to overwrite a pointer with
1247 the value it points to. In that case we have to do the loads in
1248 the appropriate order so that the pointer is not destroyed too
1249 early. Also we must not generate a postmodify for that second
1250 load, or rws_access_regno will die. */
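/* (For example, if operands[0] is (reg:TI r14) and operands[1] is
   (mem:TI (reg:DI r14)), the load into r14 itself has to come last.)  */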
1251 if (GET_CODE (operands[1]) == MEM
1252 && reg_overlap_mentioned_p (operands[0], operands[1]))
1253 {
1254 rtx base = XEXP (operands[1], 0);
1255 while (GET_CODE (base) != REG)
1256 base = XEXP (base, 0);
1257
1258 if (REGNO (base) == REGNO (operands[0]))
1259 reversed = true;
1260 dead = true;
1261 }
1262 /* Another reason to do the moves in reversed order is if the first
1263 element of the target register pair is also the second element of
1264 the source register pair. */
1265 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1266 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1267 reversed = true;
1268
1269 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1270 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1271
1272 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1273 if (GET_CODE (EXP) == MEM \
1274 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1275 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1276 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1277 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1278 XEXP (XEXP (EXP, 0), 0), \
1279 REG_NOTES (INSN))
1280
1281 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1282 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1283 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1284
1285 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1286 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1287 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1288
1289 if (fixup[0])
1290 emit_insn (fixup[0]);
1291 if (fixup[1])
1292 emit_insn (fixup[1]);
1293
1294 #undef MAYBE_ADD_REG_INC_NOTE
1295 }
1296
1297 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1298 through memory plus an extra GR scratch register. Except that you can
1299 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1300 SECONDARY_RELOAD_CLASS, but not both.
1301
1302 We got into problems in the first place by allowing a construct like
1303 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1304 This solution attempts to prevent this situation from occurring. When
1305 we see something like the above, we spill the inner register to memory. */
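/* For example, (subreg:XF (reg:TI r)) is rewritten as an XFmode reference
   into a 16-byte stack temporary that first receives the TImode value
   of r.  */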
1306
1307 static rtx
1308 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1309 {
1310 if (GET_CODE (in) == SUBREG
1311 && GET_MODE (SUBREG_REG (in)) == TImode
1312 && GET_CODE (SUBREG_REG (in)) == REG)
1313 {
1314 rtx memt = assign_stack_temp (TImode, 16, 0);
1315 emit_move_insn (memt, SUBREG_REG (in));
1316 return adjust_address (memt, mode, 0);
1317 }
1318 else if (force && GET_CODE (in) == REG)
1319 {
1320 rtx memx = assign_stack_temp (mode, 16, 0);
1321 emit_move_insn (memx, in);
1322 return memx;
1323 }
1324 else
1325 return in;
1326 }
1327
1328 /* Expand the movxf or movrf pattern (MODE says which) with the given
1329 OPERANDS, returning true if the pattern should then invoke
1330 DONE. */
1331
1332 bool
1333 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1334 {
1335 rtx op0 = operands[0];
1336
1337 if (GET_CODE (op0) == SUBREG)
1338 op0 = SUBREG_REG (op0);
1339
1340 /* We must support XFmode loads into general registers for stdarg/vararg,
1341 unprototyped calls, and a rare case where a long double is passed as
1342 an argument after a float HFA fills the FP registers. We split them into
1343 DImode loads for convenience. We also need to support XFmode stores
1344 for the last case. This case does not happen for stdarg/vararg routines,
1345 because we do a block store to memory of unnamed arguments. */
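/* For example, an XFmode load into, say, (reg:XF r32) is emitted below as
   two DImode loads into r32 and r33.  */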
1346
1347 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1348 {
1349 rtx out[2];
1350
1351 /* We're hoping to transform everything that deals with XFmode
1352 quantities and GR registers early in the compiler. */
1353 gcc_assert (can_create_pseudo_p ());
1354
1355 /* Struct to register can just use TImode instead. */
1356 if ((GET_CODE (operands[1]) == SUBREG
1357 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1358 || (GET_CODE (operands[1]) == REG
1359 && GR_REGNO_P (REGNO (operands[1]))))
1360 {
1361 rtx op1 = operands[1];
1362
1363 if (GET_CODE (op1) == SUBREG)
1364 op1 = SUBREG_REG (op1);
1365 else
1366 op1 = gen_rtx_REG (TImode, REGNO (op1));
1367
1368 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1369 return true;
1370 }
1371
1372 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1373 {
1374 /* Don't word-swap when reading in the constant. */
1375 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1376 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1377 0, mode));
1378 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1379 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1380 0, mode));
1381 return true;
1382 }
1383
1384 /* If the quantity is in a register not known to be GR, spill it. */
1385 if (register_operand (operands[1], mode))
1386 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1387
1388 gcc_assert (GET_CODE (operands[1]) == MEM);
1389
1390 /* Don't word-swap when reading in the value. */
1391 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1392 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1393
1394 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1395 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1396 return true;
1397 }
1398
1399 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1400 {
1401 /* We're hoping to transform everything that deals with XFmode
1402 quantities and GR registers early in the compiler. */
1403 gcc_assert (can_create_pseudo_p ());
1404
1405 /* Op0 can't be a GR_REG here, as that case is handled above.
1406 If op0 is a register, then we spill op1, so that we now have a
1407 MEM operand. This requires creating an XFmode subreg of a TImode reg
1408 to force the spill. */
1409 if (register_operand (operands[0], mode))
1410 {
1411 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1412 op1 = gen_rtx_SUBREG (mode, op1, 0);
1413 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1414 }
1415
1416 else
1417 {
1418 rtx in[2];
1419
1420 gcc_assert (GET_CODE (operands[0]) == MEM);
1421
1422 /* Don't word-swap when writing out the value. */
1423 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1424 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1425
1426 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1427 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1428 return true;
1429 }
1430 }
1431
1432 if (!reload_in_progress && !reload_completed)
1433 {
1434 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1435
1436 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1437 {
1438 rtx memt, memx, in = operands[1];
1439 if (CONSTANT_P (in))
1440 in = validize_mem (force_const_mem (mode, in));
1441 if (GET_CODE (in) == MEM)
1442 memt = adjust_address (in, TImode, 0);
1443 else
1444 {
1445 memt = assign_stack_temp (TImode, 16, 0);
1446 memx = adjust_address (memt, mode, 0);
1447 emit_move_insn (memx, in);
1448 }
1449 emit_move_insn (op0, memt);
1450 return true;
1451 }
1452
1453 if (!ia64_move_ok (operands[0], operands[1]))
1454 operands[1] = force_reg (mode, operands[1]);
1455 }
1456
1457 return false;
1458 }
1459
1460 /* Emit comparison instruction if necessary, returning the expression
1461 that holds the compare result in the proper mode. */
1462
1463 static GTY(()) rtx cmptf_libfunc;
1464
1465 rtx
1466 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1467 {
1468 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1469 rtx cmp;
1470
1471 /* If we have a BImode input, then we already have a compare result, and
1472 do not need to emit another comparison. */
1473 if (GET_MODE (op0) == BImode)
1474 {
1475 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1476 cmp = op0;
1477 }
1478 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1479 magic number as its third argument indicating what to do.
1480 The return value is an integer to be compared against zero. */
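/* For example, a LE comparison is lowered to a call
   _U_Qfcmp (op0, op1, QCMP_LT|QCMP_EQ|QCMP_INV) followed by a test of the
   returned integer against zero.  */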
1481 else if (GET_MODE (op0) == TFmode)
1482 {
1483 enum qfcmp_magic {
1484 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1485 QCMP_UNORD = 2,
1486 QCMP_EQ = 4,
1487 QCMP_LT = 8,
1488 QCMP_GT = 16
1489 } magic;
1490 enum rtx_code ncode;
1491 rtx ret, insns;
1492
1493 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1494 switch (code)
1495 {
1496 /* 1 = equal, 0 = not equal. Equality operators do
1497 not raise FP_INVALID when given an SNaN operand. */
1498 case EQ: magic = QCMP_EQ; ncode = NE; break;
1499 case NE: magic = QCMP_EQ; ncode = EQ; break;
1500 /* isunordered() from C99. */
1501 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1502 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1503 /* Relational operators raise FP_INVALID when given
1504 an SNaN operand. */
1505 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1506 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1507 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1508 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1509 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1510 Expanders for buneq etc. would have to be added to ia64.md
1511 for this to be useful. */
1512 default: gcc_unreachable ();
1513 }
1514
1515 start_sequence ();
1516
1517 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1518 op0, TFmode, op1, TFmode,
1519 GEN_INT (magic), DImode);
1520 cmp = gen_reg_rtx (BImode);
1521 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1522 gen_rtx_fmt_ee (ncode, BImode,
1523 ret, const0_rtx)));
1524
1525 insns = get_insns ();
1526 end_sequence ();
1527
1528 emit_libcall_block (insns, cmp, cmp,
1529 gen_rtx_fmt_ee (code, BImode, op0, op1));
1530 code = NE;
1531 }
1532 else
1533 {
1534 cmp = gen_reg_rtx (BImode);
1535 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1536 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1537 code = NE;
1538 }
1539
1540 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1541 }
1542
1543 /* Generate an integral vector comparison. Return true if the condition has
1544 been reversed, and so the sense of the comparison should be inverted. */
1545
1546 static bool
1547 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1548 rtx dest, rtx op0, rtx op1)
1549 {
1550 bool negate = false;
1551 rtx x;
1552
1553 /* Canonicalize the comparison to EQ, GT, GTU. */
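/* (For example, LE becomes GT with NEGATE set, and LT becomes GT with
   the operands swapped.)  */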
1554 switch (code)
1555 {
1556 case EQ:
1557 case GT:
1558 case GTU:
1559 break;
1560
1561 case NE:
1562 case LE:
1563 case LEU:
1564 code = reverse_condition (code);
1565 negate = true;
1566 break;
1567
1568 case GE:
1569 case GEU:
1570 code = reverse_condition (code);
1571 negate = true;
1572 /* FALLTHRU */
1573
1574 case LT:
1575 case LTU:
1576 code = swap_condition (code);
1577 x = op0, op0 = op1, op1 = x;
1578 break;
1579
1580 default:
1581 gcc_unreachable ();
1582 }
1583
1584 /* Unsigned parallel compare is not supported by the hardware. Play some
1585 tricks to turn this into a signed comparison against 0. */
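/* For the V8QImode/V4HImode cases below this relies on the identity that
   x >u y exactly when the unsigned saturating difference x -us y is
   nonzero.  */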
1586 if (code == GTU)
1587 {
1588 switch (mode)
1589 {
1590 case V2SImode:
1591 {
1592 rtx t1, t2, mask;
1593
1594 /* Perform a parallel modulo subtraction. */
1595 t1 = gen_reg_rtx (V2SImode);
1596 emit_insn (gen_subv2si3 (t1, op0, op1));
1597
1598 /* Extract the original sign bit of op0. */
1599 mask = GEN_INT (-0x80000000);
1600 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1601 mask = force_reg (V2SImode, mask);
1602 t2 = gen_reg_rtx (V2SImode);
1603 emit_insn (gen_andv2si3 (t2, op0, mask));
1604
1605 /* XOR it back into the result of the subtraction. This results
1606 in the sign bit set iff we saw unsigned underflow. */
1607 x = gen_reg_rtx (V2SImode);
1608 emit_insn (gen_xorv2si3 (x, t1, t2));
1609
1610 code = GT;
1611 op0 = x;
1612 op1 = CONST0_RTX (mode);
1613 }
1614 break;
1615
1616 case V8QImode:
1617 case V4HImode:
1618 /* Perform a parallel unsigned saturating subtraction. */
1619 x = gen_reg_rtx (mode);
1620 emit_insn (gen_rtx_SET (VOIDmode, x,
1621 gen_rtx_US_MINUS (mode, op0, op1)));
1622
1623 code = EQ;
1624 op0 = x;
1625 op1 = CONST0_RTX (mode);
1626 negate = !negate;
1627 break;
1628
1629 default:
1630 gcc_unreachable ();
1631 }
1632 }
1633
1634 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1635 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1636
1637 return negate;
1638 }
1639
1640 /* Emit an integral vector conditional move. */
1641
1642 void
1643 ia64_expand_vecint_cmov (rtx operands[])
1644 {
1645 enum machine_mode mode = GET_MODE (operands[0]);
1646 enum rtx_code code = GET_CODE (operands[3]);
1647 bool negate;
1648 rtx cmp, x, ot, of;
1649
1650 cmp = gen_reg_rtx (mode);
1651 negate = ia64_expand_vecint_compare (code, mode, cmp,
1652 operands[4], operands[5]);
1653
1654 ot = operands[1+negate];
1655 of = operands[2-negate];
1656
1657 if (ot == CONST0_RTX (mode))
1658 {
1659 if (of == CONST0_RTX (mode))
1660 {
1661 emit_move_insn (operands[0], ot);
1662 return;
1663 }
1664
1665 x = gen_rtx_NOT (mode, cmp);
1666 x = gen_rtx_AND (mode, x, of);
1667 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1668 }
1669 else if (of == CONST0_RTX (mode))
1670 {
1671 x = gen_rtx_AND (mode, cmp, ot);
1672 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1673 }
1674 else
1675 {
1676 rtx t, f;
1677
1678 t = gen_reg_rtx (mode);
1679 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1680 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1681
1682 f = gen_reg_rtx (mode);
1683 x = gen_rtx_NOT (mode, cmp);
1684 x = gen_rtx_AND (mode, x, operands[2-negate]);
1685 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1686
1687 x = gen_rtx_IOR (mode, t, f);
1688 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1689 }
1690 }
1691
1692 /* Emit an integral vector min or max operation. Return true if all done. */
1693
1694 bool
1695 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1696 rtx operands[])
1697 {
1698 rtx xops[6];
1699
1700 /* These four combinations are supported directly. */
1701 if (mode == V8QImode && (code == UMIN || code == UMAX))
1702 return false;
1703 if (mode == V4HImode && (code == SMIN || code == SMAX))
1704 return false;
1705
1706 /* This combination can be implemented with only saturating subtraction. */
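/* (Since a -us b == umax (a, b) - b, adding operand 2 back in yields the
   unsigned maximum.)  */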
1707 if (mode == V4HImode && code == UMAX)
1708 {
1709 rtx x, tmp = gen_reg_rtx (mode);
1710
1711 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1712 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1713
1714 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1715 return true;
1716 }
1717
1718 /* Everything else implemented via vector comparisons. */
1719 xops[0] = operands[0];
1720 xops[4] = xops[1] = operands[1];
1721 xops[5] = xops[2] = operands[2];
1722
1723 switch (code)
1724 {
1725 case UMIN:
1726 code = LTU;
1727 break;
1728 case UMAX:
1729 code = GTU;
1730 break;
1731 case SMIN:
1732 code = LT;
1733 break;
1734 case SMAX:
1735 code = GT;
1736 break;
1737 default:
1738 gcc_unreachable ();
1739 }
1740 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1741
1742 ia64_expand_vecint_cmov (xops);
1743 return true;
1744 }
1745
1746 /* Emit an integral vector widening sum operation. */
1747
1748 void
1749 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1750 {
1751 rtx l, h, x, s;
1752 enum machine_mode wmode, mode;
1753 rtx (*unpack_l) (rtx, rtx, rtx);
1754 rtx (*unpack_h) (rtx, rtx, rtx);
1755 rtx (*plus) (rtx, rtx, rtx);
1756
1757 wmode = GET_MODE (operands[0]);
1758 mode = GET_MODE (operands[1]);
1759
1760 switch (mode)
1761 {
1762 case V8QImode:
1763 unpack_l = gen_unpack1_l;
1764 unpack_h = gen_unpack1_h;
1765 plus = gen_addv4hi3;
1766 break;
1767 case V4HImode:
1768 unpack_l = gen_unpack2_l;
1769 unpack_h = gen_unpack2_h;
1770 plus = gen_addv2si3;
1771 break;
1772 default:
1773 gcc_unreachable ();
1774 }
1775
1776 /* Fill in x with the sign extension of each element in op1. */
1777 if (unsignedp)
1778 x = CONST0_RTX (mode);
1779 else
1780 {
1781 bool neg;
1782
1783 x = gen_reg_rtx (mode);
1784
1785 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1786 CONST0_RTX (mode));
1787 gcc_assert (!neg);
1788 }
1789
1790 l = gen_reg_rtx (wmode);
1791 h = gen_reg_rtx (wmode);
1792 s = gen_reg_rtx (wmode);
1793
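  /* Unpack the low and high halves of operands[1], interleaving each element
     with the matching element of X (zero for the unsigned case, the
     per-element sign mask computed above otherwise) so that every element is
     widened, then accumulate both partial sums together with operands[2].  */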
1794 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1795 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1796 emit_insn (plus (s, l, operands[2]));
1797 emit_insn (plus (operands[0], h, s));
1798 }
1799
1800 /* Emit a signed or unsigned V8QI dot product operation. */
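/* Each V8QI input is widened into a pair of V4HI vectors, corresponding
   halves are multiplied with pmpy2 into four V2SI partial products, and the
   products are summed together with the V2SI accumulator operands[3] to form
   operands[0].  */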
1801
1802 void
1803 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1804 {
1805 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1806
1807 /* Fill in x1 and x2 with the sign extension of each element. */
1808 if (unsignedp)
1809 x1 = x2 = CONST0_RTX (V8QImode);
1810 else
1811 {
1812 bool neg;
1813
1814 x1 = gen_reg_rtx (V8QImode);
1815 x2 = gen_reg_rtx (V8QImode);
1816
1817 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1818 CONST0_RTX (V8QImode));
1819 gcc_assert (!neg);
1820 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1821 CONST0_RTX (V8QImode));
1822 gcc_assert (!neg);
1823 }
1824
1825 l1 = gen_reg_rtx (V4HImode);
1826 l2 = gen_reg_rtx (V4HImode);
1827 h1 = gen_reg_rtx (V4HImode);
1828 h2 = gen_reg_rtx (V4HImode);
1829
1830 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1831 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1832 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1833 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1834
1835 p1 = gen_reg_rtx (V2SImode);
1836 p2 = gen_reg_rtx (V2SImode);
1837 p3 = gen_reg_rtx (V2SImode);
1838 p4 = gen_reg_rtx (V2SImode);
1839 emit_insn (gen_pmpy2_r (p1, l1, l2));
1840 emit_insn (gen_pmpy2_l (p2, l1, l2));
1841 emit_insn (gen_pmpy2_r (p3, h1, h2));
1842 emit_insn (gen_pmpy2_l (p4, h1, h2));
1843
1844 s1 = gen_reg_rtx (V2SImode);
1845 s2 = gen_reg_rtx (V2SImode);
1846 s3 = gen_reg_rtx (V2SImode);
1847 emit_insn (gen_addv2si3 (s1, p1, p2));
1848 emit_insn (gen_addv2si3 (s2, p3, p4));
1849 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1850 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1851 }
1852
1853 /* Emit the appropriate sequence for a call. */
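/* When no GP reload is involved (TARGET_NO_PIC or TARGET_AUTO_PIC) the
   "nogp" call patterns are used; otherwise the "gp" patterns are used and
   pic_offset_table_rtx is added to CALL_INSN_FUNCTION_USAGE so the GP
   register is known to be used by the call.  */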
1854
1855 void
1856 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1857 int sibcall_p)
1858 {
1859 rtx insn, b0;
1860
1861 addr = XEXP (addr, 0);
1862 addr = convert_memory_address (DImode, addr);
1863 b0 = gen_rtx_REG (DImode, R_BR (0));
1864
1865 /* ??? Should do this for functions known to bind local too. */
1866 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1867 {
1868 if (sibcall_p)
1869 insn = gen_sibcall_nogp (addr);
1870 else if (! retval)
1871 insn = gen_call_nogp (addr, b0);
1872 else
1873 insn = gen_call_value_nogp (retval, addr, b0);
1874 insn = emit_call_insn (insn);
1875 }
1876 else
1877 {
1878 if (sibcall_p)
1879 insn = gen_sibcall_gp (addr);
1880 else if (! retval)
1881 insn = gen_call_gp (addr, b0);
1882 else
1883 insn = gen_call_value_gp (retval, addr, b0);
1884 insn = emit_call_insn (insn);
1885
1886 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1887 }
1888
1889 if (sibcall_p)
1890 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1891 }
1892
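/* The helpers below maintain emitted_frame_related_regs[], which records,
   for each special frame register, the general register chosen for it once
   code depending on that choice has been emitted.  Later recomputations of
   the frame layout must then stick with the recorded choice.  */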
1893 static void
1894 reg_emitted (enum ia64_frame_regs r)
1895 {
1896 if (emitted_frame_related_regs[r] == 0)
1897 emitted_frame_related_regs[r] = current_frame_info.r[r];
1898 else
1899 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1900 }
1901
1902 static int
1903 get_reg (enum ia64_frame_regs r)
1904 {
1905 reg_emitted (r);
1906 return current_frame_info.r[r];
1907 }
1908
1909 static bool
1910 is_emitted (int regno)
1911 {
1912 enum ia64_frame_regs r;
1913
1914 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1915 if (emitted_frame_related_regs[r] == regno)
1916 return true;
1917 return false;
1918 }
1919
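/* Reload the GP register (r1) after a call that may have clobbered it:
   either copy it back from the general register in which the prologue saved
   it, or reload it from its save slot in the memory stack frame.  */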
1920 void
1921 ia64_reload_gp (void)
1922 {
1923 rtx tmp;
1924
1925 if (current_frame_info.r[reg_save_gp])
1926 {
1927 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1928 }
1929 else
1930 {
1931 HOST_WIDE_INT offset;
1932 rtx offset_r;
1933
1934 offset = (current_frame_info.spill_cfa_off
1935 + current_frame_info.spill_size);
1936 if (frame_pointer_needed)
1937 {
1938 tmp = hard_frame_pointer_rtx;
1939 offset = -offset;
1940 }
1941 else
1942 {
1943 tmp = stack_pointer_rtx;
1944 offset = current_frame_info.total_size - offset;
1945 }
1946
1947 offset_r = GEN_INT (offset);
1948 if (satisfies_constraint_I (offset_r))
1949 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1950 else
1951 {
1952 emit_move_insn (pic_offset_table_rtx, offset_r);
1953 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1954 pic_offset_table_rtx, tmp));
1955 }
1956
1957 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1958 }
1959
1960 emit_move_insn (pic_offset_table_rtx, tmp);
1961 }
1962
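/* Split a call after reload.  If ADDR is a general register, the call is
   through a function descriptor, so the code address and the new GP value
   are loaded from the descriptor; SCRATCH_R and SCRATCH_B are the scratch
   general and branch registers used for that.  RETVAL and RETADDR describe
   the call itself; NORETURN_P and SIBCALL_P describe how it is used.  */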
1963 void
1964 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1965 rtx scratch_b, int noreturn_p, int sibcall_p)
1966 {
1967 rtx insn;
1968 bool is_desc = false;
1969
1970 /* If we find we're calling through a register, then we're actually
1971 calling through a descriptor, so load up the values. */
1972 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1973 {
1974 rtx tmp;
1975 bool addr_dead_p;
1976
1977 /* ??? We are currently constrained to *not* use peep2, because
1978 we can legitimately change the global lifetime of the GP
1979 (in the form of killing where previously live). This is
1980 because a call through a descriptor doesn't use the previous
1981 value of the GP, while a direct call does, and we do not
1982 commit to either form until the split here.
1983
1984 That said, this means that we lack precise life info for
1985 whether ADDR is dead after this call. This is not terribly
1986 important, since we can fix things up essentially for free
1987 with the POST_DEC below, but it's nice to not use it when we
1988 can immediately tell it's not necessary. */
1989 addr_dead_p = ((noreturn_p || sibcall_p
1990 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1991 REGNO (addr)))
1992 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1993
1994 /* Load the code address into scratch_b. */
1995 tmp = gen_rtx_POST_INC (Pmode, addr);
1996 tmp = gen_rtx_MEM (Pmode, tmp);
1997 emit_move_insn (scratch_r, tmp);
1998 emit_move_insn (scratch_b, scratch_r);
1999
2000 /* Load the GP address. If ADDR is not dead here, then we must
2001 revert the change made above via the POST_INCREMENT. */
2002 if (!addr_dead_p)
2003 tmp = gen_rtx_POST_DEC (Pmode, addr);
2004 else
2005 tmp = addr;
2006 tmp = gen_rtx_MEM (Pmode, tmp);
2007 emit_move_insn (pic_offset_table_rtx, tmp);
2008
2009 is_desc = true;
2010 addr = scratch_b;
2011 }
2012
2013 if (sibcall_p)
2014 insn = gen_sibcall_nogp (addr);
2015 else if (retval)
2016 insn = gen_call_value_nogp (retval, addr, retaddr);
2017 else
2018 insn = gen_call_nogp (addr, retaddr);
2019 emit_call_insn (insn);
2020
2021 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2022 ia64_reload_gp ();
2023 }
2024
2025 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2026
2027 This differs from the generic code in that we know about the zero-extending
2028 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2029 also know that ld.acq+cmpxchg.rel equals a full barrier.
2030
2031 The loop we want to generate looks like
2032
2033 cmp_reg = mem;
2034 label:
2035 old_reg = cmp_reg;
2036 new_reg = cmp_reg op val;
2037 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2038 if (cmp_reg != old_reg)
2039 goto label;
2040
2041 Note that we only do the plain load from memory once. Subsequent
2042 iterations use the value loaded by the compare-and-swap pattern. */
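/* For a 4-byte operation the loop above corresponds roughly to the
   following instruction sequence (illustrative only, not the exact
   emitted output):

	ld4.acq       cmp = [mem]
     loop:
	mov           ar.ccv = cmp
	<op>          new = cmp, val
	cmpxchg4.rel  old = [mem], new, ar.ccv

   followed by comparing OLD with CMP and branching back to LOOP, with CMP
   replaced by OLD, when they differ.  */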
2043
2044 void
2045 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2046 rtx old_dst, rtx new_dst)
2047 {
2048 enum machine_mode mode = GET_MODE (mem);
2049 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2050 enum insn_code icode;
2051
2052 /* Special case for using fetchadd. */
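  /* fetchadd only accepts the increments -16, -8, -4, -1, 1, 4, 8 and 16
     (which is what fetchadd_operand checks for), so a MINUS of such a
     constant is handled by adding its negation.  */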
2053 if ((mode == SImode || mode == DImode)
2054 && (code == PLUS || code == MINUS)
2055 && fetchadd_operand (val, mode))
2056 {
2057 if (code == MINUS)
2058 val = GEN_INT (-INTVAL (val));
2059
2060 if (!old_dst)
2061 old_dst = gen_reg_rtx (mode);
2062
2063 emit_insn (gen_memory_barrier ());
2064
2065 if (mode == SImode)
2066 icode = CODE_FOR_fetchadd_acq_si;
2067 else
2068 icode = CODE_FOR_fetchadd_acq_di;
2069 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2070
2071 if (new_dst)
2072 {
2073 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2074 true, OPTAB_WIDEN);
2075 if (new_reg != new_dst)
2076 emit_move_insn (new_dst, new_reg);
2077 }
2078 return;
2079 }
2080
2081 /* Because of the volatile mem read, we get an ld.acq, which is the
2082 front half of the full barrier. The end half is the cmpxchg.rel. */
2083 gcc_assert (MEM_VOLATILE_P (mem));
2084
2085 old_reg = gen_reg_rtx (DImode);
2086 cmp_reg = gen_reg_rtx (DImode);
2087 label = gen_label_rtx ();
2088
2089 if (mode != DImode)
2090 {
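      /* Access VAL in DImode and zero-extend the initial value loaded from
	 MEM, matching the zero-extending behavior of cmpxchg and the
	 zero-extension required in ar.ccv.  */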
2091 val = simplify_gen_subreg (DImode, val, mode, 0);
2092 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2093 }
2094 else
2095 emit_move_insn (cmp_reg, mem);
2096
2097 emit_label (label);
2098
2099 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2100 emit_move_insn (old_reg, cmp_reg);
2101 emit_move_insn (ar_ccv, cmp_reg);
2102
2103 if (old_dst)
2104 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2105
2106 new_reg = cmp_reg;
2107 if (code == NOT)
2108 {
2109 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2110 code = AND;
2111 }
2112 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2113 true, OPTAB_DIRECT);
2114
2115 if (mode != DImode)
2116 new_reg = gen_lowpart (mode, new_reg);
2117 if (new_dst)
2118 emit_move_insn (new_dst, new_reg);
2119
2120 switch (mode)
2121 {
2122 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2123 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2124 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2125 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2126 default:
2127 gcc_unreachable ();
2128 }
2129
2130 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2131
2132 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2133 }
2134 \f
2135 /* Begin the assembly file. */
2136
2137 static void
2138 ia64_file_start (void)
2139 {
2140 /* Variable tracking should be run after all optimizations which change the
2141    order of insns.  It also needs a valid CFG.  This can't be done in
2142    ia64_override_options, because flag_var_tracking is finalized after
2143    that.  */
2144 ia64_flag_var_tracking = flag_var_tracking;
2145 flag_var_tracking = 0;
2146
2147 default_file_start ();
2148 emit_safe_across_calls ();
2149 }
2150
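/* Emit the .pred.safe_across_calls directive, listing as ranges the
   predicate registers (from p1 up) that are not call-used and are therefore
   preserved across calls.  */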
2151 void
2152 emit_safe_across_calls (void)
2153 {
2154 unsigned int rs, re;
2155 int out_state;
2156
2157 rs = 1;
2158 out_state = 0;
2159 while (1)
2160 {
2161 while (rs < 64 && call_used_regs[PR_REG (rs)])
2162 rs++;
2163 if (rs >= 64)
2164 break;
2165 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2166 continue;
2167 if (out_state == 0)
2168 {
2169 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2170 out_state = 1;
2171 }
2172 else
2173 fputc (',', asm_out_file);
2174 if (re == rs + 1)
2175 fprintf (asm_out_file, "p%u", rs);
2176 else
2177 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2178 rs = re + 1;
2179 }
2180 if (out_state)
2181 fputc ('\n', asm_out_file);
2182 }
2183
2184 /* Globalize a declaration. */
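/* Declarations carrying a "version_id" attribute also get an .alias
   directive tying the plain name to its versioned form, and function
   declarations additionally get a type directive.  */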
2185
2186 static void
2187 ia64_globalize_decl_name (FILE * stream, tree decl)
2188 {
2189 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2190 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2191 if (version_attr)
2192 {
2193 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2194 const char *p = TREE_STRING_POINTER (v);
2195 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2196 }
2197 targetm.asm_out.globalize_label (stream, name);
2198 if (TREE_CODE (decl) == FUNCTION_DECL)
2199 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2200 }
2201
2202 /* Helper function for ia64_compute_frame_size: find an appropriate general
2203    register in which to save special register R, taking into account the
2204    registers that this routine has already allocated.
2205    TRY_LOCALS is true if we should attempt to locate a local regnum.  */
2206
2207 static int
2208 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2209 {
2210 int regno;
2211
2212 if (emitted_frame_related_regs[r] != 0)
2213 {
2214 regno = emitted_frame_related_regs[r];
2215 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2216 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2217 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2218 else if (current_function_is_leaf
2219 && regno >= GR_REG (1) && regno <= GR_REG (31))
2220 current_frame_info.gr_used_mask |= 1 << regno;
2221
2222 return regno;
2223 }
2224
2225 /* If this is a leaf function, first try an otherwise unused
2226 call-clobbered register. */
2227 if (current_function_is_leaf)
2228 {
2229 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2230 if (! df_regs_ever_live_p (regno)
2231 && call_used_regs[regno]
2232 && ! fixed_regs[regno]
2233 && ! global_regs[regno]
2234 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2235 && ! is_emitted (regno))
2236 {
2237 current_frame_info.gr_used_mask |= 1 << regno;
2238 return regno;
2239 }
2240 }
2241
2242 if (try_locals)
2243 {
2244 regno = current_frame_info.n_local_regs;
2245 /* If there is a frame pointer, then we can't use loc79, because
2246 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2247 reg_name switching code in ia64_expand_prologue. */
2248 while (regno < (80 - frame_pointer_needed))
2249 if (! is_emitted (LOC_REG (regno++)))
2250 {
2251 current_frame_info.n_local_regs = regno;
2252 return LOC_REG (regno - 1);
2253 }
2254 }
2255
2256 /* Failed to find a general register to spill to. Must use stack. */
2257 return 0;
2258 }
2259
2260 /* In order to make for nice schedules, we try to allocate every temporary
2261 to a different register. We must of course stay away from call-saved,
2262 fixed, and global registers. We must also stay away from registers
2263 allocated in current_frame_info.gr_used_mask, since those include regs
2264 used all through the prologue.
2265
2266 Any register allocated here must be used immediately. The idea is to
2267 aid scheduling, not to solve data flow problems. */
2268
2269 static int last_scratch_gr_reg;
2270
2271 static int
2272 next_scratch_gr_reg (void)
2273 {
2274 int i, regno;
2275
2276 for (i = 0; i < 32; ++i)
2277 {
2278 regno = (last_scratch_gr_reg + i + 1) & 31;
2279 if (call_used_regs[regno]
2280 && ! fixed_regs[regno]
2281 && ! global_regs[regno]
2282 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2283 {
2284 last_scratch_gr_reg = regno;
2285 return regno;
2286 }
2287 }
2288
2289 /* There must be _something_ available. */
2290 gcc_unreachable ();
2291 }
2292
2293 /* Helper function for ia64_compute_frame_size, called through
2294 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2295
2296 static void
2297 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2298 {
2299 unsigned int regno = REGNO (reg);
2300 if (regno < 32)
2301 {
2302 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2303 for (i = 0; i < n; ++i)
2304 current_frame_info.gr_used_mask |= 1 << (regno + i);
2305 }
2306 }
2307
2308
2309 /* Compute the frame layout for the current function and record it in
2310    current_frame_info.  SIZE is the number of bytes of space needed for
2311    local variables.  */
2312
2313 static void
2314 ia64_compute_frame_size (HOST_WIDE_INT size)
2315 {
2316 HOST_WIDE_INT total_size;
2317 HOST_WIDE_INT spill_size = 0;
2318 HOST_WIDE_INT extra_spill_size = 0;
2319 HOST_WIDE_INT pretend_args_size;
2320 HARD_REG_SET mask;
2321 int n_spilled = 0;
2322 int spilled_gr_p = 0;
2323 int spilled_fr_p = 0;
2324 unsigned int regno;
2325 int min_regno;
2326 int max_regno;
2327 int i;
2328
2329 if (current_frame_info.initialized)
2330 return;
2331
2332 memset (&current_frame_info, 0, sizeof current_frame_info);
2333 CLEAR_HARD_REG_SET (mask);
2334
2335 /* Don't allocate scratches to the return register. */
2336 diddle_return_value (mark_reg_gr_used_mask, NULL);
2337
2338 /* Don't allocate scratches to the EH scratch registers. */
2339 if (cfun->machine->ia64_eh_epilogue_sp)
2340 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2341 if (cfun->machine->ia64_eh_epilogue_bsp)
2342 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2343
2344 /* Find the size of the register stack frame. We have only 80 local
2345 registers, because we reserve 8 for the inputs and 8 for the
2346 outputs. */
2347
2348 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2349 since we'll be adjusting that down later. */
2350 regno = LOC_REG (78) + ! frame_pointer_needed;
2351 for (; regno >= LOC_REG (0); regno--)
2352 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2353 break;
2354 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2355
2356 /* For functions marked with the syscall_linkage attribute, we must mark
2357 all eight input registers as in use, so that locals aren't visible to
2358 the caller. */
2359
2360 if (cfun->machine->n_varargs > 0
2361 || lookup_attribute ("syscall_linkage",
2362 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2363 current_frame_info.n_input_regs = 8;
2364 else
2365 {
2366 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2367 if (df_regs_ever_live_p (regno))
2368 break;
2369 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2370 }
2371
2372 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2373 if (df_regs_ever_live_p (regno))
2374 break;
2375 i = regno - OUT_REG (0) + 1;
2376
2377 #ifndef PROFILE_HOOK
2378 /* When -p profiling, we need one output register for the mcount argument.
2379 Likewise for -a profiling for the bb_init_func argument. For -ax
2380 profiling, we need two output registers for the two bb_init_trace_func
2381 arguments. */
2382 if (current_function_profile)
2383 i = MAX (i, 1);
2384 #endif
2385 current_frame_info.n_output_regs = i;
2386
2387 /* ??? No rotating register support yet. */
2388 current_frame_info.n_rotate_regs = 0;
2389
2390 /* Discover which registers need spilling, and how much room that
2391 will take. Begin with floating point and general registers,
2392 which will always wind up on the stack. */
2393
2394 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2395 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2396 {
2397 SET_HARD_REG_BIT (mask, regno);
2398 spill_size += 16;
2399 n_spilled += 1;
2400 spilled_fr_p = 1;
2401 }
2402
2403 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2404 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2405 {
2406 SET_HARD_REG_BIT (mask, regno);
2407 spill_size += 8;
2408 n_spilled += 1;
2409 spilled_gr_p = 1;
2410 }
2411
2412 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2413 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2414 {
2415 SET_HARD_REG_BIT (mask, regno);
2416 spill_size += 8;
2417 n_spilled += 1;
2418 }
2419
2420 /* Now come all special registers that might get saved in other
2421 general registers. */
2422
2423 if (frame_pointer_needed)
2424 {
2425 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2426 /* If we did not get a register, then we take LOC79. This is guaranteed
2427 to be free, even if regs_ever_live is already set, because this is
2428 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2429 as we don't count loc79 above. */
2430 if (current_frame_info.r[reg_fp] == 0)
2431 {
2432 current_frame_info.r[reg_fp] = LOC_REG (79);
2433 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2434 }
2435 }
2436
2437 if (! current_function_is_leaf)
2438 {
2439 /* Emit a save of BR0 if we call other functions. Do this even
2440 if this function doesn't return, as EH depends on this to be
2441 able to unwind the stack. */
2442 SET_HARD_REG_BIT (mask, BR_REG (0));
2443
2444 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2445 if (current_frame_info.r[reg_save_b0] == 0)
2446 {
2447 extra_spill_size += 8;
2448 n_spilled += 1;
2449 }
2450
2451 /* Similarly for ar.pfs. */
2452 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2453 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2454 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2455 {
2456 extra_spill_size += 8;
2457 n_spilled += 1;
2458 }
2459
2460 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2461 registers are clobbered, so we fall back to the stack. */
2462 current_frame_info.r[reg_save_gp]
2463 = (current_function_calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2464 if (current_frame_info.r[reg_save_gp] == 0)
2465 {
2466 SET_HARD_REG_BIT (mask, GR_REG (1));
2467 spill_size += 8;
2468 n_spilled += 1;
2469 }
2470 }
2471 else
2472 {
2473 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2474 {
2475 SET_HARD_REG_BIT (mask, BR_REG (0));
2476 extra_spill_size += 8;
2477 n_spilled += 1;
2478 }
2479
2480 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2481 {
2482 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2483 current_frame_info.r[reg_save_ar_pfs]
2484 = find_gr_spill (reg_save_ar_pfs, 1);
2485 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2486 {
2487 extra_spill_size += 8;
2488 n_spilled += 1;
2489 }
2490 }
2491 }
2492
2493 /* Unwind descriptor hackery: things are most efficient if we allocate
2494 consecutive GR save registers for RP, PFS, FP in that order. However,
2495 it is absolutely critical that FP get the only hard register that's
2496 guaranteed to be free, so we allocated it first. If all three did
2497 happen to be allocated hard regs, and are consecutive, rearrange them
2498 into the preferred order now.
2499
2500 If we have already emitted code for any of those registers,
2501 then it's already too late to change. */
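/* For example, if the B0, AR.PFS and FP saves happened to land in r35, r33
   and r34 respectively, the consecutive block r33-r35 is reshuffled so that
   B0 uses r33, AR.PFS r34 and FP r35.  */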
2502 min_regno = MIN (current_frame_info.r[reg_fp],
2503 MIN (current_frame_info.r[reg_save_b0],
2504 current_frame_info.r[reg_save_ar_pfs]));
2505 max_regno = MAX (current_frame_info.r[reg_fp],
2506 MAX (current_frame_info.r[reg_save_b0],
2507 current_frame_info.r[reg_save_ar_pfs]));
2508 if (min_regno > 0
2509 && min_regno + 2 == max_regno
2510 && (current_frame_info.r[reg_fp] == min_regno + 1
2511 || current_frame_info.r[reg_save_b0] == min_regno + 1
2512 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2513 && (emitted_frame_related_regs[reg_save_b0] == 0
2514 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2515 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2516 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2517 && (emitted_frame_related_regs[reg_fp] == 0
2518 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2519 {
2520 current_frame_info.r[reg_save_b0] = min_regno;
2521 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2522 current_frame_info.r[reg_fp] = min_regno + 2;
2523 }
2524
2525 /* See if we need to store the predicate register block. */
2526 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2527 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2528 break;
2529 if (regno <= PR_REG (63))
2530 {
2531 SET_HARD_REG_BIT (mask, PR_REG (0));
2532 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2533 if (current_frame_info.r[reg_save_pr] == 0)
2534 {
2535 extra_spill_size += 8;
2536 n_spilled += 1;
2537 }
2538
2539 /* ??? Mark them all as used so that register renaming and such
2540 are free to use them. */
2541 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2542 df_set_regs_ever_live (regno, true);
2543 }
2544
2545 /* If we're forced to use st8.spill, we're forced to save and restore
2546 ar.unat as well. The check for existing liveness allows inline asm
2547 to touch ar.unat. */
2548 if (spilled_gr_p || cfun->machine->n_varargs
2549 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2550 {
2551 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2552 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2553 current_frame_info.r[reg_save_ar_unat]
2554 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2555 if (current_frame_info.r[reg_save_ar_unat] == 0)
2556 {
2557 extra_spill_size += 8;
2558 n_spilled += 1;
2559 }
2560 }
2561
2562 if (df_regs_ever_live_p (AR_LC_REGNUM))
2563 {
2564 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2565 current_frame_info.r[reg_save_ar_lc]
2566 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2567 if (current_frame_info.r[reg_save_ar_lc] == 0)
2568 {
2569 extra_spill_size += 8;
2570 n_spilled += 1;
2571 }
2572 }
2573
2574 /* If we have an odd number of words of pretend arguments written to
2575 the stack, then the FR save area will be unaligned. We round the
2576 size of this area up to keep things 16 byte aligned. */
2577 if (spilled_fr_p)
2578 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2579 else
2580 pretend_args_size = crtl->args.pretend_args_size;
2581
2582 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2583 + crtl->outgoing_args_size);
2584 total_size = IA64_STACK_ALIGN (total_size);
2585
2586 /* We always use the 16-byte scratch area provided by the caller, but
2587 if we are a leaf function, there's no one to which we need to provide
2588 a scratch area. */
2589 if (current_function_is_leaf)
2590 total_size = MAX (0, total_size - 16);
2591
2592 current_frame_info.total_size = total_size;
2593 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2594 current_frame_info.spill_size = spill_size;
2595 current_frame_info.extra_spill_size = extra_spill_size;
2596 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2597 current_frame_info.n_spilled = n_spilled;
2598 current_frame_info.initialized = reload_completed;
2599 }
2600
2601 /* Compute the initial difference between the specified pair of registers. */
2602
2603 HOST_WIDE_INT
2604 ia64_initial_elimination_offset (int from, int to)
2605 {
2606 HOST_WIDE_INT offset;
2607
2608 ia64_compute_frame_size (get_frame_size ());
2609 switch (from)
2610 {
2611 case FRAME_POINTER_REGNUM:
2612 switch (to)
2613 {
2614 case HARD_FRAME_POINTER_REGNUM:
2615 if (current_function_is_leaf)
2616 offset = -current_frame_info.total_size;
2617 else
2618 offset = -(current_frame_info.total_size
2619 - crtl->outgoing_args_size - 16);
2620 break;
2621
2622 case STACK_POINTER_REGNUM:
2623 if (current_function_is_leaf)
2624 offset = 0;
2625 else
2626 offset = 16 + crtl->outgoing_args_size;
2627 break;
2628
2629 default:
2630 gcc_unreachable ();
2631 }
2632 break;
2633
2634 case ARG_POINTER_REGNUM:
2635 /* Arguments start above the 16-byte save area, unless stdarg,
2636    in which case we store through the 16-byte save area.  */
2637 switch (to)
2638 {
2639 case HARD_FRAME_POINTER_REGNUM:
2640 offset = 16 - crtl->args.pretend_args_size;
2641 break;
2642
2643 case STACK_POINTER_REGNUM:
2644 offset = (current_frame_info.total_size
2645 + 16 - crtl->args.pretend_args_size);
2646 break;
2647
2648 default:
2649 gcc_unreachable ();
2650 }
2651 break;
2652
2653 default:
2654 gcc_unreachable ();
2655 }
2656
2657 return offset;
2658 }
2659
2660 /* If there are more than a trivial number of register spills, we use
2661 two interleaved iterators so that we can get two memory references
2662 per insn group.
2663
2664 In order to simplify things in the prologue and epilogue expanders,
2665 we use helper functions to fix up the memory references after the
2666 fact with the appropriate offsets to a POST_MODIFY memory mode.
2667 The following data structure tracks the state of the two iterators
2668 while insns are being emitted. */
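/* The flow is: setup_spill_pointers picks the iterator registers,
   spill_restore_mem hands out memory references that alternate between the
   iterators and, once the following offset is known, rewrites the previous
   reference into a POST_MODIFY, and finish_spill_pointers releases the
   scratch registers again.  */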
2669
2670 struct spill_fill_data
2671 {
2672 rtx init_after; /* point at which to emit initializations */
2673 rtx init_reg[2]; /* initial base register */
2674 rtx iter_reg[2]; /* the iterator registers */
2675 rtx *prev_addr[2]; /* address of last memory use */
2676 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2677 HOST_WIDE_INT prev_off[2]; /* last offset */
2678 int n_iter; /* number of iterators in use */
2679 int next_iter; /* next iterator to use */
2680 unsigned int save_gr_used_mask;
2681 };
2682
2683 static struct spill_fill_data spill_fill_data;
2684
2685 static void
2686 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2687 {
2688 int i;
2689
2690 spill_fill_data.init_after = get_last_insn ();
2691 spill_fill_data.init_reg[0] = init_reg;
2692 spill_fill_data.init_reg[1] = init_reg;
2693 spill_fill_data.prev_addr[0] = NULL;
2694 spill_fill_data.prev_addr[1] = NULL;
2695 spill_fill_data.prev_insn[0] = NULL;
2696 spill_fill_data.prev_insn[1] = NULL;
2697 spill_fill_data.prev_off[0] = cfa_off;
2698 spill_fill_data.prev_off[1] = cfa_off;
2699 spill_fill_data.next_iter = 0;
2700 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2701
2702 spill_fill_data.n_iter = 1 + (n_spills > 2);
2703 for (i = 0; i < spill_fill_data.n_iter; ++i)
2704 {
2705 int regno = next_scratch_gr_reg ();
2706 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2707 current_frame_info.gr_used_mask |= 1 << regno;
2708 }
2709 }
2710
2711 static void
2712 finish_spill_pointers (void)
2713 {
2714 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2715 }
2716
2717 static rtx
2718 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2719 {
2720 int iter = spill_fill_data.next_iter;
2721 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2722 rtx disp_rtx = GEN_INT (disp);
2723 rtx mem;
2724
2725 if (spill_fill_data.prev_addr[iter])
2726 {
2727 if (satisfies_constraint_N (disp_rtx))
2728 {
2729 *spill_fill_data.prev_addr[iter]
2730 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2731 gen_rtx_PLUS (DImode,
2732 spill_fill_data.iter_reg[iter],
2733 disp_rtx));
2734 REG_NOTES (spill_fill_data.prev_insn[iter])
2735 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2736 REG_NOTES (spill_fill_data.prev_insn[iter]));
2737 }
2738 else
2739 {
2740 /* ??? Could use register post_modify for loads. */
2741 if (!satisfies_constraint_I (disp_rtx))
2742 {
2743 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2744 emit_move_insn (tmp, disp_rtx);
2745 disp_rtx = tmp;
2746 }
2747 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2748 spill_fill_data.iter_reg[iter], disp_rtx));
2749 }
2750 }
2751 /* Micro-optimization: if we've created a frame pointer, it's at
2752 CFA 0, which may allow the real iterator to be initialized lower,
2753 slightly increasing parallelism. Also, if there are few saves
2754 it may eliminate the iterator entirely. */
2755 else if (disp == 0
2756 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2757 && frame_pointer_needed)
2758 {
2759 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2760 set_mem_alias_set (mem, get_varargs_alias_set ());
2761 return mem;
2762 }
2763 else
2764 {
2765 rtx seq, insn;
2766
2767 if (disp == 0)
2768 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2769 spill_fill_data.init_reg[iter]);
2770 else
2771 {
2772 start_sequence ();
2773
2774 if (!satisfies_constraint_I (disp_rtx))
2775 {
2776 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2777 emit_move_insn (tmp, disp_rtx);
2778 disp_rtx = tmp;
2779 }
2780
2781 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2782 spill_fill_data.init_reg[iter],
2783 disp_rtx));
2784
2785 seq = get_insns ();
2786 end_sequence ();
2787 }
2788
2789 /* Be careful in case this would be the first insn in the function.  */
2790 if (spill_fill_data.init_after)
2791 insn = emit_insn_after (seq, spill_fill_data.init_after);
2792 else
2793 {
2794 rtx first = get_insns ();
2795 if (first)
2796 insn = emit_insn_before (seq, first);
2797 else
2798 insn = emit_insn (seq);
2799 }
2800 spill_fill_data.init_after = insn;
2801 }
2802
2803 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2804
2805 /* ??? Not all of the spills are for varargs, but some of them are.
2806 The rest of the spills belong in an alias set of their own. But
2807 it doesn't actually hurt to include them here. */
2808 set_mem_alias_set (mem, get_varargs_alias_set ());
2809
2810 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2811 spill_fill_data.prev_off[iter] = cfa_off;
2812
2813 if (++iter >= spill_fill_data.n_iter)
2814 iter = 0;
2815 spill_fill_data.next_iter = iter;
2816
2817 return mem;
2818 }
2819
2820 static void
2821 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2822 rtx frame_reg)
2823 {
2824 int iter = spill_fill_data.next_iter;
2825 rtx mem, insn;
2826
2827 mem = spill_restore_mem (reg, cfa_off);
2828 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2829 spill_fill_data.prev_insn[iter] = insn;
2830
2831 if (frame_reg)
2832 {
2833 rtx base;
2834 HOST_WIDE_INT off;
2835
2836 RTX_FRAME_RELATED_P (insn) = 1;
2837
2838 /* Don't even pretend that the unwind code can intuit its way
2839 through a pair of interleaved post_modify iterators. Just
2840 provide the correct answer. */
2841
2842 if (frame_pointer_needed)
2843 {
2844 base = hard_frame_pointer_rtx;
2845 off = - cfa_off;
2846 }
2847 else
2848 {
2849 base = stack_pointer_rtx;
2850 off = current_frame_info.total_size - cfa_off;
2851 }
2852
2853 REG_NOTES (insn)
2854 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2855 gen_rtx_SET (VOIDmode,
2856 gen_rtx_MEM (GET_MODE (reg),
2857 plus_constant (base, off)),
2858 frame_reg),
2859 REG_NOTES (insn));
2860 }
2861 }
2862
2863 static void
2864 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2865 {
2866 int iter = spill_fill_data.next_iter;
2867 rtx insn;
2868
2869 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2870 GEN_INT (cfa_off)));
2871 spill_fill_data.prev_insn[iter] = insn;
2872 }
2873
2874 /* Wrapper functions that discard the CONST_INT spill offset.  These
2875 exist so that we can give gr_spill/gr_fill the offset they need and
2876 use a consistent function interface. */
2877
2878 static rtx
2879 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2880 {
2881 return gen_movdi (dest, src);
2882 }
2883
2884 static rtx
2885 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2886 {
2887 return gen_fr_spill (dest, src);
2888 }
2889
2890 static rtx
2891 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2892 {
2893 return gen_fr_restore (dest, src);
2894 }
2895
2896 /* Called after register allocation to add any instructions needed for the
2897 prologue. Using a prologue insn is favored compared to putting all of the
2898 instructions in output_function_prologue(), since it allows the scheduler
2899 to intermix instructions with the saves of the caller saved registers. In
2900 some cases, it might be necessary to emit a barrier instruction as the last
2901 insn to prevent such scheduling.
2902
2903 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2904 so that the debug info generation code can handle them properly.
2905
2906    The register save area is laid out like so:
2907 cfa+16
2908 [ varargs spill area ]
2909 [ fr register spill area ]
2910 [ br register spill area ]
2911 [ ar register spill area ]
2912 [ pr register spill area ]
2913 [ gr register spill area ] */
2914
2915 /* ??? We get inefficient code when the frame size is larger than can fit
2916    in an adds instruction.  */
2917
2918 void
2919 ia64_expand_prologue (void)
2920 {
2921 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2922 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2923 rtx reg, alt_reg;
2924
2925 ia64_compute_frame_size (get_frame_size ());
2926 last_scratch_gr_reg = 15;
2927
2928 if (dump_file)
2929 {
2930 fprintf (dump_file, "ia64 frame related registers "
2931 "recorded in current_frame_info.r[]:\n");
2932 #define PRINTREG(a) if (current_frame_info.r[a]) \
2933 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2934 PRINTREG(reg_fp);
2935 PRINTREG(reg_save_b0);
2936 PRINTREG(reg_save_pr);
2937 PRINTREG(reg_save_ar_pfs);
2938 PRINTREG(reg_save_ar_unat);
2939 PRINTREG(reg_save_ar_lc);
2940 PRINTREG(reg_save_gp);
2941 #undef PRINTREG
2942 }
2943
2944 /* If there is no epilogue, then we don't need some prologue insns.
2945 We need to avoid emitting the dead prologue insns, because flow
2946 will complain about them. */
2947 if (optimize)
2948 {
2949 edge e;
2950 edge_iterator ei;
2951
2952 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2953 if ((e->flags & EDGE_FAKE) == 0
2954 && (e->flags & EDGE_FALLTHRU) != 0)
2955 break;
2956 epilogue_p = (e != NULL);
2957 }
2958 else
2959 epilogue_p = 1;
2960
2961 /* Set the local, input, and output register names. We need to do this
2962 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2963 half. If we use in/loc/out register names, then we get assembler errors
2964 in crtn.S because there is no alloc insn or regstk directive in there. */
2965 if (! TARGET_REG_NAMES)
2966 {
2967 int inputs = current_frame_info.n_input_regs;
2968 int locals = current_frame_info.n_local_regs;
2969 int outputs = current_frame_info.n_output_regs;
2970
2971 for (i = 0; i < inputs; i++)
2972 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2973 for (i = 0; i < locals; i++)
2974 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2975 for (i = 0; i < outputs; i++)
2976 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2977 }
2978
2979 /* Set the frame pointer register name. The regnum is logically loc79,
2980 but of course we'll not have allocated that many locals. Rather than
2981 worrying about renumbering the existing rtxs, we adjust the name. */
2982 /* ??? This code means that we can never use one local register when
2983 there is a frame pointer. loc79 gets wasted in this case, as it is
2984 renamed to a register that will never be used. See also the try_locals
2985 code in find_gr_spill. */
2986 if (current_frame_info.r[reg_fp])
2987 {
2988 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2989 reg_names[HARD_FRAME_POINTER_REGNUM]
2990 = reg_names[current_frame_info.r[reg_fp]];
2991 reg_names[current_frame_info.r[reg_fp]] = tmp;
2992 }
2993
2994 /* We don't need an alloc instruction if we've used no outputs or locals. */
2995 if (current_frame_info.n_local_regs == 0
2996 && current_frame_info.n_output_regs == 0
2997 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
2998 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2999 {
3000 /* If there is no alloc, but there are input registers used, then we
3001 need a .regstk directive. */
3002 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3003 ar_pfs_save_reg = NULL_RTX;
3004 }
3005 else
3006 {
3007 current_frame_info.need_regstk = 0;
3008
3009 if (current_frame_info.r[reg_save_ar_pfs])
3010 {
3011 regno = current_frame_info.r[reg_save_ar_pfs];
3012 reg_emitted (reg_save_ar_pfs);
3013 }
3014 else
3015 regno = next_scratch_gr_reg ();
3016 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3017
3018 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3019 GEN_INT (current_frame_info.n_input_regs),
3020 GEN_INT (current_frame_info.n_local_regs),
3021 GEN_INT (current_frame_info.n_output_regs),
3022 GEN_INT (current_frame_info.n_rotate_regs)));
3023 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3024 }
3025
3026 /* Set up frame pointer, stack pointer, and spill iterators. */
3027
3028 n_varargs = cfun->machine->n_varargs;
3029 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3030 stack_pointer_rtx, 0);
3031
3032 if (frame_pointer_needed)
3033 {
3034 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3035 RTX_FRAME_RELATED_P (insn) = 1;
3036 }
3037
3038 if (current_frame_info.total_size != 0)
3039 {
3040 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3041 rtx offset;
3042
3043 if (satisfies_constraint_I (frame_size_rtx))
3044 offset = frame_size_rtx;
3045 else
3046 {
3047 regno = next_scratch_gr_reg ();
3048 offset = gen_rtx_REG (DImode, regno);
3049 emit_move_insn (offset, frame_size_rtx);
3050 }
3051
3052 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3053 stack_pointer_rtx, offset));
3054
3055 if (! frame_pointer_needed)
3056 {
3057 RTX_FRAME_RELATED_P (insn) = 1;
3058 if (GET_CODE (offset) != CONST_INT)
3059 {
3060 REG_NOTES (insn)
3061 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3062 gen_rtx_SET (VOIDmode,
3063 stack_pointer_rtx,
3064 gen_rtx_PLUS (DImode,
3065 stack_pointer_rtx,
3066 frame_size_rtx)),
3067 REG_NOTES (insn));
3068 }
3069 }
3070
3071 /* ??? At this point we must generate a magic insn that appears to
3072 modify the stack pointer, the frame pointer, and all spill
3073 iterators. This would allow the most scheduling freedom. For
3074 now, just hard stop. */
3075 emit_insn (gen_blockage ());
3076 }
3077
3078 /* Must copy out ar.unat before doing any integer spills. */
3079 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3080 {
3081 if (current_frame_info.r[reg_save_ar_unat])
3082 {
3083 ar_unat_save_reg
3084 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3085 reg_emitted (reg_save_ar_unat);
3086 }
3087 else
3088 {
3089 alt_regno = next_scratch_gr_reg ();
3090 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3091 current_frame_info.gr_used_mask |= 1 << alt_regno;
3092 }
3093
3094 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3095 insn = emit_move_insn (ar_unat_save_reg, reg);
3096 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3097
3098 /* Even if we're not going to generate an epilogue, we still
3099 need to save the register so that EH works. */
3100 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3101 emit_insn (gen_prologue_use (ar_unat_save_reg));
3102 }
3103 else
3104 ar_unat_save_reg = NULL_RTX;
3105
3106 /* Spill all varargs registers. Do this before spilling any GR registers,
3107 since we want the UNAT bits for the GR registers to override the UNAT
3108 bits from varargs, which we don't care about. */
3109
3110 cfa_off = -16;
3111 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3112 {
3113 reg = gen_rtx_REG (DImode, regno);
3114 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3115 }
3116
3117 /* Locate the bottom of the register save area. */
3118 cfa_off = (current_frame_info.spill_cfa_off
3119 + current_frame_info.spill_size
3120 + current_frame_info.extra_spill_size);
3121
3122 /* Save the predicate register block either in a register or in memory. */
3123 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3124 {
3125 reg = gen_rtx_REG (DImode, PR_REG (0));
3126 if (current_frame_info.r[reg_save_pr] != 0)
3127 {
3128 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3129 reg_emitted (reg_save_pr);
3130 insn = emit_move_insn (alt_reg, reg);
3131
3132 /* ??? Denote pr spill/fill by a DImode move that modifies all
3133 64 hard registers. */
3134 RTX_FRAME_RELATED_P (insn) = 1;
3135 REG_NOTES (insn)
3136 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3137 gen_rtx_SET (VOIDmode, alt_reg, reg),
3138 REG_NOTES (insn));
3139
3140 /* Even if we're not going to generate an epilogue, we still
3141 need to save the register so that EH works. */
3142 if (! epilogue_p)
3143 emit_insn (gen_prologue_use (alt_reg));
3144 }
3145 else
3146 {
3147 alt_regno = next_scratch_gr_reg ();
3148 alt_reg = gen_rtx_REG (DImode, alt_regno);
3149 insn = emit_move_insn (alt_reg, reg);
3150 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3151 cfa_off -= 8;
3152 }
3153 }
3154
3155 /* Handle AR regs in numerical order. All of them get special handling. */
3156 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3157 && current_frame_info.r[reg_save_ar_unat] == 0)
3158 {
3159 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3160 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3161 cfa_off -= 8;
3162 }
3163
3164 /* The alloc insn already copied ar.pfs into a general register. The
3165 only thing we have to do now is copy that register to a stack slot
3166 if we'd not allocated a local register for the job. */
3167 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3168 && current_frame_info.r[reg_save_ar_pfs] == 0)
3169 {
3170 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3171 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3172 cfa_off -= 8;
3173 }
3174
3175 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3176 {
3177 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3178 if (current_frame_info.r[reg_save_ar_lc] != 0)
3179 {
3180 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3181 reg_emitted (reg_save_ar_lc);
3182 insn = emit_move_insn (alt_reg, reg);
3183 RTX_FRAME_RELATED_P (insn) = 1;
3184
3185 /* Even if we're not going to generate an epilogue, we still
3186 need to save the register so that EH works. */
3187 if (! epilogue_p)
3188 emit_insn (gen_prologue_use (alt_reg));
3189 }
3190 else
3191 {
3192 alt_regno = next_scratch_gr_reg ();
3193 alt_reg = gen_rtx_REG (DImode, alt_regno);
3194 emit_move_insn (alt_reg, reg);
3195 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3196 cfa_off -= 8;
3197 }
3198 }
3199
3200 /* Save the return pointer. */
3201 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3202 {
3203 reg = gen_rtx_REG (DImode, BR_REG (0));
3204 if (current_frame_info.r[reg_save_b0] != 0)
3205 {
3206 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3207 reg_emitted (reg_save_b0);
3208 insn = emit_move_insn (alt_reg, reg);
3209 RTX_FRAME_RELATED_P (insn) = 1;
3210
3211 /* Even if we're not going to generate an epilogue, we still
3212 need to save the register so that EH works. */
3213 if (! epilogue_p)
3214 emit_insn (gen_prologue_use (alt_reg));
3215 }
3216 else
3217 {
3218 alt_regno = next_scratch_gr_reg ();
3219 alt_reg = gen_rtx_REG (DImode, alt_regno);
3220 emit_move_insn (alt_reg, reg);
3221 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3222 cfa_off -= 8;
3223 }
3224 }
3225
3226 if (current_frame_info.r[reg_save_gp])
3227 {
3228 reg_emitted (reg_save_gp);
3229 insn = emit_move_insn (gen_rtx_REG (DImode,
3230 current_frame_info.r[reg_save_gp]),
3231 pic_offset_table_rtx);
3232 }
3233
3234 /* We should now be at the base of the gr/br/fr spill area. */
3235 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3236 + current_frame_info.spill_size));
3237
3238 /* Spill all general registers. */
3239 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3240 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3241 {
3242 reg = gen_rtx_REG (DImode, regno);
3243 do_spill (gen_gr_spill, reg, cfa_off, reg);
3244 cfa_off -= 8;
3245 }
3246
3247 /* Spill the rest of the BR registers. */
3248 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3249 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3250 {
3251 alt_regno = next_scratch_gr_reg ();
3252 alt_reg = gen_rtx_REG (DImode, alt_regno);
3253 reg = gen_rtx_REG (DImode, regno);
3254 emit_move_insn (alt_reg, reg);
3255 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3256 cfa_off -= 8;
3257 }
3258
3259 /* Align the frame and spill all FR registers. */
3260 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3261 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3262 {
3263 gcc_assert (!(cfa_off & 15));
3264 reg = gen_rtx_REG (XFmode, regno);
3265 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3266 cfa_off -= 16;
3267 }
3268
3269 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3270
3271 finish_spill_pointers ();
3272 }
3273
3274 /* Called after register allocation to add any instructions needed for the
3275    epilogue.  Using an epilogue insn is favored compared to putting all of
3276    the instructions in output_function_epilogue(), since it allows the
3277    scheduler to intermix instructions with the restores of the caller saved
3278    registers.  In some cases, it might be necessary to emit a barrier
3279    instruction as the last insn to prevent such scheduling.  */
3280
3281 void
3282 ia64_expand_epilogue (int sibcall_p)
3283 {
3284 rtx insn, reg, alt_reg, ar_unat_save_reg;
3285 int regno, alt_regno, cfa_off;
3286
3287 ia64_compute_frame_size (get_frame_size ());
3288
3289 /* If there is a frame pointer, then we use it instead of the stack
3290 pointer, so that the stack pointer does not need to be valid when
3291 the epilogue starts. See EXIT_IGNORE_STACK. */
3292 if (frame_pointer_needed)
3293 setup_spill_pointers (current_frame_info.n_spilled,
3294 hard_frame_pointer_rtx, 0);
3295 else
3296 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3297 current_frame_info.total_size);
3298
3299 if (current_frame_info.total_size != 0)
3300 {
3301 /* ??? At this point we must generate a magic insn that appears to
3302 modify the spill iterators and the frame pointer. This would
3303 allow the most scheduling freedom. For now, just hard stop. */
3304 emit_insn (gen_blockage ());
3305 }
3306
3307 /* Locate the bottom of the register save area. */
3308 cfa_off = (current_frame_info.spill_cfa_off
3309 + current_frame_info.spill_size
3310 + current_frame_info.extra_spill_size);
3311
3312 /* Restore the predicate registers. */
3313 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3314 {
3315 if (current_frame_info.r[reg_save_pr] != 0)
3316 {
3317 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3318 reg_emitted (reg_save_pr);
3319 }
3320 else
3321 {
3322 alt_regno = next_scratch_gr_reg ();
3323 alt_reg = gen_rtx_REG (DImode, alt_regno);
3324 do_restore (gen_movdi_x, alt_reg, cfa_off);
3325 cfa_off -= 8;
3326 }
3327 reg = gen_rtx_REG (DImode, PR_REG (0));
3328 emit_move_insn (reg, alt_reg);
3329 }
3330
3331 /* Restore the application registers. */
3332
3333 /* Load the saved unat from the stack, but do not restore it until
3334 after the GRs have been restored. */
3335 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3336 {
3337 if (current_frame_info.r[reg_save_ar_unat] != 0)
3338 {
3339 ar_unat_save_reg
3340 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3341 reg_emitted (reg_save_ar_unat);
3342 }
3343 else
3344 {
3345 alt_regno = next_scratch_gr_reg ();
3346 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3347 current_frame_info.gr_used_mask |= 1 << alt_regno;
3348 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3349 cfa_off -= 8;
3350 }
3351 }
3352 else
3353 ar_unat_save_reg = NULL_RTX;
3354
3355 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3356 {
3357 reg_emitted (reg_save_ar_pfs);
3358 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3359 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3360 emit_move_insn (reg, alt_reg);
3361 }
3362 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3363 {
3364 alt_regno = next_scratch_gr_reg ();
3365 alt_reg = gen_rtx_REG (DImode, alt_regno);
3366 do_restore (gen_movdi_x, alt_reg, cfa_off);
3367 cfa_off -= 8;
3368 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3369 emit_move_insn (reg, alt_reg);
3370 }
3371
3372 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3373 {
3374 if (current_frame_info.r[reg_save_ar_lc] != 0)
3375 {
3376 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3377 reg_emitted (reg_save_ar_lc);
3378 }
3379 else
3380 {
3381 alt_regno = next_scratch_gr_reg ();
3382 alt_reg = gen_rtx_REG (DImode, alt_regno);
3383 do_restore (gen_movdi_x, alt_reg, cfa_off);
3384 cfa_off -= 8;
3385 }
3386 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3387 emit_move_insn (reg, alt_reg);
3388 }
3389
3390 /* Restore the return pointer. */
3391 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3392 {
3393 if (current_frame_info.r[reg_save_b0] != 0)
3394 {
3395 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3396 reg_emitted (reg_save_b0);
3397 }
3398 else
3399 {
3400 alt_regno = next_scratch_gr_reg ();
3401 alt_reg = gen_rtx_REG (DImode, alt_regno);
3402 do_restore (gen_movdi_x, alt_reg, cfa_off);
3403 cfa_off -= 8;
3404 }
3405 reg = gen_rtx_REG (DImode, BR_REG (0));
3406 emit_move_insn (reg, alt_reg);
3407 }
3408
3409 /* We should now be at the base of the gr/br/fr spill area. */
3410 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3411 + current_frame_info.spill_size));
3412
3413 /* The GP may be stored on the stack in the prologue, but it's
3414 never restored in the epilogue. Skip the stack slot. */
3415 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3416 cfa_off -= 8;
3417
3418 /* Restore all general registers. */
3419 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3420 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3421 {
3422 reg = gen_rtx_REG (DImode, regno);
3423 do_restore (gen_gr_restore, reg, cfa_off);
3424 cfa_off -= 8;
3425 }
3426
3427 /* Restore the branch registers. */
3428 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3429 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3430 {
3431 alt_regno = next_scratch_gr_reg ();
3432 alt_reg = gen_rtx_REG (DImode, alt_regno);
3433 do_restore (gen_movdi_x, alt_reg, cfa_off);
3434 cfa_off -= 8;
3435 reg = gen_rtx_REG (DImode, regno);
3436 emit_move_insn (reg, alt_reg);
3437 }
3438
3439 /* Restore floating point registers. */
3440 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3441 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3442 {
3443 gcc_assert (!(cfa_off & 15));
3444 reg = gen_rtx_REG (XFmode, regno);
3445 do_restore (gen_fr_restore_x, reg, cfa_off);
3446 cfa_off -= 16;
3447 }
3448
3449 /* Restore ar.unat for real. */
3450 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3451 {
3452 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3453 emit_move_insn (reg, ar_unat_save_reg);
3454 }
3455
3456 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3457
3458 finish_spill_pointers ();
3459
3460 if (current_frame_info.total_size
3461 || cfun->machine->ia64_eh_epilogue_sp
3462 || frame_pointer_needed)
3463 {
3464 /* ??? At this point we must generate a magic insn that appears to
3465 modify the spill iterators, the stack pointer, and the frame
3466 pointer. This would allow the most scheduling freedom. For now,
3467 just hard stop. */
3468 emit_insn (gen_blockage ());
3469 }
3470
3471 if (cfun->machine->ia64_eh_epilogue_sp)
3472 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3473 else if (frame_pointer_needed)
3474 {
3475 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3476 RTX_FRAME_RELATED_P (insn) = 1;
3477 }
3478 else if (current_frame_info.total_size)
3479 {
3480 rtx offset, frame_size_rtx;
3481
3482 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3483 if (satisfies_constraint_I (frame_size_rtx))
3484 offset = frame_size_rtx;
3485 else
3486 {
3487 regno = next_scratch_gr_reg ();
3488 offset = gen_rtx_REG (DImode, regno);
3489 emit_move_insn (offset, frame_size_rtx);
3490 }
3491
3492 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3493 offset));
3494
3495 RTX_FRAME_RELATED_P (insn) = 1;
3496 if (GET_CODE (offset) != CONST_INT)
3497 {
3498 REG_NOTES (insn)
3499 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3500 gen_rtx_SET (VOIDmode,
3501 stack_pointer_rtx,
3502 gen_rtx_PLUS (DImode,
3503 stack_pointer_rtx,
3504 frame_size_rtx)),
3505 REG_NOTES (insn));
3506 }
3507 }
3508
3509 if (cfun->machine->ia64_eh_epilogue_bsp)
3510 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3511
3512 if (! sibcall_p)
3513 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3514 else
3515 {
3516 int fp = GR_REG (2);
3517 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is
3518    the first available call-clobbered register.  If there was a frame
3519    pointer register, we may have swapped the names of r2 and
3520    HARD_FRAME_POINTER_REGNUM, so we have to make sure we're using the
3521    string "r2" when emitting the register name for the assembler.  */
3522 if (current_frame_info.r[reg_fp]
3523 && current_frame_info.r[reg_fp] == GR_REG (2))
3524 fp = HARD_FRAME_POINTER_REGNUM;
3525
3526 /* We must emit an alloc to force the input registers to become output
3527 registers. Otherwise, if the callee tries to pass its parameters
3528 through to another call without an intervening alloc, then these
3529 values get lost. */
3530 /* ??? We don't need to preserve all input registers. We only need to
3531 preserve those input registers used as arguments to the sibling call.
3532 It is unclear how to compute that number here. */
3533 if (current_frame_info.n_input_regs != 0)
3534 {
3535 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3536 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3537 const0_rtx, const0_rtx,
3538 n_inputs, const0_rtx));
3539 RTX_FRAME_RELATED_P (insn) = 1;
3540 }
3541 }
3542 }
3543
3544 /* Return 1 if br.ret can do all the work required to return from a
3545 function. */
3546
3547 int
3548 ia64_direct_return (void)
3549 {
3550 if (reload_completed && ! frame_pointer_needed)
3551 {
3552 ia64_compute_frame_size (get_frame_size ());
3553
3554 return (current_frame_info.total_size == 0
3555 && current_frame_info.n_spilled == 0
3556 && current_frame_info.r[reg_save_b0] == 0
3557 && current_frame_info.r[reg_save_pr] == 0
3558 && current_frame_info.r[reg_save_ar_pfs] == 0
3559 && current_frame_info.r[reg_save_ar_unat] == 0
3560 && current_frame_info.r[reg_save_ar_lc] == 0);
3561 }
3562 return 0;
3563 }
3564
3565 /* Return the magic cookie that we use to hold the return address
3566 during early compilation. */
3567
3568 rtx
3569 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3570 {
3571 if (count != 0)
3572 return NULL;
3573 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3574 }
3575
3576 /* Split this value after reload, now that we know where the return
3577 address is saved. */
3578
3579 void
3580 ia64_split_return_addr_rtx (rtx dest)
3581 {
3582 rtx src;
3583
3584 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3585 {
3586 if (current_frame_info.r[reg_save_b0] != 0)
3587 {
3588 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3589 reg_emitted (reg_save_b0);
3590 }
3591 else
3592 {
3593 HOST_WIDE_INT off;
3594 unsigned int regno;
3595 rtx off_r;
3596
3597 /* Compute offset from CFA for BR0. */
3598 /* ??? Must be kept in sync with ia64_expand_prologue. */
3599 off = (current_frame_info.spill_cfa_off
3600 + current_frame_info.spill_size);
3601 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3602 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3603 off -= 8;
3604
3605 /* Convert CFA offset to a register based offset. */
3606 if (frame_pointer_needed)
3607 src = hard_frame_pointer_rtx;
3608 else
3609 {
3610 src = stack_pointer_rtx;
3611 off += current_frame_info.total_size;
3612 }
3613
3614 /* Load address into scratch register. */
3615 off_r = GEN_INT (off);
3616 if (satisfies_constraint_I (off_r))
3617 emit_insn (gen_adddi3 (dest, src, off_r));
3618 else
3619 {
3620 emit_move_insn (dest, off_r);
3621 emit_insn (gen_adddi3 (dest, src, dest));
3622 }
3623
3624 src = gen_rtx_MEM (Pmode, dest);
3625 }
3626 }
3627 else
3628 src = gen_rtx_REG (DImode, BR_REG (0));
3629
3630 emit_move_insn (dest, src);
3631 }
3632
3633 int
3634 ia64_hard_regno_rename_ok (int from, int to)
3635 {
3636 /* Don't clobber any of the registers we reserved for the prologue. */
3637 enum ia64_frame_regs r;
3638
3639 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3640 if (to == current_frame_info.r[r]
3641 || from == current_frame_info.r[r]
3642 || to == emitted_frame_related_regs[r]
3643 || from == emitted_frame_related_regs[r])
3644 return 0;
3645
3646 /* Don't use output registers outside the register frame. */
3647 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3648 return 0;
3649
3650 /* Retain even/oddness on predicate register pairs. */
3651 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3652 return (from & 1) == (to & 1);
3653
3654 return 1;
3655 }
3656
3657 /* Target hook for assembling integer objects. Handle word-sized
3658 aligned objects and detect the cases when @fptr is needed. */
3659
3660 static bool
3661 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3662 {
3663 if (size == POINTER_SIZE / BITS_PER_UNIT
3664 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3665 && GET_CODE (x) == SYMBOL_REF
3666 && SYMBOL_REF_FUNCTION_P (x))
3667 {
3668 static const char * const directive[2][2] = {
3669 /* 64-bit pointer */ /* 32-bit pointer */
3670 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3671 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3672 };
3673 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3674 output_addr_const (asm_out_file, x);
3675 fputs (")\n", asm_out_file);
3676 return true;
3677 }
3678 return default_assemble_integer (x, size, aligned_p);
3679 }
3680
3681 /* Emit the function prologue. */
3682
3683 static void
3684 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3685 {
3686 int mask, grsave, grsave_prev;
3687
3688 if (current_frame_info.need_regstk)
3689 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3690 current_frame_info.n_input_regs,
3691 current_frame_info.n_local_regs,
3692 current_frame_info.n_output_regs,
3693 current_frame_info.n_rotate_regs);
3694
3695 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3696 return;
3697
3698 /* Emit the .prologue directive. */
3699
3700 mask = 0;
3701 grsave = grsave_prev = 0;
3702 if (current_frame_info.r[reg_save_b0] != 0)
3703 {
3704 mask |= 8;
3705 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3706 }
3707 if (current_frame_info.r[reg_save_ar_pfs] != 0
3708 && (grsave_prev == 0
3709 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3710 {
3711 mask |= 4;
3712 if (grsave_prev == 0)
3713 grsave = current_frame_info.r[reg_save_ar_pfs];
3714 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3715 }
3716 if (current_frame_info.r[reg_fp] != 0
3717 && (grsave_prev == 0
3718 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3719 {
3720 mask |= 2;
3721 if (grsave_prev == 0)
3722 grsave = HARD_FRAME_POINTER_REGNUM;
3723 grsave_prev = current_frame_info.r[reg_fp];
3724 }
3725 if (current_frame_info.r[reg_save_pr] != 0
3726 && (grsave_prev == 0
3727 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3728 {
3729 mask |= 1;
3730 if (grsave_prev == 0)
3731 grsave = current_frame_info.r[reg_save_pr];
3732 }
3733
3734 if (mask && TARGET_GNU_AS)
3735 fprintf (file, "\t.prologue %d, %d\n", mask,
3736 ia64_dbx_register_number (grsave));
3737 else
3738 fputs ("\t.prologue\n", file);
3739
3740 /* Emit a .spill directive, if necessary, to relocate the base of
3741 the register spill area. */
3742 if (current_frame_info.spill_cfa_off != -16)
3743 fprintf (file, "\t.spill %ld\n",
3744 (long) (current_frame_info.spill_cfa_off
3745 + current_frame_info.spill_size));
3746 }
3747
3748 /* Emit the .body directive at the scheduled end of the prologue. */
3749
3750 static void
3751 ia64_output_function_end_prologue (FILE *file)
3752 {
3753 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3754 return;
3755
3756 fputs ("\t.body\n", file);
3757 }
3758
3759 /* Emit the function epilogue. */
3760
3761 static void
3762 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3763 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3764 {
3765 int i;
3766
3767 if (current_frame_info.r[reg_fp])
3768 {
3769 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3770 reg_names[HARD_FRAME_POINTER_REGNUM]
3771 = reg_names[current_frame_info.r[reg_fp]];
3772 reg_names[current_frame_info.r[reg_fp]] = tmp;
3773 reg_emitted (reg_fp);
3774 }
3775 if (! TARGET_REG_NAMES)
3776 {
3777 for (i = 0; i < current_frame_info.n_input_regs; i++)
3778 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3779 for (i = 0; i < current_frame_info.n_local_regs; i++)
3780 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3781 for (i = 0; i < current_frame_info.n_output_regs; i++)
3782 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3783 }
3784
3785 current_frame_info.initialized = 0;
3786 }
3787
3788 int
3789 ia64_dbx_register_number (int regno)
3790 {
3791 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3792 from its home at loc79 to something inside the register frame. We
3793 must perform the same renumbering here for the debug info. */
3794 if (current_frame_info.r[reg_fp])
3795 {
3796 if (regno == HARD_FRAME_POINTER_REGNUM)
3797 regno = current_frame_info.r[reg_fp];
3798 else if (regno == current_frame_info.r[reg_fp])
3799 regno = HARD_FRAME_POINTER_REGNUM;
3800 }
3801
3802 if (IN_REGNO_P (regno))
3803 return 32 + regno - IN_REG (0);
3804 else if (LOC_REGNO_P (regno))
3805 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3806 else if (OUT_REGNO_P (regno))
3807 return (32 + current_frame_info.n_input_regs
3808 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3809 else
3810 return regno;
3811 }
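/* Illustrative sketch, not part of this file: the stacked-register
   renumbering above, applied to a frame with 2 input and 3 local
   registers.  in0..in1 map to 32..33, loc0..loc2 to 34..36 and out0 to
   37.  Guarded with #if 0 so it does not affect the build.  */
#if 0
static int
example_debug_regno (int kind, int index, int n_inputs, int n_locals)
{
  /* kind: 0 = input, 1 = local, 2 = output register.  */
  switch (kind)
    {
    case 0:  return 32 + index;
    case 1:  return 32 + n_inputs + index;
    default: return 32 + n_inputs + n_locals + index;
    }
}
/* example_debug_regno (2, 0, 2, 3) == 37 */
#endif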
3812
3813 void
3814 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3815 {
3816 rtx addr_reg, eight = GEN_INT (8);
3817
3818 /* The Intel assembler requires that the global __ia64_trampoline symbol
3819      be declared explicitly.  */
3820 if (!TARGET_GNU_AS)
3821 {
3822 static bool declared_ia64_trampoline = false;
3823
3824 if (!declared_ia64_trampoline)
3825 {
3826 declared_ia64_trampoline = true;
3827 (*targetm.asm_out.globalize_label) (asm_out_file,
3828 "__ia64_trampoline");
3829 }
3830 }
3831
3832 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3833 addr = convert_memory_address (Pmode, addr);
3834 fnaddr = convert_memory_address (Pmode, fnaddr);
3835 static_chain = convert_memory_address (Pmode, static_chain);
3836
3837 /* Load up our iterator. */
3838 addr_reg = gen_reg_rtx (Pmode);
3839 emit_move_insn (addr_reg, addr);
3840
3841 /* The first two words are the fake descriptor:
3842 __ia64_trampoline, ADDR+16. */
3843 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3844 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3845 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3846
3847 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3848 copy_to_reg (plus_constant (addr, 16)));
3849 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3850
3851 /* The third word is the target descriptor. */
3852 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3853 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3854
3855 /* The fourth word is the static chain. */
3856 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3857 }
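/* For reference, the trampoline laid out by the stores above (offsets are
   from ADDR; this summary is editorial, not generated code):
     [ADDR +  0]  __ia64_trampoline   \  fake function descriptor
     [ADDR +  8]  ADDR + 16           /
     [ADDR + 16]  FNADDR              -- target descriptor word
     [ADDR + 24]  STATIC_CHAIN  */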
3858 \f
3859 /* Do any needed setup for a variadic function. CUM has not been updated
3860 for the last named argument which has type TYPE and mode MODE.
3861
3862 We generate the actual spill instructions during prologue generation. */
3863
3864 static void
3865 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3866 tree type, int * pretend_size,
3867 int second_time ATTRIBUTE_UNUSED)
3868 {
3869 CUMULATIVE_ARGS next_cum = *cum;
3870
3871 /* Skip the current argument. */
3872 ia64_function_arg_advance (&next_cum, mode, type, 1);
3873
3874 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3875 {
3876 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3877 *pretend_size = n * UNITS_PER_WORD;
3878 cfun->machine->n_varargs = n;
3879 }
3880 }
3881
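/* Illustrative sketch, not part of this file: the pretend-size arithmetic
   above in isolation, assuming 8 argument slots of 8 bytes each as on
   ia64 (the assumed MAX_ARGUMENT_SLOTS and UNITS_PER_WORD values).
   Guarded with #if 0 so it does not affect the build.  */
#if 0
static int
example_varargs_pretend_size (int named_slots)
{
  const int max_argument_slots = 8;   /* assumed MAX_ARGUMENT_SLOTS */
  const int units_per_word = 8;       /* assumed UNITS_PER_WORD */

  if (named_slots >= max_argument_slots)
    return 0;
  return (max_argument_slots - named_slots) * units_per_word;
  /* example_varargs_pretend_size (3) == 40 */
}
#endif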
3882 /* Check whether TYPE is a homogeneous floating point aggregate. If
3883 it is, return the mode of the floating point type that appears
3884    in all leaves.  If it is not, return VOIDmode.
3885
3886    An aggregate is a homogeneous floating point aggregate if all
3887    fields/elements in it have the same floating point type (e.g.,
3888 SFmode). 128-bit quad-precision floats are excluded.
3889
3890 Variable sized aggregates should never arrive here, since we should
3891 have already decided to pass them by reference. Top-level zero-sized
3892 aggregates are excluded because our parallels crash the middle-end. */
3893
3894 static enum machine_mode
3895 hfa_element_mode (const_tree type, bool nested)
3896 {
3897 enum machine_mode element_mode = VOIDmode;
3898 enum machine_mode mode;
3899 enum tree_code code = TREE_CODE (type);
3900 int know_element_mode = 0;
3901 tree t;
3902
3903 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3904 return VOIDmode;
3905
3906 switch (code)
3907 {
3908 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3909 case BOOLEAN_TYPE: case POINTER_TYPE:
3910 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3911 case LANG_TYPE: case FUNCTION_TYPE:
3912 return VOIDmode;
3913
3914 /* Fortran complex types are supposed to be HFAs, so we need to handle
3915 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3916 types though. */
3917 case COMPLEX_TYPE:
3918 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3919 && TYPE_MODE (type) != TCmode)
3920 return GET_MODE_INNER (TYPE_MODE (type));
3921 else
3922 return VOIDmode;
3923
3924 case REAL_TYPE:
3925 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3926 mode if this is contained within an aggregate. */
3927 if (nested && TYPE_MODE (type) != TFmode)
3928 return TYPE_MODE (type);
3929 else
3930 return VOIDmode;
3931
3932 case ARRAY_TYPE:
3933 return hfa_element_mode (TREE_TYPE (type), 1);
3934
3935 case RECORD_TYPE:
3936 case UNION_TYPE:
3937 case QUAL_UNION_TYPE:
3938 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3939 {
3940 if (TREE_CODE (t) != FIELD_DECL)
3941 continue;
3942
3943 mode = hfa_element_mode (TREE_TYPE (t), 1);
3944 if (know_element_mode)
3945 {
3946 if (mode != element_mode)
3947 return VOIDmode;
3948 }
3949 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3950 return VOIDmode;
3951 else
3952 {
3953 know_element_mode = 1;
3954 element_mode = mode;
3955 }
3956 }
3957 return element_mode;
3958
3959 default:
3960 /* If we reach here, we probably have some front-end specific type
3961 that the backend doesn't know about. This can happen via the
3962 aggregate_value_p call in init_function_start. All we can do is
3963 ignore unknown tree types. */
3964 return VOIDmode;
3965 }
3966
3967 return VOIDmode;
3968 }
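/* Illustrative sketch, not part of this file: a toy version of the
   uniformity check above, operating on a flat list of leaf modes instead
   of trees.  struct { float x, y, z; } yields SFmode, struct { double re,
   im; } yields DFmode, while a mix of float and double (or any TFmode or
   non-FP leaf) yields no HFA.  Guarded with #if 0 so it does not affect
   the build.  */
#if 0
enum toy_mode { TOY_NONE = 0, TOY_SF, TOY_DF, TOY_XF, TOY_TF };

static enum toy_mode
example_hfa_element_mode (const enum toy_mode *leaves, int n)
{
  enum toy_mode elt = TOY_NONE;
  int i;

  for (i = 0; i < n; i++)
    {
      if (leaves[i] == TOY_NONE || leaves[i] == TOY_TF)
	return TOY_NONE;              /* non-FP leaf or quad precision */
      if (elt == TOY_NONE)
	elt = leaves[i];              /* first leaf fixes the element mode */
      else if (leaves[i] != elt)
	return TOY_NONE;              /* mixed element modes */
    }
  return elt;
}
#endif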
3969
3970 /* Return the number of words required to hold a quantity of TYPE and MODE
3971 when passed as an argument. */
3972 static int
3973 ia64_function_arg_words (tree type, enum machine_mode mode)
3974 {
3975 int words;
3976
3977 if (mode == BLKmode)
3978 words = int_size_in_bytes (type);
3979 else
3980 words = GET_MODE_SIZE (mode);
3981
3982 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3983 }
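/* Illustrative sketch, not part of this file: the same round-up-to-words
   rule in isolation, assuming 8-byte words as on ia64.  A 12-byte BLKmode
   aggregate therefore occupies two argument words.  Guarded with #if 0 so
   it does not affect the build.  */
#if 0
static int
example_arg_words (int byte_size)
{
  const int units_per_word = 8;       /* assumed UNITS_PER_WORD */
  return (byte_size + units_per_word - 1) / units_per_word;
  /* example_arg_words (12) == 2, example_arg_words (8) == 1 */
}
#endif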
3984
3985 /* Return the number of registers that should be skipped so the current
3986 argument (described by TYPE and WORDS) will be properly aligned.
3987
3988 Integer and float arguments larger than 8 bytes start at the next
3989 even boundary. Aggregates larger than 8 bytes start at the next
3990 even boundary if the aggregate has 16 byte alignment. Note that
3991 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3992 but are still to be aligned in registers.
3993
3994 ??? The ABI does not specify how to handle aggregates with
3995 alignment from 9 to 15 bytes, or greater than 16. We handle them
3996 all as if they had 16 byte alignment. Such aggregates can occur
3997 only if gcc extensions are used. */
3998 static int
3999 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4000 {
4001 if ((cum->words & 1) == 0)
4002 return 0;
4003
4004 if (type
4005 && TREE_CODE (type) != INTEGER_TYPE
4006 && TREE_CODE (type) != REAL_TYPE)
4007 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4008 else
4009 return words > 1;
4010 }
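/* Illustrative sketch, not part of this file: the slot-skip rule above.
   With an odd number of slots already used, an aggregate aligned to more
   than 64 bits, or an integer/FP scalar wider than one word, skips a slot
   so that it starts on an even slot boundary.  Guarded with #if 0 so it
   does not affect the build.  */
#if 0
static int
example_arg_offset (int words_used, int is_scalar, int arg_words,
		    int arg_align_bits)
{
  if ((words_used & 1) == 0)
    return 0;                         /* already at an even slot */
  if (!is_scalar)
    return arg_align_bits > 64;       /* aggregates: test the alignment */
  return arg_words > 1;               /* integer/FP scalars: test the size */
}
/* example_arg_offset (3, 0, 2, 128) == 1, example_arg_offset (3, 1, 1, 64) == 0 */
#endif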
4011
4012 /* Return rtx for register where argument is passed, or zero if it is passed
4013 on the stack. */
4014 /* ??? 128-bit quad-precision floats are always passed in general
4015 registers. */
4016
4017 rtx
4018 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4019 int named, int incoming)
4020 {
4021 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4022 int words = ia64_function_arg_words (type, mode);
4023 int offset = ia64_function_arg_offset (cum, type, words);
4024 enum machine_mode hfa_mode = VOIDmode;
4025
4026 /* If all argument slots are used, then it must go on the stack. */
4027 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4028 return 0;
4029
4030 /* Check for and handle homogeneous FP aggregates. */
4031 if (type)
4032 hfa_mode = hfa_element_mode (type, 0);
4033
4034 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4035 and unprototyped hfas are passed specially. */
4036 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4037 {
4038 rtx loc[16];
4039 int i = 0;
4040 int fp_regs = cum->fp_regs;
4041 int int_regs = cum->words + offset;
4042 int hfa_size = GET_MODE_SIZE (hfa_mode);
4043 int byte_size;
4044 int args_byte_size;
4045
4046 /* If prototyped, pass it in FR regs then GR regs.
4047 If not prototyped, pass it in both FR and GR regs.
4048
4049 If this is an SFmode aggregate, then it is possible to run out of
4050 FR regs while GR regs are still left. In that case, we pass the
4051 remaining part in the GR regs. */
4052
4053 /* Fill the FP regs. We do this always. We stop if we reach the end
4054 of the argument, the last FP register, or the last argument slot. */
4055
4056 byte_size = ((mode == BLKmode)
4057 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4058 args_byte_size = int_regs * UNITS_PER_WORD;
4059 offset = 0;
4060 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4061 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4062 {
4063 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4064 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4065 + fp_regs)),
4066 GEN_INT (offset));
4067 offset += hfa_size;
4068 args_byte_size += hfa_size;
4069 fp_regs++;
4070 }
4071
4072 /* If no prototype, then the whole thing must go in GR regs. */
4073 if (! cum->prototype)
4074 offset = 0;
4075 /* If this is an SFmode aggregate, then we might have some left over
4076 that needs to go in GR regs. */
4077 else if (byte_size != offset)
4078 int_regs += offset / UNITS_PER_WORD;
4079
4080 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4081
4082 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4083 {
4084 enum machine_mode gr_mode = DImode;
4085 unsigned int gr_size;
4086
4087 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4088 then this goes in a GR reg left adjusted/little endian, right
4089 adjusted/big endian. */
4090 /* ??? Currently this is handled wrong, because 4-byte hunks are
4091 always right adjusted/little endian. */
4092 if (offset & 0x4)
4093 gr_mode = SImode;
4094 /* If we have an even 4 byte hunk because the aggregate is a
4095 multiple of 4 bytes in size, then this goes in a GR reg right
4096 adjusted/little endian. */
4097 else if (byte_size - offset == 4)
4098 gr_mode = SImode;
4099
4100 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4101 gen_rtx_REG (gr_mode, (basereg
4102 + int_regs)),
4103 GEN_INT (offset));
4104
4105 gr_size = GET_MODE_SIZE (gr_mode);
4106 offset += gr_size;
4107 if (gr_size == UNITS_PER_WORD
4108 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4109 int_regs++;
4110 else if (gr_size > UNITS_PER_WORD)
4111 int_regs += gr_size / UNITS_PER_WORD;
4112 }
4113 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4114 }
4115
4116   /* Integral values and aggregates go in general registers.  If we have run out of
4117 FR registers, then FP values must also go in general registers. This can
4118 happen when we have a SFmode HFA. */
4119 else if (mode == TFmode || mode == TCmode
4120 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4121 {
4122 int byte_size = ((mode == BLKmode)
4123 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4124 if (BYTES_BIG_ENDIAN
4125 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4126 && byte_size < UNITS_PER_WORD
4127 && byte_size > 0)
4128 {
4129 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4130 gen_rtx_REG (DImode,
4131 (basereg + cum->words
4132 + offset)),
4133 const0_rtx);
4134 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4135 }
4136 else
4137 return gen_rtx_REG (mode, basereg + cum->words + offset);
4138
4139 }
4140
4141 /* If there is a prototype, then FP values go in a FR register when
4142 named, and in a GR register when unnamed. */
4143 else if (cum->prototype)
4144 {
4145 if (named)
4146 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4147 /* In big-endian mode, an anonymous SFmode value must be represented
4148 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4149 the value into the high half of the general register. */
4150 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4151 return gen_rtx_PARALLEL (mode,
4152 gen_rtvec (1,
4153 gen_rtx_EXPR_LIST (VOIDmode,
4154 gen_rtx_REG (DImode, basereg + cum->words + offset),
4155 const0_rtx)));
4156 else
4157 return gen_rtx_REG (mode, basereg + cum->words + offset);
4158 }
4159 /* If there is no prototype, then FP values go in both FR and GR
4160 registers. */
4161 else
4162 {
4163 /* See comment above. */
4164 enum machine_mode inner_mode =
4165 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4166
4167 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4168 gen_rtx_REG (mode, (FR_ARG_FIRST
4169 + cum->fp_regs)),
4170 const0_rtx);
4171 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4172 gen_rtx_REG (inner_mode,
4173 (basereg + cum->words
4174 + offset)),
4175 const0_rtx);
4176
4177 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4178 }
4179 }
4180
4181 /* Return number of bytes, at the beginning of the argument, that must be
4182 /* Return the number of bytes, at the beginning of the argument, that must be
4183    put in registers.  0 if the argument is entirely in registers or entirely
4184
4185 static int
4186 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4187 tree type, bool named ATTRIBUTE_UNUSED)
4188 {
4189 int words = ia64_function_arg_words (type, mode);
4190 int offset = ia64_function_arg_offset (cum, type, words);
4191
4192 /* If all argument slots are used, then it must go on the stack. */
4193 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4194 return 0;
4195
4196 /* It doesn't matter whether the argument goes in FR or GR regs. If
4197 it fits within the 8 argument slots, then it goes entirely in
4198 registers. If it extends past the last argument slot, then the rest
4199 goes on the stack. */
4200
4201 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4202 return 0;
4203
4204 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4205 }
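/* Illustrative sketch, not part of this file: the register/stack split
   computed above, ignoring the even-slot offset for brevity and assuming
   8 slots of 8 bytes.  A 4-word argument that starts in slot 6 gets its
   first two words (16 bytes) in registers and the rest on the stack.
   Guarded with #if 0 so it does not affect the build.  */
#if 0
static int
example_partial_bytes (int words_used, int arg_words)
{
  const int max_argument_slots = 8;   /* assumed MAX_ARGUMENT_SLOTS */
  const int units_per_word = 8;       /* assumed UNITS_PER_WORD */

  if (words_used >= max_argument_slots)
    return 0;                                 /* entirely on the stack */
  if (words_used + arg_words <= max_argument_slots)
    return 0;                                 /* entirely in registers */
  return (max_argument_slots - words_used) * units_per_word;
  /* example_partial_bytes (6, 4) == 16 */
}
#endif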
4206
4207 /* Update CUM to point after this argument. This is patterned after
4208 ia64_function_arg. */
4209
4210 void
4211 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4212 tree type, int named)
4213 {
4214 int words = ia64_function_arg_words (type, mode);
4215 int offset = ia64_function_arg_offset (cum, type, words);
4216 enum machine_mode hfa_mode = VOIDmode;
4217
4218 /* If all arg slots are already full, then there is nothing to do. */
4219 if (cum->words >= MAX_ARGUMENT_SLOTS)
4220 return;
4221
4222 cum->words += words + offset;
4223
4224 /* Check for and handle homogeneous FP aggregates. */
4225 if (type)
4226 hfa_mode = hfa_element_mode (type, 0);
4227
4228 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4229 and unprototyped hfas are passed specially. */
4230 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4231 {
4232 int fp_regs = cum->fp_regs;
4233 /* This is the original value of cum->words + offset. */
4234 int int_regs = cum->words - words;
4235 int hfa_size = GET_MODE_SIZE (hfa_mode);
4236 int byte_size;
4237 int args_byte_size;
4238
4239 /* If prototyped, pass it in FR regs then GR regs.
4240 If not prototyped, pass it in both FR and GR regs.
4241
4242 If this is an SFmode aggregate, then it is possible to run out of
4243 FR regs while GR regs are still left. In that case, we pass the
4244 remaining part in the GR regs. */
4245
4246 /* Fill the FP regs. We do this always. We stop if we reach the end
4247 of the argument, the last FP register, or the last argument slot. */
4248
4249 byte_size = ((mode == BLKmode)
4250 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4251 args_byte_size = int_regs * UNITS_PER_WORD;
4252 offset = 0;
4253 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4254 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4255 {
4256 offset += hfa_size;
4257 args_byte_size += hfa_size;
4258 fp_regs++;
4259 }
4260
4261 cum->fp_regs = fp_regs;
4262 }
4263
4264   /* Integral values and aggregates go in general registers.  So do TFmode FP values.
4265 If we have run out of FR registers, then other FP values must also go in
4266 general registers. This can happen when we have a SFmode HFA. */
4267 else if (mode == TFmode || mode == TCmode
4268 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4269 cum->int_regs = cum->words;
4270
4271 /* If there is a prototype, then FP values go in a FR register when
4272 named, and in a GR register when unnamed. */
4273 else if (cum->prototype)
4274 {
4275 if (! named)
4276 cum->int_regs = cum->words;
4277 else
4278 /* ??? Complex types should not reach here. */
4279 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4280 }
4281 /* If there is no prototype, then FP values go in both FR and GR
4282 registers. */
4283 else
4284 {
4285 /* ??? Complex types should not reach here. */
4286 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4287 cum->int_regs = cum->words;
4288 }
4289 }
4290
4291 /* Arguments with alignment larger than 8 bytes start at the next even
4292 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
4293 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4294
4295 int
4296 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4297 {
4298
4299 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4300 return PARM_BOUNDARY * 2;
4301
4302 if (type)
4303 {
4304 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4305 return PARM_BOUNDARY * 2;
4306 else
4307 return PARM_BOUNDARY;
4308 }
4309
4310 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4311 return PARM_BOUNDARY * 2;
4312 else
4313 return PARM_BOUNDARY;
4314 }
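/* Illustrative sketch, not part of this file: the boundary rule above with
   PARM_BOUNDARY assumed to be 64 bits.  A 16-byte-aligned type (or, per
   the special case above, TFmode on ILP32 HP-UX) is aligned to 128 bits;
   an ordinary 64-bit scalar stays at 64.  Guarded with #if 0 so it does
   not affect the build.  */
#if 0
static int
example_arg_boundary (int type_align_bits)
{
  const int parm_boundary = 64;       /* assumed PARM_BOUNDARY */
  return (type_align_bits > parm_boundary
	  ? parm_boundary * 2 : parm_boundary);
  /* example_arg_boundary (128) == 128, example_arg_boundary (64) == 64 */
}
#endif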
4315
4316 /* True if it is OK to do sibling call optimization for the specified
4317 call expression EXP. DECL will be the called function, or NULL if
4318 this is an indirect call. */
4319 static bool
4320 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4321 {
4322 /* We can't perform a sibcall if the current function has the syscall_linkage
4323 attribute. */
4324 if (lookup_attribute ("syscall_linkage",
4325 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4326 return false;
4327
4328 /* We must always return with our current GP. This means we can
4329 only sibcall to functions defined in the current module. */
4330 return decl && (*targetm.binds_local_p) (decl);
4331 }
4332 \f
4333
4334 /* Implement va_arg. */
4335
4336 static tree
4337 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4338 {
4339 /* Variable sized types are passed by reference. */
4340 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4341 {
4342 tree ptrtype = build_pointer_type (type);
4343 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4344 return build_va_arg_indirect_ref (addr);
4345 }
4346
4347 /* Aggregate arguments with alignment larger than 8 bytes start at
4348 the next even boundary. Integer and floating point arguments
4349 do so if they are larger than 8 bytes, whether or not they are
4350 also aligned larger than 8 bytes. */
4351 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4352 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4353 {
4354 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4355 size_int (2 * UNITS_PER_WORD - 1));
4356 t = fold_convert (sizetype, t);
4357 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4358 size_int (-2 * UNITS_PER_WORD));
4359 t = fold_convert (TREE_TYPE (valist), t);
4360 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
4361 gimplify_and_add (t, pre_p);
4362 }
4363
4364 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4365 }
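/* Illustrative sketch, not part of this file: the pointer round-up that the
   gimplified expression above builds, assuming 8-byte words.  A va_list
   pointer sitting on an odd 8-byte slot is bumped to the next 16-byte
   boundary before the standard va_arg logic runs.  Guarded with #if 0 so
   it does not affect the build.  */
#if 0
static unsigned long
example_round_va_ptr (unsigned long p)
{
  const unsigned long align = 2 * 8;  /* assumed 2 * UNITS_PER_WORD */
  return (p + align - 1) & -align;
  /* example_round_va_ptr (8) == 16, example_round_va_ptr (16) == 16 */
}
#endif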
4366 \f
4367 /* Return 1 if the function return value is returned in memory.  Return 0 if it is
4368 in a register. */
4369
4370 static bool
4371 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4372 {
4373 enum machine_mode mode;
4374 enum machine_mode hfa_mode;
4375 HOST_WIDE_INT byte_size;
4376
4377 mode = TYPE_MODE (valtype);
4378 byte_size = GET_MODE_SIZE (mode);
4379 if (mode == BLKmode)
4380 {
4381 byte_size = int_size_in_bytes (valtype);
4382 if (byte_size < 0)
4383 return true;
4384 }
4385
4386 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4387   /* HFAs with up to 8 elements are returned in the FP argument registers.  */
4388 hfa_mode = hfa_element_mode (valtype, 0);
4389 if (hfa_mode != VOIDmode)
4390 {
4391 int hfa_size = GET_MODE_SIZE (hfa_mode);
4392
4393 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4394 return true;
4395 else
4396 return false;
4397 }
4398 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4399 return true;
4400 else
4401 return false;
4402 }
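/* Worked examples for the test above (editorial, not generated by the
   code): an HFA of eight doubles (64 bytes, 8 elements) fits the 8 FP
   argument registers and is returned in registers, while nine doubles go
   to memory.  For non-HFA values the cutoff is UNITS_PER_WORD *
   MAX_INT_RETURN_SLOTS bytes; assuming 4 integer return slots (r8-r11),
   anything larger than 32 bytes is returned in memory.  */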
4403
4404 /* Return rtx for register that holds the function return value. */
4405
4406 rtx
4407 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4408 {
4409 enum machine_mode mode;
4410 enum machine_mode hfa_mode;
4411
4412 mode = TYPE_MODE (valtype);
4413 hfa_mode = hfa_element_mode (valtype, 0);
4414
4415 if (hfa_mode != VOIDmode)
4416 {
4417 rtx loc[8];
4418 int i;
4419 int hfa_size;
4420 int byte_size;
4421 int offset;
4422
4423 hfa_size = GET_MODE_SIZE (hfa_mode);
4424 byte_size = ((mode == BLKmode)
4425 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4426 offset = 0;
4427 for (i = 0; offset < byte_size; i++)
4428 {
4429 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4430 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4431 GEN_INT (offset));
4432 offset += hfa_size;
4433 }
4434 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4435 }
4436 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4437 return gen_rtx_REG (mode, FR_ARG_FIRST);
4438 else
4439 {
4440 bool need_parallel = false;
4441
4442 /* In big-endian mode, we need to manage the layout of aggregates
4443 in the registers so that we get the bits properly aligned in
4444 the highpart of the registers. */
4445 if (BYTES_BIG_ENDIAN
4446 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4447 need_parallel = true;
4448
4449       /* Something like struct S { long double x; char a[0]; } is not an
4450 HFA structure, and therefore doesn't go in fp registers. But
4451 the middle-end will give it XFmode anyway, and XFmode values
4452 don't normally fit in integer registers. So we need to smuggle
4453 the value inside a parallel. */
4454 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4455 need_parallel = true;
4456
4457 if (need_parallel)
4458 {
4459 rtx loc[8];
4460 int offset;
4461 int bytesize;
4462 int i;
4463
4464 offset = 0;
4465 bytesize = int_size_in_bytes (valtype);
4466 /* An empty PARALLEL is invalid here, but the return value
4467 doesn't matter for empty structs. */
4468 if (bytesize == 0)
4469 return gen_rtx_REG (mode, GR_RET_FIRST);
4470 for (i = 0; offset < bytesize; i++)
4471 {
4472 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4473 gen_rtx_REG (DImode,
4474 GR_RET_FIRST + i),
4475 GEN_INT (offset));
4476 offset += UNITS_PER_WORD;
4477 }
4478 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4479 }
4480
4481 return gen_rtx_REG (mode, GR_RET_FIRST);
4482 }
4483 }
4484
4485 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4486 We need to emit DTP-relative relocations. */
4487
4488 static void
4489 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4490 {
4491 gcc_assert (size == 4 || size == 8);
4492 if (size == 4)
4493 fputs ("\tdata4.ua\t@dtprel(", file);
4494 else
4495 fputs ("\tdata8.ua\t@dtprel(", file);
4496 output_addr_const (file, x);
4497 fputs (")", file);
4498 }
4499
4500 /* Print a memory address as an operand to reference that memory location. */
4501
4502 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4503 also call this from ia64_print_operand for memory addresses. */
4504
4505 void
4506 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4507 rtx address ATTRIBUTE_UNUSED)
4508 {
4509 }
4510
4511 /* Print an operand to an assembler instruction.
4512 C Swap and print a comparison operator.
4513 D Print an FP comparison operator.
4514 E Print 32 - constant, for SImode shifts as extract.
4515 e Print 64 - constant, for DImode rotates.
4516 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4517 a floating point register emitted normally.
4518 I Invert a predicate register by adding 1.
4519 J Select the proper predicate register for a condition.
4520 j Select the inverse predicate register for a condition.
4521 O Append .acq for volatile load.
4522 P Postincrement of a MEM.
4523 Q Append .rel for volatile store.
4524 R Print .s .d or nothing for a single, double or no truncation.
4525 S Shift amount for shladd instruction.
4526 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4527 for Intel assembler.
4528 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4529 for Intel assembler.
4530 X A pair of floating point registers.
4531 r Print register name, or constant 0 as r0. HP compatibility for
4532 Linux kernel.
4533 v Print vector constant value as an 8-byte integer value. */
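/* Example of the 'P' code above (editorial): for an operand
   (mem:DI (post_inc:DI (reg r14))), the default MEM case prints "[r14]"
   and %P appends ", 8" (the DImode size), producing the post-increment
   addressing form "[r14], 8".  */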
4534
4535 void
4536 ia64_print_operand (FILE * file, rtx x, int code)
4537 {
4538 const char *str;
4539
4540 switch (code)
4541 {
4542 case 0:
4543 /* Handled below. */
4544 break;
4545
4546 case 'C':
4547 {
4548 enum rtx_code c = swap_condition (GET_CODE (x));
4549 fputs (GET_RTX_NAME (c), file);
4550 return;
4551 }
4552
4553 case 'D':
4554 switch (GET_CODE (x))
4555 {
4556 case NE:
4557 str = "neq";
4558 break;
4559 case UNORDERED:
4560 str = "unord";
4561 break;
4562 case ORDERED:
4563 str = "ord";
4564 break;
4565 case UNLT:
4566 str = "nge";
4567 break;
4568 case UNLE:
4569 str = "ngt";
4570 break;
4571 case UNGT:
4572 str = "nle";
4573 break;
4574 case UNGE:
4575 str = "nlt";
4576 break;
4577 default:
4578 str = GET_RTX_NAME (GET_CODE (x));
4579 break;
4580 }
4581 fputs (str, file);
4582 return;
4583
4584 case 'E':
4585 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4586 return;
4587
4588 case 'e':
4589 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4590 return;
4591
4592 case 'F':
4593 if (x == CONST0_RTX (GET_MODE (x)))
4594 str = reg_names [FR_REG (0)];
4595 else if (x == CONST1_RTX (GET_MODE (x)))
4596 str = reg_names [FR_REG (1)];
4597 else
4598 {
4599 gcc_assert (GET_CODE (x) == REG);
4600 str = reg_names [REGNO (x)];
4601 }
4602 fputs (str, file);
4603 return;
4604
4605 case 'I':
4606 fputs (reg_names [REGNO (x) + 1], file);
4607 return;
4608
4609 case 'J':
4610 case 'j':
4611 {
4612 unsigned int regno = REGNO (XEXP (x, 0));
4613 if (GET_CODE (x) == EQ)
4614 regno += 1;
4615 if (code == 'j')
4616 regno ^= 1;
4617 fputs (reg_names [regno], file);
4618 }
4619 return;
4620
4621 case 'O':
4622 if (MEM_VOLATILE_P (x))
4623 fputs(".acq", file);
4624 return;
4625
4626 case 'P':
4627 {
4628 HOST_WIDE_INT value;
4629
4630 switch (GET_CODE (XEXP (x, 0)))
4631 {
4632 default:
4633 return;
4634
4635 case POST_MODIFY:
4636 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4637 if (GET_CODE (x) == CONST_INT)
4638 value = INTVAL (x);
4639 else
4640 {
4641 gcc_assert (GET_CODE (x) == REG);
4642 fprintf (file, ", %s", reg_names[REGNO (x)]);
4643 return;
4644 }
4645 break;
4646
4647 case POST_INC:
4648 value = GET_MODE_SIZE (GET_MODE (x));
4649 break;
4650
4651 case POST_DEC:
4652 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4653 break;
4654 }
4655
4656 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4657 return;
4658 }
4659
4660 case 'Q':
4661 if (MEM_VOLATILE_P (x))
4662 fputs(".rel", file);
4663 return;
4664
4665 case 'R':
4666 if (x == CONST0_RTX (GET_MODE (x)))
4667 fputs(".s", file);
4668 else if (x == CONST1_RTX (GET_MODE (x)))
4669 fputs(".d", file);
4670 else if (x == CONST2_RTX (GET_MODE (x)))
4671 ;
4672 else
4673 output_operand_lossage ("invalid %%R value");
4674 return;
4675
4676 case 'S':
4677 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4678 return;
4679
4680 case 'T':
4681 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4682 {
4683 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4684 return;
4685 }
4686 break;
4687
4688 case 'U':
4689 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4690 {
4691 const char *prefix = "0x";
4692 if (INTVAL (x) & 0x80000000)
4693 {
4694 fprintf (file, "0xffffffff");
4695 prefix = "";
4696 }
4697 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4698 return;
4699 }
4700 break;
4701
4702 case 'X':
4703 {
4704 unsigned int regno = REGNO (x);
4705 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4706 }
4707 return;
4708
4709 case 'r':
4710 /* If this operand is the constant zero, write it as register zero.
4711 Any register, zero, or CONST_INT value is OK here. */
4712 if (GET_CODE (x) == REG)
4713 fputs (reg_names[REGNO (x)], file);
4714 else if (x == CONST0_RTX (GET_MODE (x)))
4715 fputs ("r0", file);
4716 else if (GET_CODE (x) == CONST_INT)
4717 output_addr_const (file, x);
4718 else
4719 output_operand_lossage ("invalid %%r value");
4720 return;
4721
4722 case 'v':
4723 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4724 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4725 break;
4726
4727 case '+':
4728 {
4729 const char *which;
4730
4731 /* For conditional branches, returns or calls, substitute
4732 sptk, dptk, dpnt, or spnt for %s. */
4733 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4734 if (x)
4735 {
4736 int pred_val = INTVAL (XEXP (x, 0));
4737
4738 	    /* Guess the top and bottom 2% statically predicted.  */
4739 if (pred_val < REG_BR_PROB_BASE / 50
4740 && br_prob_note_reliable_p (x))
4741 which = ".spnt";
4742 else if (pred_val < REG_BR_PROB_BASE / 2)
4743 which = ".dpnt";
4744 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4745 || !br_prob_note_reliable_p (x))
4746 which = ".dptk";
4747 else
4748 which = ".sptk";
4749 }
4750 else if (GET_CODE (current_output_insn) == CALL_INSN)
4751 which = ".sptk";
4752 else
4753 which = ".dptk";
4754
4755 fputs (which, file);
4756 return;
4757 }
4758
4759 case ',':
4760 x = current_insn_predicate;
4761 if (x)
4762 {
4763 unsigned int regno = REGNO (XEXP (x, 0));
4764 if (GET_CODE (x) == EQ)
4765 regno += 1;
4766 fprintf (file, "(%s) ", reg_names [regno]);
4767 }
4768 return;
4769
4770 default:
4771 output_operand_lossage ("ia64_print_operand: unknown code");
4772 return;
4773 }
4774
4775 switch (GET_CODE (x))
4776 {
4777 /* This happens for the spill/restore instructions. */
4778 case POST_INC:
4779 case POST_DEC:
4780 case POST_MODIFY:
4781 x = XEXP (x, 0);
4782 /* ... fall through ... */
4783
4784 case REG:
4785 fputs (reg_names [REGNO (x)], file);
4786 break;
4787
4788 case MEM:
4789 {
4790 rtx addr = XEXP (x, 0);
4791 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4792 addr = XEXP (addr, 0);
4793 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4794 break;
4795 }
4796
4797 default:
4798 output_addr_const (file, x);
4799 break;
4800 }
4801
4802 return;
4803 }
4804 \f
4805 /* Compute a (partial) cost for rtx X. Return true if the complete
4806 cost has been computed, and false if subexpressions should be
4807 scanned. In either case, *TOTAL contains the cost result. */
4808 /* ??? This is incomplete. */
4809
4810 static bool
4811 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4812 {
4813 switch (code)
4814 {
4815 case CONST_INT:
4816 switch (outer_code)
4817 {
4818 case SET:
4819 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4820 return true;
4821 case PLUS:
4822 if (satisfies_constraint_I (x))
4823 *total = 0;
4824 else if (satisfies_constraint_J (x))
4825 *total = 1;
4826 else
4827 *total = COSTS_N_INSNS (1);
4828 return true;
4829 default:
4830 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4831 *total = 0;
4832 else
4833 *total = COSTS_N_INSNS (1);
4834 return true;
4835 }
4836
4837 case CONST_DOUBLE:
4838 *total = COSTS_N_INSNS (1);
4839 return true;
4840
4841 case CONST:
4842 case SYMBOL_REF:
4843 case LABEL_REF:
4844 *total = COSTS_N_INSNS (3);
4845 return true;
4846
4847 case MULT:
4848 /* For multiplies wider than HImode, we have to go to the FPU,
4849 which normally involves copies. Plus there's the latency
4850 of the multiply itself, and the latency of the instructions to
4851 transfer integer regs to FP regs. */
4852 /* ??? Check for FP mode. */
4853 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4854 *total = COSTS_N_INSNS (10);
4855 else
4856 *total = COSTS_N_INSNS (2);
4857 return true;
4858
4859 case PLUS:
4860 case MINUS:
4861 case ASHIFT:
4862 case ASHIFTRT:
4863 case LSHIFTRT:
4864 *total = COSTS_N_INSNS (1);
4865 return true;
4866
4867 case DIV:
4868 case UDIV:
4869 case MOD:
4870 case UMOD:
4871 /* We make divide expensive, so that divide-by-constant will be
4872 optimized to a multiply. */
4873 *total = COSTS_N_INSNS (60);
4874 return true;
4875
4876 default:
4877 return false;
4878 }
4879 }
4880
4881 /* Calculate the cost of moving data from a register in class FROM to
4882 one in class TO, using MODE. */
4883
4884 int
4885 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4886 enum reg_class to)
4887 {
4888 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4889 if (to == ADDL_REGS)
4890 to = GR_REGS;
4891 if (from == ADDL_REGS)
4892 from = GR_REGS;
4893
4894 /* All costs are symmetric, so reduce cases by putting the
4895      lower-numbered class as the destination.  */
4896 if (from < to)
4897 {
4898 enum reg_class tmp = to;
4899 to = from, from = tmp;
4900 }
4901
4902 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4903 so that we get secondary memory reloads. Between FR_REGS,
4904 we have to make this at least as expensive as MEMORY_MOVE_COST
4905 to avoid spectacularly poor register class preferencing. */
4906 if (mode == XFmode || mode == RFmode)
4907 {
4908 if (to != GR_REGS || from != GR_REGS)
4909 return MEMORY_MOVE_COST (mode, to, 0);
4910 else
4911 return 3;
4912 }
4913
4914 switch (to)
4915 {
4916 case PR_REGS:
4917 /* Moving between PR registers takes two insns. */
4918 if (from == PR_REGS)
4919 return 3;
4920 /* Moving between PR and anything but GR is impossible. */
4921 if (from != GR_REGS)
4922 return MEMORY_MOVE_COST (mode, to, 0);
4923 break;
4924
4925 case BR_REGS:
4926 /* Moving between BR and anything but GR is impossible. */
4927 if (from != GR_REGS && from != GR_AND_BR_REGS)
4928 return MEMORY_MOVE_COST (mode, to, 0);
4929 break;
4930
4931 case AR_I_REGS:
4932 case AR_M_REGS:
4933 /* Moving between AR and anything but GR is impossible. */
4934 if (from != GR_REGS)
4935 return MEMORY_MOVE_COST (mode, to, 0);
4936 break;
4937
4938 case GR_REGS:
4939 case FR_REGS:
4940 case FP_REGS:
4941 case GR_AND_FR_REGS:
4942 case GR_AND_BR_REGS:
4943 case ALL_REGS:
4944 break;
4945
4946 default:
4947 gcc_unreachable ();
4948 }
4949
4950 return 2;
4951 }
4952
4953 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4954 to use when copying X into that class. */
4955
4956 enum reg_class
4957 ia64_preferred_reload_class (rtx x, enum reg_class class)
4958 {
4959 switch (class)
4960 {
4961 case FR_REGS:
4962 case FP_REGS:
4963 /* Don't allow volatile mem reloads into floating point registers.
4964 This is defined to force reload to choose the r/m case instead
4965 of the f/f case when reloading (set (reg fX) (mem/v)). */
4966 if (MEM_P (x) && MEM_VOLATILE_P (x))
4967 return NO_REGS;
4968
4969 /* Force all unrecognized constants into the constant pool. */
4970 if (CONSTANT_P (x))
4971 return NO_REGS;
4972 break;
4973
4974 case AR_M_REGS:
4975 case AR_I_REGS:
4976 if (!OBJECT_P (x))
4977 return NO_REGS;
4978 break;
4979
4980 default:
4981 break;
4982 }
4983
4984 return class;
4985 }
4986
4987 /* This function returns the register class required for a secondary
4988 register when copying between one of the registers in CLASS, and X,
4989 using MODE. A return value of NO_REGS means that no secondary register
4990 is required. */
4991
4992 enum reg_class
4993 ia64_secondary_reload_class (enum reg_class class,
4994 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4995 {
4996 int regno = -1;
4997
4998 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4999 regno = true_regnum (x);
5000
5001 switch (class)
5002 {
5003 case BR_REGS:
5004 case AR_M_REGS:
5005 case AR_I_REGS:
5006 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5007 interaction. We end up with two pseudos with overlapping lifetimes
5008 	 both of which are equiv to the same constant, and both of which need
5009 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5010 changes depending on the path length, which means the qty_first_reg
5011 check in make_regs_eqv can give different answers at different times.
5012 At some point I'll probably need a reload_indi pattern to handle
5013 this.
5014
5015 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5016 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5017 non-general registers for good measure. */
5018 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5019 return GR_REGS;
5020
5021 /* This is needed if a pseudo used as a call_operand gets spilled to a
5022 stack slot. */
5023 if (GET_CODE (x) == MEM)
5024 return GR_REGS;
5025 break;
5026
5027 case FR_REGS:
5028 case FP_REGS:
5029 /* Need to go through general registers to get to other class regs. */
5030 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5031 return GR_REGS;
5032
5033 /* This can happen when a paradoxical subreg is an operand to the
5034 muldi3 pattern. */
5035 /* ??? This shouldn't be necessary after instruction scheduling is
5036 enabled, because paradoxical subregs are not accepted by
5037 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5038 stop the paradoxical subreg stupidity in the *_operand functions
5039 in recog.c. */
5040 if (GET_CODE (x) == MEM
5041 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5042 || GET_MODE (x) == QImode))
5043 return GR_REGS;
5044
5045 /* This can happen because of the ior/and/etc patterns that accept FP
5046 registers as operands. If the third operand is a constant, then it
5047 needs to be reloaded into a FP register. */
5048 if (GET_CODE (x) == CONST_INT)
5049 return GR_REGS;
5050
5051 /* This can happen because of register elimination in a muldi3 insn.
5052 E.g. `26107 * (unsigned long)&u'. */
5053 if (GET_CODE (x) == PLUS)
5054 return GR_REGS;
5055 break;
5056
5057 case PR_REGS:
5058 /* ??? This happens if we cse/gcse a BImode value across a call,
5059 and the function has a nonlocal goto. This is because global
5060 does not allocate call crossing pseudos to hard registers when
5061 current_function_has_nonlocal_goto is true. This is relatively
5062 common for C++ programs that use exceptions. To reproduce,
5063 return NO_REGS and compile libstdc++. */
5064 if (GET_CODE (x) == MEM)
5065 return GR_REGS;
5066
5067 /* This can happen when we take a BImode subreg of a DImode value,
5068 and that DImode value winds up in some non-GR register. */
5069 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5070 return GR_REGS;
5071 break;
5072
5073 default:
5074 break;
5075 }
5076
5077 return NO_REGS;
5078 }
5079
5080 \f
5081 /* Implement targetm.unspec_may_trap_p hook. */
5082 static int
5083 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5084 {
5085 if (GET_CODE (x) == UNSPEC)
5086 {
5087 switch (XINT (x, 1))
5088 {
5089 case UNSPEC_LDA:
5090 case UNSPEC_LDS:
5091 case UNSPEC_LDSA:
5092 case UNSPEC_LDCCLR:
5093 case UNSPEC_CHKACLR:
5094 case UNSPEC_CHKS:
5095 /* These unspecs are just wrappers. */
5096 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5097 }
5098 }
5099
5100 return default_unspec_may_trap_p (x, flags);
5101 }
5102
5103 \f
5104 /* Parse the -mfixed-range= option string. */
5105
5106 static void
5107 fix_range (const char *const_str)
5108 {
5109 int i, first, last;
5110 char *str, *dash, *comma;
5111
5112   /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5113 REG2 are either register names or register numbers. The effect
5114 of this option is to mark the registers in the range from REG1 to
5115 REG2 as ``fixed'' so they won't be used by the compiler. This is
5116 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5117
5118 i = strlen (const_str);
5119 str = (char *) alloca (i + 1);
5120 memcpy (str, const_str, i + 1);
5121
5122 while (1)
5123 {
5124 dash = strchr (str, '-');
5125 if (!dash)
5126 {
5127 warning (0, "value of -mfixed-range must have form REG1-REG2");
5128 return;
5129 }
5130 *dash = '\0';
5131
5132 comma = strchr (dash + 1, ',');
5133 if (comma)
5134 *comma = '\0';
5135
5136 first = decode_reg_name (str);
5137 if (first < 0)
5138 {
5139 warning (0, "unknown register name: %s", str);
5140 return;
5141 }
5142
5143 last = decode_reg_name (dash + 1);
5144 if (last < 0)
5145 {
5146 warning (0, "unknown register name: %s", dash + 1);
5147 return;
5148 }
5149
5150 *dash = '-';
5151
5152 if (first > last)
5153 {
5154 warning (0, "%s-%s is an empty range", str, dash + 1);
5155 return;
5156 }
5157
5158 for (i = first; i <= last; ++i)
5159 fixed_regs[i] = call_used_regs[i] = 1;
5160
5161 if (!comma)
5162 break;
5163
5164 *comma = ',';
5165 str = comma + 1;
5166 }
5167 }
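/* Usage examples for the option parsed above (editorial):
   -mfixed-range=f32-f127 marks f32 through f127 as fixed, and
   -mfixed-range=f32-f127,f2-f5 exercises the comma-separated case handled
   by the loop.  */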
5168
5169 /* Implement TARGET_HANDLE_OPTION. */
5170
5171 static bool
5172 ia64_handle_option (size_t code, const char *arg, int value)
5173 {
5174 switch (code)
5175 {
5176 case OPT_mfixed_range_:
5177 fix_range (arg);
5178 return true;
5179
5180 case OPT_mtls_size_:
5181 if (value != 14 && value != 22 && value != 64)
5182 error ("bad value %<%s%> for -mtls-size= switch", arg);
5183 return true;
5184
5185 case OPT_mtune_:
5186 {
5187 static struct pta
5188 {
5189 const char *name; /* processor name or nickname. */
5190 enum processor_type processor;
5191 }
5192 const processor_alias_table[] =
5193 {
5194 {"itanium", PROCESSOR_ITANIUM},
5195 {"itanium1", PROCESSOR_ITANIUM},
5196 {"merced", PROCESSOR_ITANIUM},
5197 {"itanium2", PROCESSOR_ITANIUM2},
5198 {"mckinley", PROCESSOR_ITANIUM2},
5199 };
5200 int const pta_size = ARRAY_SIZE (processor_alias_table);
5201 int i;
5202
5203 for (i = 0; i < pta_size; i++)
5204 if (!strcmp (arg, processor_alias_table[i].name))
5205 {
5206 ia64_tune = processor_alias_table[i].processor;
5207 break;
5208 }
5209 if (i == pta_size)
5210 error ("bad value %<%s%> for -mtune= switch", arg);
5211 return true;
5212 }
5213
5214 default:
5215 return true;
5216 }
5217 }
5218
5219 /* Implement OVERRIDE_OPTIONS. */
5220
5221 void
5222 ia64_override_options (void)
5223 {
5224 if (TARGET_AUTO_PIC)
5225 target_flags |= MASK_CONST_GP;
5226
5227 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5228 {
5229 warning (0, "not yet implemented: latency-optimized inline square root");
5230 TARGET_INLINE_SQRT = INL_MAX_THR;
5231 }
5232
5233 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5234 flag_schedule_insns_after_reload = 0;
5235
5236 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5237
5238 init_machine_status = ia64_init_machine_status;
5239 }
5240
5241 /* Initialize the record of emitted frame related registers. */
5242
5243 void ia64_init_expanders (void)
5244 {
5245 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5246 }
5247
5248 static struct machine_function *
5249 ia64_init_machine_status (void)
5250 {
5251 return ggc_alloc_cleared (sizeof (struct machine_function));
5252 }
5253 \f
5254 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5255 static enum attr_type ia64_safe_type (rtx);
5256
5257 static enum attr_itanium_class
5258 ia64_safe_itanium_class (rtx insn)
5259 {
5260 if (recog_memoized (insn) >= 0)
5261 return get_attr_itanium_class (insn);
5262 else
5263 return ITANIUM_CLASS_UNKNOWN;
5264 }
5265
5266 static enum attr_type
5267 ia64_safe_type (rtx insn)
5268 {
5269 if (recog_memoized (insn) >= 0)
5270 return get_attr_type (insn);
5271 else
5272 return TYPE_UNKNOWN;
5273 }
5274 \f
5275 /* The following collection of routines emit instruction group stop bits as
5276 necessary to avoid dependencies. */
5277
5278 /* Need to track some additional registers as far as serialization is
5279 concerned so we can properly handle br.call and br.ret. We could
5280 make these registers visible to gcc, but since these registers are
5281 never explicitly used in gcc generated code, it seems wasteful to
5282 do so (plus it would make the call and return patterns needlessly
5283 complex). */
5284 #define REG_RP (BR_REG (0))
5285 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5286 /* This is used for volatile asms which may require a stop bit immediately
5287 before and after them. */
5288 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5289 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5290 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5291
5292 /* For each register, we keep track of how it has been written in the
5293 current instruction group.
5294
5295 If a register is written unconditionally (no qualifying predicate),
5296 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5297
5298 If a register is written if its qualifying predicate P is true, we
5299 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5300 may be written again by the complement of P (P^1) and when this happens,
5301 WRITE_COUNT gets set to 2.
5302
5303 The result of this is that whenever an insn attempts to write a register
5304 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5305
5306 If a predicate register is written by a floating-point insn, we set
5307 WRITTEN_BY_FP to true.
5308
5309 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5310 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
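/* Illustrative sketch, not part of this file: the write-count state
   machine described above, stripped of the and/or and FP refinements.
   0 means not written in this group, 1 written under one predicate,
   2 effectively written unconditionally; a second write needs a stop bit
   unless it is under the complement of the first predicate (assuming, as
   the code below does, that P and P^1 are complementary).  Guarded with
   #if 0 so it does not affect the build.  */
#if 0
struct toy_write_state { int write_count; int first_pred; };

/* Record a write under predicate PRED (0 = unconditional); return 1 if a
   group barrier would be needed first.  */
static int
example_record_write (struct toy_write_state *s, int pred)
{
  int need_barrier = 0;

  switch (s->write_count)
    {
    case 0:                           /* first write in this group */
      s->write_count = pred ? 1 : 2;
      s->first_pred = pred;
      break;
    case 1:                           /* written once under a predicate */
      if ((s->first_pred ^ 1) != pred)
	need_barrier = 1;
      s->write_count = 2;
      break;
    default:                          /* already written unconditionally */
      need_barrier = 1;
      break;
    }
  return need_barrier;
}
#endif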
5311
5312 #if GCC_VERSION >= 4000
5313 #define RWS_FIELD_TYPE __extension__ unsigned short
5314 #else
5315 #define RWS_FIELD_TYPE unsigned int
5316 #endif
5317 struct reg_write_state
5318 {
5319 RWS_FIELD_TYPE write_count : 2;
5320 RWS_FIELD_TYPE first_pred : 10;
5321 RWS_FIELD_TYPE written_by_fp : 1;
5322 RWS_FIELD_TYPE written_by_and : 1;
5323 RWS_FIELD_TYPE written_by_or : 1;
5324 };
5325
5326 /* Cumulative info for the current instruction group. */
5327 struct reg_write_state rws_sum[NUM_REGS];
5328 #ifdef ENABLE_CHECKING
5329 /* Bitmap whether a register has been written in the current insn. */
5330 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5331 / HOST_BITS_PER_WIDEST_FAST_INT];
5332
5333 static inline void
5334 rws_insn_set (int regno)
5335 {
5336 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5337 SET_HARD_REG_BIT (rws_insn, regno);
5338 }
5339
5340 static inline int
5341 rws_insn_test (int regno)
5342 {
5343 return TEST_HARD_REG_BIT (rws_insn, regno);
5344 }
5345 #else
5346 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5347 unsigned char rws_insn[2];
5348
5349 static inline void
5350 rws_insn_set (int regno)
5351 {
5352 if (regno == REG_AR_CFM)
5353 rws_insn[0] = 1;
5354 else if (regno == REG_VOLATILE)
5355 rws_insn[1] = 1;
5356 }
5357
5358 static inline int
5359 rws_insn_test (int regno)
5360 {
5361 if (regno == REG_AR_CFM)
5362 return rws_insn[0];
5363 if (regno == REG_VOLATILE)
5364 return rws_insn[1];
5365 return 0;
5366 }
5367 #endif
5368
5369 /* Indicates whether this is the first instruction after a stop bit,
5370 in which case we don't need another stop bit. Without this,
5371 ia64_variable_issue will die when scheduling an alloc. */
5372 static int first_instruction;
5373
5374 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5375 RTL for one instruction. */
5376 struct reg_flags
5377 {
5378 unsigned int is_write : 1; /* Is register being written? */
5379 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5380 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5381 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5382 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5383 unsigned int is_sibcall : 1; /* Is this a sibling call (rather than a normal call)? */
5384 };
5385
5386 static void rws_update (int, struct reg_flags, int);
5387 static int rws_access_regno (int, struct reg_flags, int);
5388 static int rws_access_reg (rtx, struct reg_flags, int);
5389 static void update_set_flags (rtx, struct reg_flags *);
5390 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5391 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5392 static void init_insn_group_barriers (void);
5393 static int group_barrier_needed (rtx);
5394 static int safe_group_barrier_needed (rtx);
5395 static int in_safe_group_barrier;
5396
5397 /* Update rws_sum for REGNO, which is being written by the current instruction,
5398 with predicate PRED, and associated register flags in FLAGS. */
5399
5400 static void
5401 rws_update (int regno, struct reg_flags flags, int pred)
5402 {
5403 if (pred)
5404 rws_sum[regno].write_count++;
5405 else
5406 rws_sum[regno].write_count = 2;
5407 rws_sum[regno].written_by_fp |= flags.is_fp;
5408 /* ??? Not tracking and/or across differing predicates. */
5409 rws_sum[regno].written_by_and = flags.is_and;
5410 rws_sum[regno].written_by_or = flags.is_or;
5411 rws_sum[regno].first_pred = pred;
5412 }
5413
5414 /* Handle an access to register REGNO of type FLAGS using predicate register
5415 PRED. Update rws_sum array. Return 1 if this access creates
5416 a dependency with an earlier instruction in the same group. */
5417
5418 static int
5419 rws_access_regno (int regno, struct reg_flags flags, int pred)
5420 {
5421 int need_barrier = 0;
5422
5423 gcc_assert (regno < NUM_REGS);
5424
5425 if (! PR_REGNO_P (regno))
5426 flags.is_and = flags.is_or = 0;
5427
5428 if (flags.is_write)
5429 {
5430 int write_count;
5431
5432 rws_insn_set (regno);
5433 write_count = rws_sum[regno].write_count;
5434
5435 switch (write_count)
5436 {
5437 case 0:
5438 /* The register has not been written yet. */
5439 if (!in_safe_group_barrier)
5440 rws_update (regno, flags, pred);
5441 break;
5442
5443 case 1:
5444 /* The register has been written via a predicate. If this is
5445 not a complementary predicate, then we need a barrier. */
5446 /* ??? This assumes that P and P+1 are always complementary
5447 predicates for P even. */
5448 if (flags.is_and && rws_sum[regno].written_by_and)
5449 ;
5450 else if (flags.is_or && rws_sum[regno].written_by_or)
5451 ;
5452 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5453 need_barrier = 1;
5454 if (!in_safe_group_barrier)
5455 rws_update (regno, flags, pred);
5456 break;
5457
5458 case 2:
5459 /* The register has been unconditionally written already. We
5460 need a barrier. */
5461 if (flags.is_and && rws_sum[regno].written_by_and)
5462 ;
5463 else if (flags.is_or && rws_sum[regno].written_by_or)
5464 ;
5465 else
5466 need_barrier = 1;
5467 if (!in_safe_group_barrier)
5468 {
5469 rws_sum[regno].written_by_and = flags.is_and;
5470 rws_sum[regno].written_by_or = flags.is_or;
5471 }
5472 break;
5473
5474 default:
5475 gcc_unreachable ();
5476 }
5477 }
5478 else
5479 {
5480 if (flags.is_branch)
5481 {
5482 /* Branches have several RAW exceptions that let us avoid
5483 barriers. */
5484
5485 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5486 /* RAW dependencies on branch regs are permissible as long
5487 as the writer is a non-branch instruction. Since we
5488 never generate code that uses a branch register written
5489 by a branch instruction, handling this case is
5490 easy. */
5491 return 0;
5492
5493 if (REGNO_REG_CLASS (regno) == PR_REGS
5494 && ! rws_sum[regno].written_by_fp)
5495 /* The predicates of a branch are available within the
5496 same insn group as long as the predicate was written by
5497 something other than a floating-point instruction. */
5498 return 0;
5499 }
5500
5501 if (flags.is_and && rws_sum[regno].written_by_and)
5502 return 0;
5503 if (flags.is_or && rws_sum[regno].written_by_or)
5504 return 0;
5505
5506 switch (rws_sum[regno].write_count)
5507 {
5508 case 0:
5509 /* The register has not been written yet. */
5510 break;
5511
5512 case 1:
5513 /* The register has been written via a predicate. If this is
5514 not a complementary predicate, then we need a barrier. */
5515 /* ??? This assumes that P and P+1 are always complementary
5516 predicates for P even. */
5517 if ((rws_sum[regno].first_pred ^ 1) != pred)
5518 need_barrier = 1;
5519 break;
5520
5521 case 2:
5522 /* The register has been unconditionally written already. We
5523 need a barrier. */
5524 need_barrier = 1;
5525 break;
5526
5527 default:
5528 gcc_unreachable ();
5529 }
5530 }
5531
5532 return need_barrier;
5533 }
5534
5535 static int
5536 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5537 {
5538 int regno = REGNO (reg);
5539 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5540
5541 if (n == 1)
5542 return rws_access_regno (regno, flags, pred);
5543 else
5544 {
5545 int need_barrier = 0;
5546 while (--n >= 0)
5547 need_barrier |= rws_access_regno (regno + n, flags, pred);
5548 return need_barrier;
5549 }
5550 }
5551
5552 /* Examine X, which is a SET rtx, and update the register flags
5553 stored in *PFLAGS. */
5554
5555 static void
5556 update_set_flags (rtx x, struct reg_flags *pflags)
5557 {
5558 rtx src = SET_SRC (x);
5559
5560 switch (GET_CODE (src))
5561 {
5562 case CALL:
5563 return;
5564
5565 case IF_THEN_ELSE:
5566 /* There are four cases here:
5567 (1) The destination is (pc), in which case this is a branch,
5568 nothing here applies.
5569 (2) The destination is ar.lc, in which case this is a
5570 doloop_end_internal.
5571 (3) The destination is an fp register, in which case this is
5572 an fselect instruction.
5573 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5574 this is a check load.
5575 In all cases, nothing we do in this function applies. */
5576 return;
5577
5578 default:
5579 if (COMPARISON_P (src)
5580 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5581 /* Set pflags->is_fp to 1 so that we know we're dealing
5582 with a floating point comparison when processing the
5583 destination of the SET. */
5584 pflags->is_fp = 1;
5585
5586 /* Discover if this is a parallel comparison. We only handle
5587 and.orcm and or.andcm at present, since we must retain a
5588 strict inverse on the predicate pair. */
5589 else if (GET_CODE (src) == AND)
5590 pflags->is_and = 1;
5591 else if (GET_CODE (src) == IOR)
5592 pflags->is_or = 1;
5593
5594 break;
5595 }
5596 }
5597
5598 /* Subroutine of rtx_needs_barrier; this function determines whether the
5599 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5600 are as in rtx_needs_barrier. */
5602
5603 static int
5604 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5605 {
5606 int need_barrier = 0;
5607 rtx dst;
5608 rtx src = SET_SRC (x);
5609
5610 if (GET_CODE (src) == CALL)
5611 /* We don't need to worry about the result registers that
5612 get written by subroutine call. */
5613 return rtx_needs_barrier (src, flags, pred);
5614 else if (SET_DEST (x) == pc_rtx)
5615 {
5616 /* X is a conditional branch. */
5617 /* ??? This seems redundant, as the caller sets this bit for
5618 all JUMP_INSNs. */
5619 if (!ia64_spec_check_src_p (src))
5620 flags.is_branch = 1;
5621 return rtx_needs_barrier (src, flags, pred);
5622 }
5623
5624 if (ia64_spec_check_src_p (src))
5625 /* Avoid checking one register twice (in condition
5626 and in 'then' section) for ldc pattern. */
5627 {
5628 gcc_assert (REG_P (XEXP (src, 2)));
5629 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5630
5631 /* We process MEM below. */
5632 src = XEXP (src, 1);
5633 }
5634
5635 need_barrier |= rtx_needs_barrier (src, flags, pred);
5636
5637 dst = SET_DEST (x);
5638 if (GET_CODE (dst) == ZERO_EXTRACT)
5639 {
5640 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5641 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5642 }
5643 return need_barrier;
5644 }
5645
5646 /* Handle an access to rtx X of type FLAGS using predicate register
5647 PRED. Return 1 if this access creates a dependency with an earlier
5648 instruction in the same group. */
5649
5650 static int
5651 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5652 {
5653 int i, j;
5654 int is_complemented = 0;
5655 int need_barrier = 0;
5656 const char *format_ptr;
5657 struct reg_flags new_flags;
5658 rtx cond;
5659
5660 if (! x)
5661 return 0;
5662
5663 new_flags = flags;
5664
5665 switch (GET_CODE (x))
5666 {
5667 case SET:
5668 update_set_flags (x, &new_flags);
5669 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5670 if (GET_CODE (SET_SRC (x)) != CALL)
5671 {
5672 new_flags.is_write = 1;
5673 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5674 }
5675 break;
5676
5677 case CALL:
5678 new_flags.is_write = 0;
5679 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5680
5681 /* Avoid multiple register writes, in case this is a pattern with
5682 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5683 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
5684 {
5685 new_flags.is_write = 1;
5686 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5687 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5688 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5689 }
5690 break;
5691
5692 case COND_EXEC:
5693 /* X is a predicated instruction. */
5694
5695 cond = COND_EXEC_TEST (x);
5696 gcc_assert (!pred);
5697 need_barrier = rtx_needs_barrier (cond, flags, 0);
5698
5699 if (GET_CODE (cond) == EQ)
5700 is_complemented = 1;
5701 cond = XEXP (cond, 0);
5702 gcc_assert (GET_CODE (cond) == REG
5703 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5704 pred = REGNO (cond);
5705 if (is_complemented)
5706 ++pred;
5707
5708 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5709 return need_barrier;
5710
5711 case CLOBBER:
5712 case USE:
5713 /* Clobber & use are for earlier compiler phases only. */
5714 break;
5715
5716 case ASM_OPERANDS:
5717 case ASM_INPUT:
5718 /* We always emit stop bits for traditional asms. We emit stop bits
5719 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5720 if (GET_CODE (x) != ASM_OPERANDS
5721 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5722 {
5723 /* Avoid writing the register multiple times if we have multiple
5724 asm outputs. This avoids a failure in rws_access_reg. */
5725 if (! rws_insn_test (REG_VOLATILE))
5726 {
5727 new_flags.is_write = 1;
5728 rws_access_regno (REG_VOLATILE, new_flags, pred);
5729 }
5730 return 1;
5731 }
5732
5733 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5734 We cannot just fall through here since then we would be confused
5735 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5736 a traditional asm, unlike its normal usage. */
5737
5738 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5739 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5740 need_barrier = 1;
5741 break;
5742
5743 case PARALLEL:
5744 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5745 {
5746 rtx pat = XVECEXP (x, 0, i);
5747 switch (GET_CODE (pat))
5748 {
5749 case SET:
5750 update_set_flags (pat, &new_flags);
5751 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5752 break;
5753
5754 case USE:
5755 case CALL:
5756 case ASM_OPERANDS:
5757 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5758 break;
5759
5760 case CLOBBER:
5761 case RETURN:
5762 break;
5763
5764 default:
5765 gcc_unreachable ();
5766 }
5767 }
5768 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5769 {
5770 rtx pat = XVECEXP (x, 0, i);
5771 if (GET_CODE (pat) == SET)
5772 {
5773 if (GET_CODE (SET_SRC (pat)) != CALL)
5774 {
5775 new_flags.is_write = 1;
5776 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5777 pred);
5778 }
5779 }
5780 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5781 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5782 }
5783 break;
5784
5785 case SUBREG:
5786 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5787 break;
5788 case REG:
5789 if (REGNO (x) == AR_UNAT_REGNUM)
5790 {
5791 for (i = 0; i < 64; ++i)
5792 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5793 }
5794 else
5795 need_barrier = rws_access_reg (x, flags, pred);
5796 break;
5797
5798 case MEM:
5799 /* Find the regs used in memory address computation. */
5800 new_flags.is_write = 0;
5801 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5802 break;
5803
5804 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5805 case SYMBOL_REF: case LABEL_REF: case CONST:
5806 break;
5807
5808 /* Operators with side-effects. */
5809 case POST_INC: case POST_DEC:
5810 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5811
5812 new_flags.is_write = 0;
5813 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5814 new_flags.is_write = 1;
5815 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5816 break;
5817
5818 case POST_MODIFY:
5819 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5820
5821 new_flags.is_write = 0;
5822 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5823 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5824 new_flags.is_write = 1;
5825 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5826 break;
5827
5828 /* Handle common unary and binary ops for efficiency. */
5829 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5830 case MOD: case UDIV: case UMOD: case AND: case IOR:
5831 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5832 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5833 case NE: case EQ: case GE: case GT: case LE:
5834 case LT: case GEU: case GTU: case LEU: case LTU:
5835 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5836 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5837 break;
5838
5839 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5840 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5841 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5842 case SQRT: case FFS: case POPCOUNT:
5843 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5844 break;
5845
5846 case VEC_SELECT:
5847 /* VEC_SELECT's second argument is a PARALLEL with integers that
5848 describe the elements selected. On ia64, those integers are
5849 always constants. Avoid walking the PARALLEL so that we don't
5850 get confused with "normal" parallels and then die. */
5851 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5852 break;
5853
5854 case UNSPEC:
5855 switch (XINT (x, 1))
5856 {
5857 case UNSPEC_LTOFF_DTPMOD:
5858 case UNSPEC_LTOFF_DTPREL:
5859 case UNSPEC_DTPREL:
5860 case UNSPEC_LTOFF_TPREL:
5861 case UNSPEC_TPREL:
5862 case UNSPEC_PRED_REL_MUTEX:
5863 case UNSPEC_PIC_CALL:
5864 case UNSPEC_MF:
5865 case UNSPEC_FETCHADD_ACQ:
5866 case UNSPEC_BSP_VALUE:
5867 case UNSPEC_FLUSHRS:
5868 case UNSPEC_BUNDLE_SELECTOR:
5869 break;
5870
5871 case UNSPEC_GR_SPILL:
5872 case UNSPEC_GR_RESTORE:
5873 {
5874 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5875 HOST_WIDE_INT bit = (offset >> 3) & 63;
5876
5877 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5878 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5879 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5880 new_flags, pred);
5881 break;
5882 }
5883
5884 case UNSPEC_FR_SPILL:
5885 case UNSPEC_FR_RESTORE:
5886 case UNSPEC_GETF_EXP:
5887 case UNSPEC_SETF_EXP:
5888 case UNSPEC_ADDP4:
5889 case UNSPEC_FR_SQRT_RECIP_APPROX:
5890 case UNSPEC_LDA:
5891 case UNSPEC_LDS:
5892 case UNSPEC_LDSA:
5893 case UNSPEC_CHKACLR:
5894 case UNSPEC_CHKS:
5895 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5896 break;
5897
5898 case UNSPEC_FR_RECIP_APPROX:
5899 case UNSPEC_SHRP:
5900 case UNSPEC_COPYSIGN:
5901 case UNSPEC_FR_RECIP_APPROX_RES:
5902 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5903 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5904 break;
5905
5906 case UNSPEC_CMPXCHG_ACQ:
5907 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5908 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5909 break;
5910
5911 default:
5912 gcc_unreachable ();
5913 }
5914 break;
5915
5916 case UNSPEC_VOLATILE:
5917 switch (XINT (x, 1))
5918 {
5919 case UNSPECV_ALLOC:
5920 /* Alloc must always be the first instruction of a group.
5921 We force this by always returning true. */
5922 /* ??? We might get better scheduling if we explicitly check for
5923 input/local/output register dependencies, and modify the
5924 scheduler so that alloc is always reordered to the start of
5925 the current group. We could then eliminate all of the
5926 first_instruction code. */
5927 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5928
5929 new_flags.is_write = 1;
5930 rws_access_regno (REG_AR_CFM, new_flags, pred);
5931 return 1;
5932
5933 case UNSPECV_SET_BSP:
5934 need_barrier = 1;
5935 break;
5936
5937 case UNSPECV_BLOCKAGE:
5938 case UNSPECV_INSN_GROUP_BARRIER:
5939 case UNSPECV_BREAK:
5940 case UNSPECV_PSAC_ALL:
5941 case UNSPECV_PSAC_NORMAL:
5942 return 0;
5943
5944 default:
5945 gcc_unreachable ();
5946 }
5947 break;
5948
5949 case RETURN:
5950 new_flags.is_write = 0;
5951 need_barrier = rws_access_regno (REG_RP, flags, pred);
5952 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5953
5954 new_flags.is_write = 1;
5955 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5956 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5957 break;
5958
5959 default:
5960 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5961 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5962 switch (format_ptr[i])
5963 {
5964 case '0': /* unused field */
5965 case 'i': /* integer */
5966 case 'n': /* note */
5967 case 'w': /* wide integer */
5968 case 's': /* pointer to string */
5969 case 'S': /* optional pointer to string */
5970 break;
5971
5972 case 'e':
5973 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5974 need_barrier = 1;
5975 break;
5976
5977 case 'E':
5978 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5979 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5980 need_barrier = 1;
5981 break;
5982
5983 default:
5984 gcc_unreachable ();
5985 }
5986 break;
5987 }
5988 return need_barrier;
5989 }
5990
5991 /* Clear out the state for group_barrier_needed at the start of a
5992 sequence of insns. */
5993
5994 static void
5995 init_insn_group_barriers (void)
5996 {
5997 memset (rws_sum, 0, sizeof (rws_sum));
5998 first_instruction = 1;
5999 }
6000
6001 /* Given the current state, determine whether a group barrier (a stop bit) is
6002 necessary before INSN. Return nonzero if so. This modifies the state to
6003 include the effects of INSN as a side-effect. */
6004
6005 static int
6006 group_barrier_needed (rtx insn)
6007 {
6008 rtx pat;
6009 int need_barrier = 0;
6010 struct reg_flags flags;
6011
6012 memset (&flags, 0, sizeof (flags));
6013 switch (GET_CODE (insn))
6014 {
6015 case NOTE:
6016 break;
6017
6018 case BARRIER:
6019 /* A barrier doesn't imply an instruction group boundary. */
6020 break;
6021
6022 case CODE_LABEL:
6023 memset (rws_insn, 0, sizeof (rws_insn));
6024 return 1;
6025
6026 case CALL_INSN:
6027 flags.is_branch = 1;
6028 flags.is_sibcall = SIBLING_CALL_P (insn);
6029 memset (rws_insn, 0, sizeof (rws_insn));
6030
6031 /* Don't bundle a call following another call. */
6032 if ((pat = prev_active_insn (insn))
6033 && GET_CODE (pat) == CALL_INSN)
6034 {
6035 need_barrier = 1;
6036 break;
6037 }
6038
6039 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6040 break;
6041
6042 case JUMP_INSN:
6043 if (!ia64_spec_check_p (insn))
6044 flags.is_branch = 1;
6045
6046 /* Don't bundle a jump following a call. */
6047 if ((pat = prev_active_insn (insn))
6048 && GET_CODE (pat) == CALL_INSN)
6049 {
6050 need_barrier = 1;
6051 break;
6052 }
6053 /* FALLTHRU */
6054
6055 case INSN:
6056 if (GET_CODE (PATTERN (insn)) == USE
6057 || GET_CODE (PATTERN (insn)) == CLOBBER)
6058 /* Don't care about USE and CLOBBER "insns"---those are used to
6059 indicate to the optimizer that it shouldn't get rid of
6060 certain operations. */
6061 break;
6062
6063 pat = PATTERN (insn);
6064
6065 /* Ug. Hack hacks hacked elsewhere. */
6066 switch (recog_memoized (insn))
6067 {
6068 /* We play dependency tricks with the epilogue in order
6069 to get proper schedules. Undo this for dv analysis. */
6070 case CODE_FOR_epilogue_deallocate_stack:
6071 case CODE_FOR_prologue_allocate_stack:
6072 pat = XVECEXP (pat, 0, 0);
6073 break;
6074
6075 /* The pattern we use for br.cloop confuses the code above.
6076 The second element of the vector is representative. */
6077 case CODE_FOR_doloop_end_internal:
6078 pat = XVECEXP (pat, 0, 1);
6079 break;
6080
6081 /* Doesn't generate code. */
6082 case CODE_FOR_pred_rel_mutex:
6083 case CODE_FOR_prologue_use:
6084 return 0;
6085
6086 default:
6087 break;
6088 }
6089
6090 memset (rws_insn, 0, sizeof (rws_insn));
6091 need_barrier = rtx_needs_barrier (pat, flags, 0);
6092
6093 /* Check to see if the previous instruction was a volatile
6094 asm. */
6095 if (! need_barrier)
6096 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6097 break;
6098
6099 default:
6100 gcc_unreachable ();
6101 }
6102
6103 if (first_instruction && INSN_P (insn)
6104 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6105 && GET_CODE (PATTERN (insn)) != USE
6106 && GET_CODE (PATTERN (insn)) != CLOBBER)
6107 {
6108 need_barrier = 0;
6109 first_instruction = 0;
6110 }
6111
6112 return need_barrier;
6113 }
6114
6115 /* Like group_barrier_needed, but do not clobber the current state. */
6116
6117 static int
6118 safe_group_barrier_needed (rtx insn)
6119 {
6120 int saved_first_instruction;
6121 int t;
6122
6123 saved_first_instruction = first_instruction;
6124 in_safe_group_barrier = 1;
6125
6126 t = group_barrier_needed (insn);
6127
6128 first_instruction = saved_first_instruction;
6129 in_safe_group_barrier = 0;
6130
6131 return t;
6132 }
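/* Note: the in_safe_group_barrier flag set above makes rws_access_regno
   skip its rws_sum updates, so querying group_barrier_needed this way
   leaves the cumulative instruction-group state untouched; only
   first_instruction needs to be saved and restored explicitly.  */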
6133
6134 /* Scan the current function and insert stop bits as necessary to
6135 eliminate dependencies. This function assumes that a final
6136 instruction scheduling pass has been run which has already
6137 inserted most of the necessary stop bits. This function only
6138 inserts new ones at basic block boundaries, since these are
6139 invisible to the scheduler. */
6140
6141 static void
6142 emit_insn_group_barriers (FILE *dump)
6143 {
6144 rtx insn;
6145 rtx last_label = 0;
6146 int insns_since_last_label = 0;
6147
6148 init_insn_group_barriers ();
6149
6150 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6151 {
6152 if (GET_CODE (insn) == CODE_LABEL)
6153 {
6154 if (insns_since_last_label)
6155 last_label = insn;
6156 insns_since_last_label = 0;
6157 }
6158 else if (GET_CODE (insn) == NOTE
6159 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6160 {
6161 if (insns_since_last_label)
6162 last_label = insn;
6163 insns_since_last_label = 0;
6164 }
6165 else if (GET_CODE (insn) == INSN
6166 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6167 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6168 {
6169 init_insn_group_barriers ();
6170 last_label = 0;
6171 }
6172 else if (INSN_P (insn))
6173 {
6174 insns_since_last_label = 1;
6175
6176 if (group_barrier_needed (insn))
6177 {
6178 if (last_label)
6179 {
6180 if (dump)
6181 fprintf (dump, "Emitting stop before label %d\n",
6182 INSN_UID (last_label));
6183 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6184 insn = last_label;
6185
6186 init_insn_group_barriers ();
6187 last_label = 0;
6188 }
6189 }
6190 }
6191 }
6192 }
6193
6194 /* Like emit_insn_group_barriers, but run when no final scheduling pass has been run.
6195 This function has to emit all necessary group barriers. */
6196
6197 static void
6198 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6199 {
6200 rtx insn;
6201
6202 init_insn_group_barriers ();
6203
6204 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6205 {
6206 if (GET_CODE (insn) == BARRIER)
6207 {
6208 rtx last = prev_active_insn (insn);
6209
6210 if (! last)
6211 continue;
6212 if (GET_CODE (last) == JUMP_INSN
6213 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6214 last = prev_active_insn (last);
6215 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6216 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6217
6218 init_insn_group_barriers ();
6219 }
6220 else if (INSN_P (insn))
6221 {
6222 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6223 init_insn_group_barriers ();
6224 else if (group_barrier_needed (insn))
6225 {
6226 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6227 init_insn_group_barriers ();
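/* Re-record INSN's own register writes into the freshly reset state;
   the return value is intentionally ignored here.  */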
6228 group_barrier_needed (insn);
6229 }
6230 }
6231 }
6232 }
6233
6234 \f
6235
6236 /* Instruction scheduling support. */
6237
6238 #define NR_BUNDLES 10
6239
6240 /* A list of names of all available bundles. */
6241
6242 static const char *bundle_name [NR_BUNDLES] =
6243 {
6244 ".mii",
6245 ".mmi",
6246 ".mfi",
6247 ".mmf",
6248 #if NR_BUNDLES == 10
6249 ".bbb",
6250 ".mbb",
6251 #endif
6252 ".mib",
6253 ".mmb",
6254 ".mfb",
6255 ".mlx"
6256 };
6257
6258 /* Nonzero if we should insert stop bits into the schedule. */
6259
6260 int ia64_final_schedule = 0;
6261
6262 /* Codes of the corresponding queried units: */
6263
6264 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6265 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6266
6267 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6268 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6269
6270 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6271
6272 /* The following variable value is an insn group barrier. */
6273
6274 static rtx dfa_stop_insn;
6275
6276 /* The following variable value is the last issued insn. */
6277
6278 static rtx last_scheduled_insn;
6279
6280 /* The following variable value is the size of the DFA state. */
6281
6282 static size_t dfa_state_size;
6283
6284 /* The following variable value is a pointer to a DFA state used as
6285 a temporary variable. */
6286
6287 static state_t temp_dfa_state = NULL;
6288
6289 /* The following variable value is the DFA state after issuing the last
6290 insn. */
6291
6292 static state_t prev_cycle_state = NULL;
6293
6294 /* The following array element values are TRUE if the corresponding
6295 insn requires a stop bit to be added before it. */
6296
6297 static char *stops_p = NULL;
6298
6299 /* The following array element values are ZERO for non-speculative
6300 instructions and hold the corresponding speculation check number for
6301 speculative instructions. */
6302 static int *spec_check_no = NULL;
6303
6304 /* Size of spec_check_no array. */
6305 static int max_uid = 0;
6306
6307 /* The following variable is used to set up the array mentioned above. */
6308
6309 static int stop_before_p = 0;
6310
6311 /* The following variable value is the length of the arrays `clocks' and
6312 `add_cycles'. */
6313
6314 static int clocks_length;
6315
6316 /* The following array element values are cycles on which the
6317 corresponding insn will be issued. The array is used only for
6318 Itanium1. */
6319
6320 static int *clocks;
6321
6322 /* The following array element values are the numbers of cycles that should
6323 be added to improve insn scheduling for MM_insns on Itanium1. */
6324
6325 static int *add_cycles;
6326
6327 /* The following variable value is the number of data speculations in progress. */
6328 static int pending_data_specs = 0;
6329
6330 static rtx ia64_single_set (rtx);
6331 static void ia64_emit_insn_before (rtx, rtx);
6332
6333 /* Map a bundle number to its pseudo-op. */
6334
6335 const char *
6336 get_bundle_name (int b)
6337 {
6338 return bundle_name[b];
6339 }
6340
6341
6342 /* Return the maximum number of instructions a cpu can issue. */
6343
6344 static int
6345 ia64_issue_rate (void)
6346 {
6347 return 6;
6348 }
6349
6350 /* Helper function - like single_set, but look inside COND_EXEC. */
6351
6352 static rtx
6353 ia64_single_set (rtx insn)
6354 {
6355 rtx x = PATTERN (insn), ret;
6356 if (GET_CODE (x) == COND_EXEC)
6357 x = COND_EXEC_CODE (x);
6358 if (GET_CODE (x) == SET)
6359 return x;
6360
6361 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6362 Although they are not classical single sets, the second set is there just
6363 to protect the insn from moving past FP-relative stack accesses. */
6364 switch (recog_memoized (insn))
6365 {
6366 case CODE_FOR_prologue_allocate_stack:
6367 case CODE_FOR_epilogue_deallocate_stack:
6368 ret = XVECEXP (x, 0, 0);
6369 break;
6370
6371 default:
6372 ret = single_set_2 (insn, x);
6373 break;
6374 }
6375
6376 return ret;
6377 }
6378
6379 /* Adjust the cost of a scheduling dependency. Return the new cost of
6380 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6381
6382 static int
6383 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6384 {
6385 enum attr_itanium_class dep_class;
6386 enum attr_itanium_class insn_class;
6387
6388 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6389 return cost;
6390
6391 insn_class = ia64_safe_itanium_class (insn);
6392 dep_class = ia64_safe_itanium_class (dep_insn);
6393 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6394 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6395 return 0;
6396
6397 return cost;
6398 }
6399
6400 /* Like emit_insn_before, but skip cycle_display notes.
6401 ??? When cycle display notes are implemented, update this. */
6402
6403 static void
6404 ia64_emit_insn_before (rtx insn, rtx before)
6405 {
6406 emit_insn_before (insn, before);
6407 }
6408
6409 /* The following function marks insns that produce addresses for load
6410 and store insns. Such insns will be placed into M slots because that
6411 decreases latency for Itanium1 (see function
6412 `ia64_produce_address_p' and the DFA descriptions). */
6413
6414 static void
6415 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6416 {
6417 rtx insn, next, next_tail;
6418
6419 /* Before reload, which_alternative is not set, which means that
6420 ia64_safe_itanium_class will produce wrong results for (at least)
6421 move instructions. */
6422 if (!reload_completed)
6423 return;
6424
6425 next_tail = NEXT_INSN (tail);
6426 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6427 if (INSN_P (insn))
6428 insn->call = 0;
6429 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6430 if (INSN_P (insn)
6431 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6432 {
6433 sd_iterator_def sd_it;
6434 dep_t dep;
6435 bool has_mem_op_consumer_p = false;
6436
6437 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6438 {
6439 enum attr_itanium_class c;
6440
6441 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6442 continue;
6443
6444 next = DEP_CON (dep);
6445 c = ia64_safe_itanium_class (next);
6446 if ((c == ITANIUM_CLASS_ST
6447 || c == ITANIUM_CLASS_STF)
6448 && ia64_st_address_bypass_p (insn, next))
6449 {
6450 has_mem_op_consumer_p = true;
6451 break;
6452 }
6453 else if ((c == ITANIUM_CLASS_LD
6454 || c == ITANIUM_CLASS_FLD
6455 || c == ITANIUM_CLASS_FLDP)
6456 && ia64_ld_address_bypass_p (insn, next))
6457 {
6458 has_mem_op_consumer_p = true;
6459 break;
6460 }
6461 }
6462
6463 insn->call = has_mem_op_consumer_p;
6464 }
6465 }
6466
6467 /* We're beginning a new block. Initialize data structures as necessary. */
6468
6469 static void
6470 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6471 int sched_verbose ATTRIBUTE_UNUSED,
6472 int max_ready ATTRIBUTE_UNUSED)
6473 {
6474 #ifdef ENABLE_CHECKING
6475 rtx insn;
6476
6477 if (reload_completed)
6478 for (insn = NEXT_INSN (current_sched_info->prev_head);
6479 insn != current_sched_info->next_tail;
6480 insn = NEXT_INSN (insn))
6481 gcc_assert (!SCHED_GROUP_P (insn));
6482 #endif
6483 last_scheduled_insn = NULL_RTX;
6484 init_insn_group_barriers ();
6485 }
6486
6487 /* We're beginning a scheduling pass. Check assertion. */
6488
6489 static void
6490 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6491 int sched_verbose ATTRIBUTE_UNUSED,
6492 int max_ready ATTRIBUTE_UNUSED)
6493 {
6494 gcc_assert (!pending_data_specs);
6495 }
6496
6497 /* Scheduling pass is now finished. Free/reset static variable. */
6498 static void
6499 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6500 int sched_verbose ATTRIBUTE_UNUSED)
6501 {
6502 free (spec_check_no);
6503 spec_check_no = 0;
6504 max_uid = 0;
6505 }
6506
6507 /* We are about to begin issuing insns for this clock cycle.
6508 Override the default sort algorithm to better slot instructions. */
6509
6510 static int
6511 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6512 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6513 int reorder_type)
6514 {
6515 int n_asms;
6516 int n_ready = *pn_ready;
6517 rtx *e_ready = ready + n_ready;
6518 rtx *insnp;
6519
6520 if (sched_verbose)
6521 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6522
6523 if (reorder_type == 0)
6524 {
6525 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6526 n_asms = 0;
6527 for (insnp = ready; insnp < e_ready; insnp++)
6528 if (insnp < e_ready)
6529 {
6530 rtx insn = *insnp;
6531 enum attr_type t = ia64_safe_type (insn);
6532 if (t == TYPE_UNKNOWN)
6533 {
6534 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6535 || asm_noperands (PATTERN (insn)) >= 0)
6536 {
6537 rtx lowest = ready[n_asms];
6538 ready[n_asms] = insn;
6539 *insnp = lowest;
6540 n_asms++;
6541 }
6542 else
6543 {
6544 rtx highest = ready[n_ready - 1];
6545 ready[n_ready - 1] = insn;
6546 *insnp = highest;
6547 return 1;
6548 }
6549 }
6550 }
6551
6552 if (n_asms < n_ready)
6553 {
6554 /* Some normal insns to process. Skip the asms. */
6555 ready += n_asms;
6556 n_ready -= n_asms;
6557 }
6558 else if (n_ready > 0)
6559 return 1;
6560 }
6561
6562 if (ia64_final_schedule)
6563 {
6564 int deleted = 0;
6565 int nr_need_stop = 0;
6566
6567 for (insnp = ready; insnp < e_ready; insnp++)
6568 if (safe_group_barrier_needed (*insnp))
6569 nr_need_stop++;
6570
6571 if (reorder_type == 1 && n_ready == nr_need_stop)
6572 return 0;
6573 if (reorder_type == 0)
6574 return 1;
6575 insnp = e_ready;
6576 /* Move down everything that needs a stop bit, preserving
6577 relative order. */
6578 while (insnp-- > ready + deleted)
6579 while (insnp >= ready + deleted)
6580 {
6581 rtx insn = *insnp;
6582 if (! safe_group_barrier_needed (insn))
6583 break;
6584 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6585 *ready = insn;
6586 deleted++;
6587 }
6588 n_ready -= deleted;
6589 ready += deleted;
6590 }
6591
6592 return 1;
6593 }
6594
6595 /* We are about to begin issuing insns for this clock cycle. Override
6596 the default sort algorithm to better slot instructions. */
6597
6598 static int
6599 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6600 int clock_var)
6601 {
6602 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6603 pn_ready, clock_var, 0);
6604 }
6605
6606 /* Like ia64_sched_reorder, but called after issuing each insn.
6607 Override the default sort algorithm to better slot instructions. */
6608
6609 static int
6610 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6611 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6612 int *pn_ready, int clock_var)
6613 {
6614 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6615 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6616 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6617 clock_var, 1);
6618 }
6619
6620 /* We are about to issue INSN. Return the number of insns left on the
6621 ready queue that can be issued this cycle. */
6622
6623 static int
6624 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6625 int sched_verbose ATTRIBUTE_UNUSED,
6626 rtx insn ATTRIBUTE_UNUSED,
6627 int can_issue_more ATTRIBUTE_UNUSED)
6628 {
6629 if (current_sched_info->flags & DO_SPECULATION)
6630 /* Modulo scheduling does not extend h_i_d when emitting
6631 new instructions. Deal with it. */
6632 {
6633 if (DONE_SPEC (insn) & BEGIN_DATA)
6634 pending_data_specs++;
6635 if (CHECK_SPEC (insn) & BEGIN_DATA)
6636 pending_data_specs--;
6637 }
6638
6639 last_scheduled_insn = insn;
6640 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6641 if (reload_completed)
6642 {
6643 int needed = group_barrier_needed (insn);
6644
6645 gcc_assert (!needed);
6646 if (GET_CODE (insn) == CALL_INSN)
6647 init_insn_group_barriers ();
6648 stops_p [INSN_UID (insn)] = stop_before_p;
6649 stop_before_p = 0;
6650 }
6651 return 1;
6652 }
6653
6654 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6655 can be chosen. */
6656
6657 static int
6658 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6659 {
6660 gcc_assert (insn && INSN_P (insn));
6661 return ((!reload_completed
6662 || !safe_group_barrier_needed (insn))
6663 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6664 }
6665
6666 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6667 can be chosen. */
6668
6669 static bool
6670 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
6671 {
6672 gcc_assert (insn && INSN_P (insn));
6673 /* The ALAT has 32 entries. Since we perform conservative data speculation,
6674 we keep the ALAT half-empty. */
6675 return (pending_data_specs < 16
6676 || !(TODO_SPEC (insn) & BEGIN_DATA));
6677 }
6678
6679 /* The following variable value is a pseudo-insn used by the DFA insn
6680 scheduler to change the DFA state when the simulated clock is
6681 increased. */
6682
6683 static rtx dfa_pre_cycle_insn;
6684
6685 /* We are about to begin issuing INSN. Return nonzero if we cannot
6686 issue it on the given cycle CLOCK, and set *SORT_P to zero if we should
6687 not sort the ready queue on the next clock start. */
6688
6689 static int
6690 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6691 int clock, int *sort_p)
6692 {
6693 int setup_clocks_p = FALSE;
6694
6695 gcc_assert (insn && INSN_P (insn));
6696 if ((reload_completed && safe_group_barrier_needed (insn))
6697 || (last_scheduled_insn
6698 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6699 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6700 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6701 {
6702 init_insn_group_barriers ();
6703 if (verbose && dump)
6704 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6705 last_clock == clock ? " + cycle advance" : "");
6706 stop_before_p = 1;
6707 if (last_clock == clock)
6708 {
6709 state_transition (curr_state, dfa_stop_insn);
6710 if (TARGET_EARLY_STOP_BITS)
6711 *sort_p = (last_scheduled_insn == NULL_RTX
6712 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6713 else
6714 *sort_p = 0;
6715 return 1;
6716 }
6717 else if (reload_completed)
6718 setup_clocks_p = TRUE;
6719 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6720 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6721 state_reset (curr_state);
6722 else
6723 {
6724 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6725 state_transition (curr_state, dfa_stop_insn);
6726 state_transition (curr_state, dfa_pre_cycle_insn);
6727 state_transition (curr_state, NULL);
6728 }
6729 }
6730 else if (reload_completed)
6731 setup_clocks_p = TRUE;
6732 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6733 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6734 && asm_noperands (PATTERN (insn)) < 0)
6735 {
6736 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6737
6738 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6739 {
6740 sd_iterator_def sd_it;
6741 dep_t dep;
6742 int d = -1;
6743
6744 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6745 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6746 {
6747 enum attr_itanium_class dep_class;
6748 rtx dep_insn = DEP_PRO (dep);
6749
6750 dep_class = ia64_safe_itanium_class (dep_insn);
6751 if ((dep_class == ITANIUM_CLASS_MMMUL
6752 || dep_class == ITANIUM_CLASS_MMSHF)
6753 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6754 && (d < 0
6755 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6756 d = last_clock - clocks [INSN_UID (dep_insn)];
6757 }
6758 if (d >= 0)
6759 add_cycles [INSN_UID (insn)] = 3 - d;
6760 }
6761 }
6762 return 0;
6763 }
6764
6765 /* Implement targetm.sched.h_i_d_extended hook.
6766 Extend internal data structures. */
6767 static void
6768 ia64_h_i_d_extended (void)
6769 {
6770 if (current_sched_info->flags & DO_SPECULATION)
6771 {
6772 int new_max_uid = get_max_uid () + 1;
6773
6774 spec_check_no = xrecalloc (spec_check_no, new_max_uid,
6775 max_uid, sizeof (*spec_check_no));
6776 max_uid = new_max_uid;
6777 }
6778
6779 if (stops_p != NULL)
6780 {
6781 int new_clocks_length = get_max_uid () + 1;
6782
6783 stops_p = xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6784
6785 if (ia64_tune == PROCESSOR_ITANIUM)
6786 {
6787 clocks = xrecalloc (clocks, new_clocks_length, clocks_length,
6788 sizeof (int));
6789 add_cycles = xrecalloc (add_cycles, new_clocks_length, clocks_length,
6790 sizeof (int));
6791 }
6792
6793 clocks_length = new_clocks_length;
6794 }
6795 }
6796
6797 /* Constants that help map 'enum machine_mode' to int. */
6798 enum SPEC_MODES
6799 {
6800 SPEC_MODE_INVALID = -1,
6801 SPEC_MODE_FIRST = 0,
6802 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6803 SPEC_MODE_FOR_EXTEND_LAST = 3,
6804 SPEC_MODE_LAST = 8
6805 };
6806
6807 /* Return index of the MODE. */
6808 static int
6809 ia64_mode_to_int (enum machine_mode mode)
6810 {
6811 switch (mode)
6812 {
6813 case BImode: return 0; /* SPEC_MODE_FIRST */
6814 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6815 case HImode: return 2;
6816 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6817 case DImode: return 4;
6818 case SFmode: return 5;
6819 case DFmode: return 6;
6820 case XFmode: return 7;
6821 case TImode:
6822 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6823 mentioned in itanium[12].md. Predicate fp_register_operand also
6824 needs to be defined. Bottom line: better disable for now. */
6825 return SPEC_MODE_INVALID;
6826 default: return SPEC_MODE_INVALID;
6827 }
6828 }
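/* Note: QImode..SImode (indices 1..3) are the only modes that
   ia64_speculate_insn accepts under a ZERO_EXTEND source, which is what
   SPEC_MODE_FOR_EXTEND_FIRST / SPEC_MODE_FOR_EXTEND_LAST delimit.  */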
6829
6830 /* Provide information about speculation capabilities. */
6831 static void
6832 ia64_set_sched_flags (spec_info_t spec_info)
6833 {
6834 unsigned int *flags = &(current_sched_info->flags);
6835
6836 if (*flags & SCHED_RGN
6837 || *flags & SCHED_EBB)
6838 {
6839 int mask = 0;
6840
6841 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6842 || (mflag_sched_ar_data_spec && reload_completed))
6843 {
6844 mask |= BEGIN_DATA;
6845
6846 if ((mflag_sched_br_in_data_spec && !reload_completed)
6847 || (mflag_sched_ar_in_data_spec && reload_completed))
6848 mask |= BE_IN_DATA;
6849 }
6850
6851 if (mflag_sched_control_spec)
6852 {
6853 mask |= BEGIN_CONTROL;
6854
6855 if (mflag_sched_in_control_spec)
6856 mask |= BE_IN_CONTROL;
6857 }
6858
6859 if (mask)
6860 {
6861 *flags |= USE_DEPS_LIST | DO_SPECULATION;
6862
6863 if (mask & BE_IN_SPEC)
6864 *flags |= NEW_BBS;
6865
6866 spec_info->mask = mask;
6867 spec_info->flags = 0;
6868
6869 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6870 spec_info->flags |= PREFER_NON_DATA_SPEC;
6871
6872 if ((mask & CONTROL_SPEC)
6873 && mflag_sched_prefer_non_control_spec_insns)
6874 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6875
6876 if (mflag_sched_spec_verbose)
6877 {
6878 if (sched_verbose >= 1)
6879 spec_info->dump = sched_dump;
6880 else
6881 spec_info->dump = stderr;
6882 }
6883 else
6884 spec_info->dump = 0;
6885
6886 if (mflag_sched_count_spec_in_critical_path)
6887 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6888 }
6889 }
6890 }
6891
6892 /* Implement targetm.sched.speculate_insn hook.
6893 Check whether INSN can be made TS-speculative.
6894 If not, return -1.
6895 If so, generate the speculative pattern in NEW_PAT and return 1.
6896 If the current pattern of INSN already provides TS speculation, return 0. */
6897 static int
6898 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6899 {
6900 rtx pat, reg, mem, mem_reg;
6901 int mode_no, gen_p = 1;
6902 bool extend_p;
6903
6904 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6905
6906 pat = PATTERN (insn);
6907
6908 if (GET_CODE (pat) == COND_EXEC)
6909 pat = COND_EXEC_CODE (pat);
6910
6911 /* This should be a SET ... */
6912 if (GET_CODE (pat) != SET)
6913 return -1;
6914
6915 reg = SET_DEST (pat);
6916 /* ... to the general/fp register ... */
6917 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6918 return -1;
6919
6920 /* ... from the mem ... */
6921 mem = SET_SRC (pat);
6922
6923 /* ... that can, possibly, be a zero_extend ... */
6924 if (GET_CODE (mem) == ZERO_EXTEND)
6925 {
6926 mem = XEXP (mem, 0);
6927 extend_p = true;
6928 }
6929 else
6930 extend_p = false;
6931
6932 /* ... or a speculative load. */
6933 if (GET_CODE (mem) == UNSPEC)
6934 {
6935 int code;
6936
6937 code = XINT (mem, 1);
6938 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6939 return -1;
6940
6941 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6942 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6943 || code == UNSPEC_LDSA)
6944 gen_p = 0;
6945
6946 mem = XVECEXP (mem, 0, 0);
6947 gcc_assert (MEM_P (mem));
6948 }
6949
6950 /* Source should be a mem ... */
6951 if (!MEM_P (mem))
6952 return -1;
6953
6954 /* ... addressed by a register. */
6955 mem_reg = XEXP (mem, 0);
6956 if (!REG_P (mem_reg))
6957 return -1;
6958
6959 /* We should use MEM's mode since REG's mode in presence of ZERO_EXTEND
6960 will always be DImode. */
6961 mode_no = ia64_mode_to_int (GET_MODE (mem));
6962
6963 if (mode_no == SPEC_MODE_INVALID
6964 || (extend_p
6965 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6966 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6967 return -1;
6968
6969 extract_insn_cached (insn);
6970 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6971
6972 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6973
6974 return gen_p;
6975 }
6976
6977 enum
6978 {
6979 /* Offset to reach ZERO_EXTEND patterns. */
6980 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6981 /* Number of patterns for each speculation mode. */
6982 SPEC_N = (SPEC_MODE_LAST
6983 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6984 };
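/* For reference, with the SPEC_MODES values above this works out to
   SPEC_GEN_EXTEND_OFFSET == 8 and SPEC_N == 12: each group of the
   gen_load[] and gen_check[] tables below therefore holds 12 generators,
   nine mov patterns (BImode..TImode) followed by the three
   zero_extend*di2 patterns for QImode, HImode and SImode.  */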
6985
6986 enum SPEC_GEN_LD_MAP
6987 {
6988 /* Offset to ld.a patterns. */
6989 SPEC_GEN_A = 0 * SPEC_N,
6990 /* Offset to ld.s patterns. */
6991 SPEC_GEN_S = 1 * SPEC_N,
6992 /* Offset to ld.sa patterns. */
6993 SPEC_GEN_SA = 2 * SPEC_N,
6994 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c will
6995 mutate to chk.s. */
6996 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6997 };
6998
6999 /* These offsets are used to get (4 * SPEC_N). */
7000 enum SPEC_GEN_CHECK_OFFSET
7001 {
7002 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
7003 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
7004 };
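/* For reference: an ld.a load index (SPEC_GEN_A + mode) plus
   SPEC_GEN_CHKA_FOR_A_OFFSET, or an ld.sa index (SPEC_GEN_SA + mode) plus
   SPEC_GEN_CHKA_FOR_SA_OFFSET, both land at 4 * SPEC_N + mode, which is
   the gen_advanced_load_check_clr_* group of gen_check[] below.  */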
7005
7006 /* If GEN_P is true, calculate the index of needed speculation check and return
7007 speculative pattern for INSN with speculative mode TS, machine mode
7008 MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
7009 If GEN_P is false, just calculate the index of needed speculation check. */
7010 static rtx
7011 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
7012 {
7013 rtx pat, new_pat;
7014 int load_no;
7015 int shift = 0;
7016
7017 static rtx (* const gen_load[]) (rtx, rtx) = {
7018 gen_movbi_advanced,
7019 gen_movqi_advanced,
7020 gen_movhi_advanced,
7021 gen_movsi_advanced,
7022 gen_movdi_advanced,
7023 gen_movsf_advanced,
7024 gen_movdf_advanced,
7025 gen_movxf_advanced,
7026 gen_movti_advanced,
7027 gen_zero_extendqidi2_advanced,
7028 gen_zero_extendhidi2_advanced,
7029 gen_zero_extendsidi2_advanced,
7030
7031 gen_movbi_speculative,
7032 gen_movqi_speculative,
7033 gen_movhi_speculative,
7034 gen_movsi_speculative,
7035 gen_movdi_speculative,
7036 gen_movsf_speculative,
7037 gen_movdf_speculative,
7038 gen_movxf_speculative,
7039 gen_movti_speculative,
7040 gen_zero_extendqidi2_speculative,
7041 gen_zero_extendhidi2_speculative,
7042 gen_zero_extendsidi2_speculative,
7043
7044 gen_movbi_speculative_advanced,
7045 gen_movqi_speculative_advanced,
7046 gen_movhi_speculative_advanced,
7047 gen_movsi_speculative_advanced,
7048 gen_movdi_speculative_advanced,
7049 gen_movsf_speculative_advanced,
7050 gen_movdf_speculative_advanced,
7051 gen_movxf_speculative_advanced,
7052 gen_movti_speculative_advanced,
7053 gen_zero_extendqidi2_speculative_advanced,
7054 gen_zero_extendhidi2_speculative_advanced,
7055 gen_zero_extendsidi2_speculative_advanced,
7056
7057 gen_movbi_speculative_advanced,
7058 gen_movqi_speculative_advanced,
7059 gen_movhi_speculative_advanced,
7060 gen_movsi_speculative_advanced,
7061 gen_movdi_speculative_advanced,
7062 gen_movsf_speculative_advanced,
7063 gen_movdf_speculative_advanced,
7064 gen_movxf_speculative_advanced,
7065 gen_movti_speculative_advanced,
7066 gen_zero_extendqidi2_speculative_advanced,
7067 gen_zero_extendhidi2_speculative_advanced,
7068 gen_zero_extendsidi2_speculative_advanced
7069 };
7070
7071 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
7072
7073 if (ts & BEGIN_DATA)
7074 {
7075 /* We don't need recovery because even if this is ld.sa, the
7076 ALAT entry will be allocated only if the NAT bit is set to zero.
7077 So it is enough to use ld.c here. */
7078
7079 if (ts & BEGIN_CONTROL)
7080 {
7081 load_no += SPEC_GEN_SA;
7082
7083 if (!mflag_sched_ldc)
7084 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
7085 }
7086 else
7087 {
7088 load_no += SPEC_GEN_A;
7089
7090 if (!mflag_sched_ldc)
7091 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
7092 }
7093 }
7094 else if (ts & BEGIN_CONTROL)
7095 {
7096 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
7097 if (!mflag_control_ldc)
7098 load_no += SPEC_GEN_S;
7099 else
7100 {
7101 gcc_assert (mflag_sched_ldc);
7102 load_no += SPEC_GEN_SA_FOR_S;
7103 }
7104 }
7105 else
7106 gcc_unreachable ();
7107
7108 /* Set the desired check index. We add 1 because a zero element in this
7109 array means that the insn with that uid is non-speculative. */
7110 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
7111
7112 if (!gen_p)
7113 return 0;
7114
7115 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7116 copy_rtx (recog_data.operand[1]));
7117
7118 pat = PATTERN (insn);
7119 if (GET_CODE (pat) == COND_EXEC)
7120 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7121 (COND_EXEC_TEST (pat)), new_pat);
7122
7123 return new_pat;
7124 }
7125
7126 /* Offset to branchy checks. */
7127 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
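/* For reference, 5 * SPEC_N == 60 is where the "generated during mutation"
   groups start in the gen_check[] table below, so adding this offset in
   ia64_gen_check redirects an existing check number into those groups.  */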
7128
7129 /* Return nonzero if INSN needs a branchy recovery check. */
7130 static bool
7131 ia64_needs_block_p (const_rtx insn)
7132 {
7133 int check_no;
7134
7135 check_no = spec_check_no[INSN_UID(insn)] - 1;
7136 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7137
7138 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7139 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7140 }
7141
7142 /* Generate (or regenerate, if MUTATE_P) a recovery check for INSN.
7143 If (LABEL != 0 || MUTATE_P), generate a branchy recovery check.
7144 Otherwise, generate a simple check. */
7145 static rtx
7146 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7147 {
7148 rtx op1, pat, check_pat;
7149
7150 static rtx (* const gen_check[]) (rtx, rtx) = {
7151 gen_movbi_clr,
7152 gen_movqi_clr,
7153 gen_movhi_clr,
7154 gen_movsi_clr,
7155 gen_movdi_clr,
7156 gen_movsf_clr,
7157 gen_movdf_clr,
7158 gen_movxf_clr,
7159 gen_movti_clr,
7160 gen_zero_extendqidi2_clr,
7161 gen_zero_extendhidi2_clr,
7162 gen_zero_extendsidi2_clr,
7163
7164 gen_speculation_check_bi,
7165 gen_speculation_check_qi,
7166 gen_speculation_check_hi,
7167 gen_speculation_check_si,
7168 gen_speculation_check_di,
7169 gen_speculation_check_sf,
7170 gen_speculation_check_df,
7171 gen_speculation_check_xf,
7172 gen_speculation_check_ti,
7173 gen_speculation_check_di,
7174 gen_speculation_check_di,
7175 gen_speculation_check_di,
7176
7177 gen_movbi_clr,
7178 gen_movqi_clr,
7179 gen_movhi_clr,
7180 gen_movsi_clr,
7181 gen_movdi_clr,
7182 gen_movsf_clr,
7183 gen_movdf_clr,
7184 gen_movxf_clr,
7185 gen_movti_clr,
7186 gen_zero_extendqidi2_clr,
7187 gen_zero_extendhidi2_clr,
7188 gen_zero_extendsidi2_clr,
7189
7190 gen_movbi_clr,
7191 gen_movqi_clr,
7192 gen_movhi_clr,
7193 gen_movsi_clr,
7194 gen_movdi_clr,
7195 gen_movsf_clr,
7196 gen_movdf_clr,
7197 gen_movxf_clr,
7198 gen_movti_clr,
7199 gen_zero_extendqidi2_clr,
7200 gen_zero_extendhidi2_clr,
7201 gen_zero_extendsidi2_clr,
7202
7203 gen_advanced_load_check_clr_bi,
7204 gen_advanced_load_check_clr_qi,
7205 gen_advanced_load_check_clr_hi,
7206 gen_advanced_load_check_clr_si,
7207 gen_advanced_load_check_clr_di,
7208 gen_advanced_load_check_clr_sf,
7209 gen_advanced_load_check_clr_df,
7210 gen_advanced_load_check_clr_xf,
7211 gen_advanced_load_check_clr_ti,
7212 gen_advanced_load_check_clr_di,
7213 gen_advanced_load_check_clr_di,
7214 gen_advanced_load_check_clr_di,
7215
7216 /* Following checks are generated during mutation. */
7217 gen_advanced_load_check_clr_bi,
7218 gen_advanced_load_check_clr_qi,
7219 gen_advanced_load_check_clr_hi,
7220 gen_advanced_load_check_clr_si,
7221 gen_advanced_load_check_clr_di,
7222 gen_advanced_load_check_clr_sf,
7223 gen_advanced_load_check_clr_df,
7224 gen_advanced_load_check_clr_xf,
7225 gen_advanced_load_check_clr_ti,
7226 gen_advanced_load_check_clr_di,
7227 gen_advanced_load_check_clr_di,
7228 gen_advanced_load_check_clr_di,
7229
7230 0,0,0,0,0,0,0,0,0,0,0,0,
7231
7232 gen_advanced_load_check_clr_bi,
7233 gen_advanced_load_check_clr_qi,
7234 gen_advanced_load_check_clr_hi,
7235 gen_advanced_load_check_clr_si,
7236 gen_advanced_load_check_clr_di,
7237 gen_advanced_load_check_clr_sf,
7238 gen_advanced_load_check_clr_df,
7239 gen_advanced_load_check_clr_xf,
7240 gen_advanced_load_check_clr_ti,
7241 gen_advanced_load_check_clr_di,
7242 gen_advanced_load_check_clr_di,
7243 gen_advanced_load_check_clr_di,
7244
7245 gen_speculation_check_bi,
7246 gen_speculation_check_qi,
7247 gen_speculation_check_hi,
7248 gen_speculation_check_si,
7249 gen_speculation_check_di,
7250 gen_speculation_check_sf,
7251 gen_speculation_check_df,
7252 gen_speculation_check_xf,
7253 gen_speculation_check_ti,
7254 gen_speculation_check_di,
7255 gen_speculation_check_di,
7256 gen_speculation_check_di
7257 };
7258
7259 extract_insn_cached (insn);
7260
7261 if (label)
7262 {
7263 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7264 op1 = label;
7265 }
7266 else
7267 {
7268 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7269 op1 = copy_rtx (recog_data.operand[1]);
7270 }
7271
7272 if (mutate_p)
7273 /* INSN is an ld.c.
7274 Find the speculation check number by searching for the original
7275 speculative load in the RESOLVED_DEPS list of INSN.
7276 As long as patterns are unique for each instruction, this can be
7277 accomplished by matching ORIG_PAT fields. */
7278 {
7279 sd_iterator_def sd_it;
7280 dep_t dep;
7281 int check_no = 0;
7282 rtx orig_pat = ORIG_PAT (insn);
7283
7284 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7285 {
7286 rtx x = DEP_PRO (dep);
7287
7288 if (ORIG_PAT (x) == orig_pat)
7289 check_no = spec_check_no[INSN_UID (x)];
7290 }
7291 gcc_assert (check_no);
7292
7293 spec_check_no[INSN_UID (insn)] = (check_no
7294 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7295 }
7296
7297 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7298 (copy_rtx (recog_data.operand[0]), op1));
7299
7300 pat = PATTERN (insn);
7301 if (GET_CODE (pat) == COND_EXEC)
7302 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7303 check_pat);
7304
7305 return check_pat;
7306 }
7307
7308 /* Return nonzero if X is a branchy recovery check. */
7309 static int
7310 ia64_spec_check_p (rtx x)
7311 {
7312 x = PATTERN (x);
7313 if (GET_CODE (x) == COND_EXEC)
7314 x = COND_EXEC_CODE (x);
7315 if (GET_CODE (x) == SET)
7316 return ia64_spec_check_src_p (SET_SRC (x));
7317 return 0;
7318 }
7319
7320 /* Return nonzero if SRC belongs to a recovery check. */
7321 static int
7322 ia64_spec_check_src_p (rtx src)
7323 {
7324 if (GET_CODE (src) == IF_THEN_ELSE)
7325 {
7326 rtx t;
7327
7328 t = XEXP (src, 0);
7329 if (GET_CODE (t) == NE)
7330 {
7331 t = XEXP (t, 0);
7332
7333 if (GET_CODE (t) == UNSPEC)
7334 {
7335 int code;
7336
7337 code = XINT (t, 1);
7338
7339 if (code == UNSPEC_CHKACLR
7340 || code == UNSPEC_CHKS
7341 || code == UNSPEC_LDCCLR)
7342 {
7343 gcc_assert (code != 0);
7344 return code;
7345 }
7346 }
7347 }
7348 }
7349 return 0;
7350 }
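
/* Note: the nonzero value returned above is the UNSPEC code itself
   (UNSPEC_CHKACLR, UNSPEC_CHKS or UNSPEC_LDCCLR), so a caller can both
   test whether the insn is a recovery check and, if it cares, tell
   which kind of check it is from the same value.  */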
7351 \f
7352
7353 /* The following page contains the abstract data `bundle states' which
7354 are used for bundling insns (inserting nops and generating templates).  */
7355
7356 /* The following describes state of insn bundling. */
7357
7358 struct bundle_state
7359 {
7360 /* Unique bundle state number, used to identify states in the
7361 debugging output.  */
7362 int unique_num;
7363 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7364 /* number of nops before and after the insn */
7365 short before_nops_num, after_nops_num;
7366 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7367 insn) */
7368 int cost; /* cost of the state in cycles */
7369 int accumulated_insns_num; /* number of all previous insns including
7370 nops.  An L insn is counted as 2 insns */
7371 int branch_deviation; /* deviation of previous branches from 3rd slots */
7372 struct bundle_state *next; /* next state with the same insn_num */
7373 struct bundle_state *originator; /* originator (previous insn state) */
7374 /* All bundle states are in the following chain. */
7375 struct bundle_state *allocated_states_chain;
7376 /* The DFA State after issuing the insn and the nops. */
7377 state_t dfa_state;
7378 };
7379
7380 /* The following maps an insn number to the corresponding list of bundle states.  */
7381
7382 static struct bundle_state **index_to_bundle_states;
7383
7384 /* The unique number of the next bundle state.  */
7385
7386 static int bundle_states_num;
7387
7388 /* All allocated bundle states are in the following chain. */
7389
7390 static struct bundle_state *allocated_bundle_states_chain;
7391
7392 /* All allocated but not used bundle states are in the following
7393 chain. */
7394
7395 static struct bundle_state *free_bundle_state_chain;
7396
7397
7398 /* The following function returns a free bundle state. */
7399
7400 static struct bundle_state *
7401 get_free_bundle_state (void)
7402 {
7403 struct bundle_state *result;
7404
7405 if (free_bundle_state_chain != NULL)
7406 {
7407 result = free_bundle_state_chain;
7408 free_bundle_state_chain = result->next;
7409 }
7410 else
7411 {
7412 result = xmalloc (sizeof (struct bundle_state));
7413 result->dfa_state = xmalloc (dfa_state_size);
7414 result->allocated_states_chain = allocated_bundle_states_chain;
7415 allocated_bundle_states_chain = result;
7416 }
7417 result->unique_num = bundle_states_num++;
7418 return result;
7419
7420 }
7421
7422 /* The following function frees the given bundle state.  */
7423
7424 static void
7425 free_bundle_state (struct bundle_state *state)
7426 {
7427 state->next = free_bundle_state_chain;
7428 free_bundle_state_chain = state;
7429 }
7430
7431 /* Start work with abstract data `bundle states'. */
7432
7433 static void
7434 initiate_bundle_states (void)
7435 {
7436 bundle_states_num = 0;
7437 free_bundle_state_chain = NULL;
7438 allocated_bundle_states_chain = NULL;
7439 }
7440
7441 /* Finish work with abstract data `bundle states'. */
7442
7443 static void
7444 finish_bundle_states (void)
7445 {
7446 struct bundle_state *curr_state, *next_state;
7447
7448 for (curr_state = allocated_bundle_states_chain;
7449 curr_state != NULL;
7450 curr_state = next_state)
7451 {
7452 next_state = curr_state->allocated_states_chain;
7453 free (curr_state->dfa_state);
7454 free (curr_state);
7455 }
7456 }
7457
7458 /* Hash table of the bundle states.  The key is the dfa_state and
7459 insn_num of a bundle state.  */
7460
7461 static htab_t bundle_state_table;
7462
7463 /* The function returns the hash of BUNDLE_STATE.  */
7464
7465 static unsigned
7466 bundle_state_hash (const void *bundle_state)
7467 {
7468 const struct bundle_state *const state
7469 = (const struct bundle_state *) bundle_state;
7470 unsigned result, i;
7471
7472 for (result = i = 0; i < dfa_state_size; i++)
7473 result += (((unsigned char *) state->dfa_state) [i]
7474 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7475 return result + state->insn_num;
7476 }
7477
7478 /* The function returns nonzero if the bundle state keys are equal. */
7479
7480 static int
7481 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7482 {
7483 const struct bundle_state *const state1
7484 = (const struct bundle_state *) bundle_state_1;
7485 const struct bundle_state *const state2
7486 = (const struct bundle_state *) bundle_state_2;
7487
7488 return (state1->insn_num == state2->insn_num
7489 && memcmp (state1->dfa_state, state2->dfa_state,
7490 dfa_state_size) == 0);
7491 }
7492
7493 /* The function inserts BUNDLE_STATE into the hash table and returns
7494 nonzero if it has been inserted.  The table keeps only the best
7495 bundle state for a given key.  */
7496
7497 static int
7498 insert_bundle_state (struct bundle_state *bundle_state)
7499 {
7500 void **entry_ptr;
7501
7502 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7503 if (*entry_ptr == NULL)
7504 {
7505 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7506 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7507 *entry_ptr = (void *) bundle_state;
7508 return TRUE;
7509 }
7510 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7511 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7512 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7513 > bundle_state->accumulated_insns_num
7514 || (((struct bundle_state *)
7515 *entry_ptr)->accumulated_insns_num
7516 == bundle_state->accumulated_insns_num
7517 && ((struct bundle_state *)
7518 *entry_ptr)->branch_deviation
7519 > bundle_state->branch_deviation))))
7520
7521 {
7522 struct bundle_state temp;
7523
7524 temp = *(struct bundle_state *) *entry_ptr;
7525 *(struct bundle_state *) *entry_ptr = *bundle_state;
7526 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7527 *bundle_state = temp;
7528 }
7529 return FALSE;
7530 }
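
#if 0
/* Illustrative sketch only (not compiled): the replacement condition in
   insert_bundle_state above is equivalent to this lexicographic
   comparison -- first by cost, then by the number of issued insns and
   nops, and finally by branch deviation.  */
static int
bundle_state_better_p (const struct bundle_state *a,
                       const struct bundle_state *b)
{
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->accumulated_insns_num != b->accumulated_insns_num)
    return a->accumulated_insns_num < b->accumulated_insns_num;
  return a->branch_deviation < b->branch_deviation;
}
#endif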
7531
7532 /* Start work with the hash table. */
7533
7534 static void
7535 initiate_bundle_state_table (void)
7536 {
7537 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7538 (htab_del) 0);
7539 }
7540
7541 /* Finish work with the hash table. */
7542
7543 static void
7544 finish_bundle_state_table (void)
7545 {
7546 htab_delete (bundle_state_table);
7547 }
7548
7549 \f
7550
7551 /* The following variable is an insn `nop' used to check bundle states
7552 with different numbers of inserted nops.  */
7553
7554 static rtx ia64_nop;
7555
7556 /* The following function tries to issue NOPS_NUM nops for the current
7557 state without advancing the processor cycle.  If this fails, the
7558 function returns FALSE and frees the current state.  */
7559
7560 static int
7561 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7562 {
7563 int i;
7564
7565 for (i = 0; i < nops_num; i++)
7566 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7567 {
7568 free_bundle_state (curr_state);
7569 return FALSE;
7570 }
7571 return TRUE;
7572 }
7573
7574 /* The following function tries to issue INSN for the current
7575 state without advancing the processor cycle.  If this fails, the
7576 function returns FALSE and frees the current state.  */
7577
7578 static int
7579 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7580 {
7581 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7582 {
7583 free_bundle_state (curr_state);
7584 return FALSE;
7585 }
7586 return TRUE;
7587 }
7588
7589 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7590 starting from ORIGINATOR without advancing the processor cycle.  If
7591 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7592 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole
7593 bundle.  If successful, the function creates a new bundle state and
7594 inserts it into the hash table and into `index_to_bundle_states'.  */
7595
7596 static void
7597 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7598 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7599 {
7600 struct bundle_state *curr_state;
7601
7602 curr_state = get_free_bundle_state ();
7603 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7604 curr_state->insn = insn;
7605 curr_state->insn_num = originator->insn_num + 1;
7606 curr_state->cost = originator->cost;
7607 curr_state->originator = originator;
7608 curr_state->before_nops_num = before_nops_num;
7609 curr_state->after_nops_num = 0;
7610 curr_state->accumulated_insns_num
7611 = originator->accumulated_insns_num + before_nops_num;
7612 curr_state->branch_deviation = originator->branch_deviation;
7613 gcc_assert (insn);
7614 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7615 {
7616 gcc_assert (GET_MODE (insn) != TImode);
7617 if (!try_issue_nops (curr_state, before_nops_num))
7618 return;
7619 if (!try_issue_insn (curr_state, insn))
7620 return;
7621 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7622 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7623 && curr_state->accumulated_insns_num % 3 != 0)
7624 {
7625 free_bundle_state (curr_state);
7626 return;
7627 }
7628 }
7629 else if (GET_MODE (insn) != TImode)
7630 {
7631 if (!try_issue_nops (curr_state, before_nops_num))
7632 return;
7633 if (!try_issue_insn (curr_state, insn))
7634 return;
7635 curr_state->accumulated_insns_num++;
7636 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7637 && asm_noperands (PATTERN (insn)) < 0);
7638
7639 if (ia64_safe_type (insn) == TYPE_L)
7640 curr_state->accumulated_insns_num++;
7641 }
7642 else
7643 {
7644 /* If this is an insn that must be first in a group, then don't allow
7645 nops to be emitted before it. Currently, alloc is the only such
7646 supported instruction. */
7647 /* ??? The bundling automatons should handle this for us, but they do
7648 not yet have support for the first_insn attribute. */
7649 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7650 {
7651 free_bundle_state (curr_state);
7652 return;
7653 }
7654
7655 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7656 state_transition (curr_state->dfa_state, NULL);
7657 curr_state->cost++;
7658 if (!try_issue_nops (curr_state, before_nops_num))
7659 return;
7660 if (!try_issue_insn (curr_state, insn))
7661 return;
7662 curr_state->accumulated_insns_num++;
7663 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7664 || asm_noperands (PATTERN (insn)) >= 0)
7665 {
7666 /* Finish bundle containing asm insn. */
7667 curr_state->after_nops_num
7668 = 3 - curr_state->accumulated_insns_num % 3;
7669 curr_state->accumulated_insns_num
7670 += 3 - curr_state->accumulated_insns_num % 3;
7671 }
7672 else if (ia64_safe_type (insn) == TYPE_L)
7673 curr_state->accumulated_insns_num++;
7674 }
7675 if (ia64_safe_type (insn) == TYPE_B)
7676 curr_state->branch_deviation
7677 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7678 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7679 {
7680 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7681 {
7682 state_t dfa_state;
7683 struct bundle_state *curr_state1;
7684 struct bundle_state *allocated_states_chain;
7685
7686 curr_state1 = get_free_bundle_state ();
7687 dfa_state = curr_state1->dfa_state;
7688 allocated_states_chain = curr_state1->allocated_states_chain;
7689 *curr_state1 = *curr_state;
7690 curr_state1->dfa_state = dfa_state;
7691 curr_state1->allocated_states_chain = allocated_states_chain;
7692 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7693 dfa_state_size);
7694 curr_state = curr_state1;
7695 }
7696 if (!try_issue_nops (curr_state,
7697 3 - curr_state->accumulated_insns_num % 3))
7698 return;
7699 curr_state->after_nops_num
7700 = 3 - curr_state->accumulated_insns_num % 3;
7701 curr_state->accumulated_insns_num
7702 += 3 - curr_state->accumulated_insns_num % 3;
7703 }
7704 if (!insert_bundle_state (curr_state))
7705 free_bundle_state (curr_state);
7706 return;
7707 }
7708
7709 /* The following function returns the position in the two-bundle
7710 window for the given STATE.  */
7711
7712 static int
7713 get_max_pos (state_t state)
7714 {
7715 if (cpu_unit_reservation_p (state, pos_6))
7716 return 6;
7717 else if (cpu_unit_reservation_p (state, pos_5))
7718 return 5;
7719 else if (cpu_unit_reservation_p (state, pos_4))
7720 return 4;
7721 else if (cpu_unit_reservation_p (state, pos_3))
7722 return 3;
7723 else if (cpu_unit_reservation_p (state, pos_2))
7724 return 2;
7725 else if (cpu_unit_reservation_p (state, pos_1))
7726 return 1;
7727 else
7728 return 0;
7729 }
7730
7731 /* The function returns the code of a possible template for the given
7732 position and state.  It should be called only with the two position
7733 values 3 or 6.  We avoid generating F NOPs by putting templates
7734 containing F insns at the end of the template search, because an
7735 undocumented anomaly in McKinley-derived cores can cause stalls if an
7736 F-unit insn (including a NOP) is issued within a six-cycle window
7737 after reading certain application registers (such as ar.bsp).
7738 Furthermore, power considerations also argue against the use of
7739 F-unit instructions unless they are really needed.  */
7740
7741 static int
7742 get_template (state_t state, int pos)
7743 {
7744 switch (pos)
7745 {
7746 case 3:
7747 if (cpu_unit_reservation_p (state, _0mmi_))
7748 return 1;
7749 else if (cpu_unit_reservation_p (state, _0mii_))
7750 return 0;
7751 else if (cpu_unit_reservation_p (state, _0mmb_))
7752 return 7;
7753 else if (cpu_unit_reservation_p (state, _0mib_))
7754 return 6;
7755 else if (cpu_unit_reservation_p (state, _0mbb_))
7756 return 5;
7757 else if (cpu_unit_reservation_p (state, _0bbb_))
7758 return 4;
7759 else if (cpu_unit_reservation_p (state, _0mmf_))
7760 return 3;
7761 else if (cpu_unit_reservation_p (state, _0mfi_))
7762 return 2;
7763 else if (cpu_unit_reservation_p (state, _0mfb_))
7764 return 8;
7765 else if (cpu_unit_reservation_p (state, _0mlx_))
7766 return 9;
7767 else
7768 gcc_unreachable ();
7769 case 6:
7770 if (cpu_unit_reservation_p (state, _1mmi_))
7771 return 1;
7772 else if (cpu_unit_reservation_p (state, _1mii_))
7773 return 0;
7774 else if (cpu_unit_reservation_p (state, _1mmb_))
7775 return 7;
7776 else if (cpu_unit_reservation_p (state, _1mib_))
7777 return 6;
7778 else if (cpu_unit_reservation_p (state, _1mbb_))
7779 return 5;
7780 else if (cpu_unit_reservation_p (state, _1bbb_))
7781 return 4;
7782 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7783 return 3;
7784 else if (cpu_unit_reservation_p (state, _1mfi_))
7785 return 2;
7786 else if (cpu_unit_reservation_p (state, _1mfb_))
7787 return 8;
7788 else if (cpu_unit_reservation_p (state, _1mlx_))
7789 return 9;
7790 else
7791 gcc_unreachable ();
7792 default:
7793 gcc_unreachable ();
7794 }
7795 }
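
/* For reference: the template codes returned above are assumed to index
   the bundle names in the usual order (this can be inferred from the
   other uses of bundle selectors in this file, e.g. 9 = MLX, 2 = MFI,
   0 = MII, 4/5 = BBB/MBB):

     0 .mii   1 .mmi   2 .mfi   3 .mmf   4 .bbb
     5 .mbb   6 .mib   7 .mmb   8 .mfb   9 .mlx  */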
7796
7797 /* The following function returns the first insn important for insn
7798 bundling, starting at INSN and stopping before TAIL.  */
7799
7800 static rtx
7801 get_next_important_insn (rtx insn, rtx tail)
7802 {
7803 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7804 if (INSN_P (insn)
7805 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7806 && GET_CODE (PATTERN (insn)) != USE
7807 && GET_CODE (PATTERN (insn)) != CLOBBER)
7808 return insn;
7809 return NULL_RTX;
7810 }
7811
7812 /* Add a bundle selector TEMPLATE0 before INSN. */
7813
7814 static void
7815 ia64_add_bundle_selector_before (int template0, rtx insn)
7816 {
7817 rtx b = gen_bundle_selector (GEN_INT (template0));
7818
7819 ia64_emit_insn_before (b, insn);
7820 #if NR_BUNDLES == 10
7821 if ((template0 == 4 || template0 == 5)
7822 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7823 {
7824 int i;
7825 rtx note = NULL_RTX;
7826
7827 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
7828 first or second slot. If it is and has REG_EH_NOTE set, copy it
7829 to following nops, as br.call sets rp to the address of following
7830 bundle and therefore an EH region end must be on a bundle
7831 boundary. */
7832 insn = PREV_INSN (insn);
7833 for (i = 0; i < 3; i++)
7834 {
7835 do
7836 insn = next_active_insn (insn);
7837 while (GET_CODE (insn) == INSN
7838 && get_attr_empty (insn) == EMPTY_YES);
7839 if (GET_CODE (insn) == CALL_INSN)
7840 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7841 else if (note)
7842 {
7843 int code;
7844
7845 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7846 || code == CODE_FOR_nop_b);
7847 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7848 note = NULL_RTX;
7849 else
7850 REG_NOTES (insn)
7851 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7852 REG_NOTES (insn));
7853 }
7854 }
7855 }
7856 #endif
7857 }
7858
7859 /* The following function does insn bundling.  Bundling means
7860 inserting templates and nop insns to fit insn groups into permitted
7861 templates.  Instruction scheduling uses an NDFA (non-deterministic
7862 finite automaton) encoding information about the templates and the
7863 inserted nops.  The nondeterminism of the automaton permits following
7864 all possible insn sequences very quickly.
7865
7866 Unfortunately it is not possible to get information about inserted
7867 nop insns and used templates from the automaton states.  The
7868 automaton only says that we can issue an insn, possibly inserting
7869 some nops before it and using some template.  Therefore insn
7870 bundling in this function is implemented using a DFA
7871 (deterministic finite automaton).  We follow all possible insn
7872 sequences by inserting 0-2 nops (that is what the NDFA describes for
7873 insn scheduling) before/after each insn being bundled.  We know the
7874 start of a simulated processor cycle from insn scheduling (an insn
7875 starting a new cycle has TImode).
7876
7877 A simple implementation of insn bundling would create an enormous
7878 number of possible insn sequences satisfying the information about
7879 new cycle ticks taken from insn scheduling.  To make the algorithm
7880 practical we use dynamic programming.  Each decision (about
7881 inserting nops and implicitly about previous decisions) is described
7882 by the structure bundle_state (see above).  If we generate the same
7883 bundle state (the key is the automaton state after issuing the insn
7884 and nops for it), we reuse the already generated one.  As a
7885 consequence we reject some decisions which cannot improve the
7886 solution and reduce the memory used by the algorithm.
7887
7888 When we reach the end of the EBB (extended basic block), we choose
7889 the best sequence and then, moving back through the EBB, insert
7890 templates for the best alternative.  The templates are taken by
7891 querying the automaton state for each insn in the chosen bundle states.
7892
7893 So the algorithm makes two (forward and backward) passes through the
7894 EBB.  There is an additional forward pass through the EBB for the
7895 Itanium1 processor.  This pass inserts more nops to make the dependency
7896 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long.  */
7897
7898 static void
7899 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7900 {
7901 struct bundle_state *curr_state, *next_state, *best_state;
7902 rtx insn, next_insn;
7903 int insn_num;
7904 int i, bundle_end_p, only_bundle_end_p, asm_p;
7905 int pos = 0, max_pos, template0, template1;
7906 rtx b;
7907 rtx nop;
7908 enum attr_type type;
7909
7910 insn_num = 0;
7911 /* Count insns in the EBB. */
7912 for (insn = NEXT_INSN (prev_head_insn);
7913 insn && insn != tail;
7914 insn = NEXT_INSN (insn))
7915 if (INSN_P (insn))
7916 insn_num++;
7917 if (insn_num == 0)
7918 return;
7919 bundling_p = 1;
7920 dfa_clean_insn_cache ();
7921 initiate_bundle_state_table ();
7922 index_to_bundle_states = xmalloc ((insn_num + 2)
7923 * sizeof (struct bundle_state *));
7924 /* First (forward) pass -- generation of bundle states. */
7925 curr_state = get_free_bundle_state ();
7926 curr_state->insn = NULL;
7927 curr_state->before_nops_num = 0;
7928 curr_state->after_nops_num = 0;
7929 curr_state->insn_num = 0;
7930 curr_state->cost = 0;
7931 curr_state->accumulated_insns_num = 0;
7932 curr_state->branch_deviation = 0;
7933 curr_state->next = NULL;
7934 curr_state->originator = NULL;
7935 state_reset (curr_state->dfa_state);
7936 index_to_bundle_states [0] = curr_state;
7937 insn_num = 0;
7938 /* Shift the cycle mark if it is put on an insn which could be ignored.  */
7939 for (insn = NEXT_INSN (prev_head_insn);
7940 insn != tail;
7941 insn = NEXT_INSN (insn))
7942 if (INSN_P (insn)
7943 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7944 || GET_CODE (PATTERN (insn)) == USE
7945 || GET_CODE (PATTERN (insn)) == CLOBBER)
7946 && GET_MODE (insn) == TImode)
7947 {
7948 PUT_MODE (insn, VOIDmode);
7949 for (next_insn = NEXT_INSN (insn);
7950 next_insn != tail;
7951 next_insn = NEXT_INSN (next_insn))
7952 if (INSN_P (next_insn)
7953 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7954 && GET_CODE (PATTERN (next_insn)) != USE
7955 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7956 {
7957 PUT_MODE (next_insn, TImode);
7958 break;
7959 }
7960 }
7961 /* Forward pass: generation of bundle states. */
7962 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7963 insn != NULL_RTX;
7964 insn = next_insn)
7965 {
7966 gcc_assert (INSN_P (insn)
7967 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7968 && GET_CODE (PATTERN (insn)) != USE
7969 && GET_CODE (PATTERN (insn)) != CLOBBER);
7970 type = ia64_safe_type (insn);
7971 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7972 insn_num++;
7973 index_to_bundle_states [insn_num] = NULL;
7974 for (curr_state = index_to_bundle_states [insn_num - 1];
7975 curr_state != NULL;
7976 curr_state = next_state)
7977 {
7978 pos = curr_state->accumulated_insns_num % 3;
7979 next_state = curr_state->next;
7980 /* We must fill up the current bundle in order to start a
7981 subsequent asm insn in a new bundle.  An asm insn is always
7982 placed in a separate bundle.  */
7983 only_bundle_end_p
7984 = (next_insn != NULL_RTX
7985 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7986 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7987 /* We may fill up the current bundle if it is the cycle end
7988 without a group barrier. */
7989 bundle_end_p
7990 = (only_bundle_end_p || next_insn == NULL_RTX
7991 || (GET_MODE (next_insn) == TImode
7992 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7993 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7994 || type == TYPE_S
7995 /* We need to insert 2 nops for cases like M_MII. To
7996 guarantee issuing all insns on the same cycle for
7997 Itanium 1, we need to issue 2 nops after the first M
7998 insn (MnnMII where n is a nop insn). */
7999 || ((type == TYPE_M || type == TYPE_A)
8000 && ia64_tune == PROCESSOR_ITANIUM
8001 && !bundle_end_p && pos == 1))
8002 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8003 only_bundle_end_p);
8004 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8005 only_bundle_end_p);
8006 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8007 only_bundle_end_p);
8008 }
8009 gcc_assert (index_to_bundle_states [insn_num]);
8010 for (curr_state = index_to_bundle_states [insn_num];
8011 curr_state != NULL;
8012 curr_state = curr_state->next)
8013 if (verbose >= 2 && dump)
8014 {
8015 /* This structure is taken from the generated code of the
8016 pipeline hazard recognizer (see file insn-attrtab.c).
8017 Please don't forget to change the structure if a new
8018 automaton is added to the .md file.  */
8019 struct DFA_chip
8020 {
8021 unsigned short one_automaton_state;
8022 unsigned short oneb_automaton_state;
8023 unsigned short two_automaton_state;
8024 unsigned short twob_automaton_state;
8025 };
8026
8027 fprintf
8028 (dump,
8029 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8030 curr_state->unique_num,
8031 (curr_state->originator == NULL
8032 ? -1 : curr_state->originator->unique_num),
8033 curr_state->cost,
8034 curr_state->before_nops_num, curr_state->after_nops_num,
8035 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8036 (ia64_tune == PROCESSOR_ITANIUM
8037 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8038 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8039 INSN_UID (insn));
8040 }
8041 }
8042
8043 /* We should find a solution because the 2nd insn scheduling has
8044 found one. */
8045 gcc_assert (index_to_bundle_states [insn_num]);
8046 /* Find a state corresponding to the best insn sequence. */
8047 best_state = NULL;
8048 for (curr_state = index_to_bundle_states [insn_num];
8049 curr_state != NULL;
8050 curr_state = curr_state->next)
8051 /* We only look at states whose last bundle is completely filled.
8052 First we prefer insn sequences with minimal cost, then those
8053 with the fewest inserted nops, and finally those with branch
8054 insns placed in the 3rd slots.  */
8055 if (curr_state->accumulated_insns_num % 3 == 0
8056 && (best_state == NULL || best_state->cost > curr_state->cost
8057 || (best_state->cost == curr_state->cost
8058 && (curr_state->accumulated_insns_num
8059 < best_state->accumulated_insns_num
8060 || (curr_state->accumulated_insns_num
8061 == best_state->accumulated_insns_num
8062 && curr_state->branch_deviation
8063 < best_state->branch_deviation)))))
8064 best_state = curr_state;
8065 /* Second (backward) pass: adding nops and templates. */
8066 insn_num = best_state->before_nops_num;
8067 template0 = template1 = -1;
8068 for (curr_state = best_state;
8069 curr_state->originator != NULL;
8070 curr_state = curr_state->originator)
8071 {
8072 insn = curr_state->insn;
8073 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8074 || asm_noperands (PATTERN (insn)) >= 0);
8075 insn_num++;
8076 if (verbose >= 2 && dump)
8077 {
8078 struct DFA_chip
8079 {
8080 unsigned short one_automaton_state;
8081 unsigned short oneb_automaton_state;
8082 unsigned short two_automaton_state;
8083 unsigned short twob_automaton_state;
8084 };
8085
8086 fprintf
8087 (dump,
8088 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8089 curr_state->unique_num,
8090 (curr_state->originator == NULL
8091 ? -1 : curr_state->originator->unique_num),
8092 curr_state->cost,
8093 curr_state->before_nops_num, curr_state->after_nops_num,
8094 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8095 (ia64_tune == PROCESSOR_ITANIUM
8096 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8097 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8098 INSN_UID (insn));
8099 }
8100 /* Find the position in the current bundle window.  The window can
8101 contain at most two bundles.  A two-bundle window means that
8102 the processor will make two bundle rotations.  */
8103 max_pos = get_max_pos (curr_state->dfa_state);
8104 if (max_pos == 6
8105 /* The following (negative template number) means that the
8106 processor did one bundle rotation. */
8107 || (max_pos == 3 && template0 < 0))
8108 {
8109 /* We are at the end of the window -- find template(s) for
8110 its bundle(s). */
8111 pos = max_pos;
8112 if (max_pos == 3)
8113 template0 = get_template (curr_state->dfa_state, 3);
8114 else
8115 {
8116 template1 = get_template (curr_state->dfa_state, 3);
8117 template0 = get_template (curr_state->dfa_state, 6);
8118 }
8119 }
8120 if (max_pos > 3 && template1 < 0)
8121 /* This may happen when we have a stop inside a bundle.  */
8122 {
8123 gcc_assert (pos <= 3);
8124 template1 = get_template (curr_state->dfa_state, 3);
8125 pos += 3;
8126 }
8127 if (!asm_p)
8128 /* Emit nops after the current insn. */
8129 for (i = 0; i < curr_state->after_nops_num; i++)
8130 {
8131 nop = gen_nop ();
8132 emit_insn_after (nop, insn);
8133 pos--;
8134 gcc_assert (pos >= 0);
8135 if (pos % 3 == 0)
8136 {
8137 /* We are at the start of a bundle: emit the template
8138 (it should be defined). */
8139 gcc_assert (template0 >= 0);
8140 ia64_add_bundle_selector_before (template0, nop);
8141 /* If we have a two-bundle window, we make one bundle
8142 rotation.  Otherwise template0 will be undefined
8143 (a negative value).  */
8144 template0 = template1;
8145 template1 = -1;
8146 }
8147 }
8148 /* Move the position backward in the window.  A group barrier
8149 has no slot.  An asm insn takes a whole bundle.  */
8150 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8151 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8152 && asm_noperands (PATTERN (insn)) < 0)
8153 pos--;
8154 /* Long insn takes 2 slots. */
8155 if (ia64_safe_type (insn) == TYPE_L)
8156 pos--;
8157 gcc_assert (pos >= 0);
8158 if (pos % 3 == 0
8159 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8160 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8161 && asm_noperands (PATTERN (insn)) < 0)
8162 {
8163 /* The current insn is at the bundle start: emit the
8164 template. */
8165 gcc_assert (template0 >= 0);
8166 ia64_add_bundle_selector_before (template0, insn);
8167 b = PREV_INSN (insn);
8168 insn = b;
8169 /* See comment above in analogous place for emitting nops
8170 after the insn. */
8171 template0 = template1;
8172 template1 = -1;
8173 }
8174 /* Emit nops before the current insn.  */
8175 for (i = 0; i < curr_state->before_nops_num; i++)
8176 {
8177 nop = gen_nop ();
8178 ia64_emit_insn_before (nop, insn);
8179 nop = PREV_INSN (insn);
8180 insn = nop;
8181 pos--;
8182 gcc_assert (pos >= 0);
8183 if (pos % 3 == 0)
8184 {
8185 /* See comment above in analogous place for emitting nops
8186 after the insn. */
8187 gcc_assert (template0 >= 0);
8188 ia64_add_bundle_selector_before (template0, insn);
8189 b = PREV_INSN (insn);
8190 insn = b;
8191 template0 = template1;
8192 template1 = -1;
8193 }
8194 }
8195 }
8196 if (ia64_tune == PROCESSOR_ITANIUM)
8197 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8198 Itanium1 has a strange design: if the distance between an insn
8199 and a dependent MM-insn is less than 4 cycles, we get an
8200 additional 6-cycle stall.  So we make the distance at least
8201 4 cycles if it is shorter.  */
8202 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8203 insn != NULL_RTX;
8204 insn = next_insn)
8205 {
8206 gcc_assert (INSN_P (insn)
8207 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8208 && GET_CODE (PATTERN (insn)) != USE
8209 && GET_CODE (PATTERN (insn)) != CLOBBER);
8210 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8211 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8212 /* We found an MM-insn which needs additional cycles.  */
8213 {
8214 rtx last;
8215 int i, j, n;
8216 int pred_stop_p;
8217
8218 /* Now we search for the template of the bundle in which
8219 the MM-insn is placed and for the position of the insn
8220 in the bundle (0, 1, 2).  We also check whether there
8221 is a stop before the insn.  */
8222 last = prev_active_insn (insn);
8223 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8224 if (pred_stop_p)
8225 last = prev_active_insn (last);
8226 n = 0;
8227 for (;; last = prev_active_insn (last))
8228 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8229 {
8230 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8231 if (template0 == 9)
8232 /* The insn is in an MLX bundle.  Change the template
8233 to MFI because we will add nops before the insn;
8234 this simplifies the subsequent code a lot.  */
8235 PATTERN (last)
8236 = gen_bundle_selector (const2_rtx); /* -> MFI */
8237 break;
8238 }
8239 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8240 && (ia64_safe_itanium_class (last)
8241 != ITANIUM_CLASS_IGNORE))
8242 n++;
8243 /* Some correctness checks: the stop is not at the
8244 bundle start, there are no more than 3 insns in the
8245 bundle, and the MM-insn is not at the start of a
8246 bundle with template MLX.  */
8247 gcc_assert ((!pred_stop_p || n)
8248 && n <= 2
8249 && (template0 != 9 || !n));
8250 /* Put nops after the insn in the bundle. */
8251 for (j = 3 - n; j > 0; j --)
8252 ia64_emit_insn_before (gen_nop (), insn);
8253 /* This takes into account that we will add N more nops
8254 before the insn later -- please see the code below.  */
8255 add_cycles [INSN_UID (insn)]--;
8256 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8257 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8258 insn);
8259 if (pred_stop_p)
8260 add_cycles [INSN_UID (insn)]--;
8261 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8262 {
8263 /* Insert "MII;" template. */
8264 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8265 insn);
8266 ia64_emit_insn_before (gen_nop (), insn);
8267 ia64_emit_insn_before (gen_nop (), insn);
8268 if (i > 1)
8269 {
8270 /* To decrease code size, we use the "MI;I;"
8271 template.  */
8272 ia64_emit_insn_before
8273 (gen_insn_group_barrier (GEN_INT (3)), insn);
8274 i--;
8275 }
8276 ia64_emit_insn_before (gen_nop (), insn);
8277 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8278 insn);
8279 }
8280 /* Put the MM-insn in the same slot of a bundle with the
8281 same template as the original one. */
8282 ia64_add_bundle_selector_before (template0, insn);
8283 /* To put the insn in the same slot, add the necessary
8284 number of nops.  */
8285 for (j = n; j > 0; j --)
8286 ia64_emit_insn_before (gen_nop (), insn);
8287 /* Put the stop if the original bundle had it. */
8288 if (pred_stop_p)
8289 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8290 insn);
8291 }
8292 }
8293 free (index_to_bundle_states);
8294 finish_bundle_state_table ();
8295 bundling_p = 0;
8296 dfa_clean_insn_cache ();
8297 }
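
#if 0
/* Illustrative sketch only (not compiled): the forward pass of
   bundling () above, reduced to its dynamic-programming shape.  Each
   surviving state of the previous step is extended by the insn preceded
   by 0, 1 or 2 nops; insert_bundle_state then keeps only the best state
   per (dfa_state, insn_num) key.  The real code restricts the 2-nop
   attempt to certain insn types and treats group barriers, asm insns
   and bundle ends specially.  */
static void
bundling_forward_step_sketch (int insn_num, rtx insn,
                              int bundle_end_p, int only_bundle_end_p)
{
  struct bundle_state *curr_state, *next_state;
  int nops;

  for (curr_state = index_to_bundle_states [insn_num - 1];
       curr_state != NULL;
       curr_state = next_state)
    {
      next_state = curr_state->next;
      for (nops = 2; nops >= 0; nops--)
        issue_nops_and_insn (curr_state, nops, insn,
                             bundle_end_p, only_bundle_end_p);
    }
}
#endif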
8298
8299 /* The following function is called at the end of scheduling BB or
8300 EBB. After reload, it inserts stop bits and does insn bundling. */
8301
8302 static void
8303 ia64_sched_finish (FILE *dump, int sched_verbose)
8304 {
8305 if (sched_verbose)
8306 fprintf (dump, "// Finishing schedule.\n");
8307 if (!reload_completed)
8308 return;
8309 if (reload_completed)
8310 {
8311 final_emit_insn_group_barriers (dump);
8312 bundling (dump, sched_verbose, current_sched_info->prev_head,
8313 current_sched_info->next_tail);
8314 if (sched_verbose && dump)
8315 fprintf (dump, "// finishing %d-%d\n",
8316 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8317 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8318
8319 return;
8320 }
8321 }
8322
8323 /* The following function inserts stop bits in a scheduled BB or EBB.  */
8324
8325 static void
8326 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8327 {
8328 rtx insn;
8329 int need_barrier_p = 0;
8330 rtx prev_insn = NULL_RTX;
8331
8332 init_insn_group_barriers ();
8333
8334 for (insn = NEXT_INSN (current_sched_info->prev_head);
8335 insn != current_sched_info->next_tail;
8336 insn = NEXT_INSN (insn))
8337 {
8338 if (GET_CODE (insn) == BARRIER)
8339 {
8340 rtx last = prev_active_insn (insn);
8341
8342 if (! last)
8343 continue;
8344 if (GET_CODE (last) == JUMP_INSN
8345 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8346 last = prev_active_insn (last);
8347 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8348 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8349
8350 init_insn_group_barriers ();
8351 need_barrier_p = 0;
8352 prev_insn = NULL_RTX;
8353 }
8354 else if (INSN_P (insn))
8355 {
8356 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8357 {
8358 init_insn_group_barriers ();
8359 need_barrier_p = 0;
8360 prev_insn = NULL_RTX;
8361 }
8362 else if (need_barrier_p || group_barrier_needed (insn))
8363 {
8364 if (TARGET_EARLY_STOP_BITS)
8365 {
8366 rtx last;
8367
8368 for (last = insn;
8369 last != current_sched_info->prev_head;
8370 last = PREV_INSN (last))
8371 if (INSN_P (last) && GET_MODE (last) == TImode
8372 && stops_p [INSN_UID (last)])
8373 break;
8374 if (last == current_sched_info->prev_head)
8375 last = insn;
8376 last = prev_active_insn (last);
8377 if (last
8378 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8379 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8380 last);
8381 init_insn_group_barriers ();
8382 for (last = NEXT_INSN (last);
8383 last != insn;
8384 last = NEXT_INSN (last))
8385 if (INSN_P (last))
8386 group_barrier_needed (last);
8387 }
8388 else
8389 {
8390 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8391 insn);
8392 init_insn_group_barriers ();
8393 }
8394 group_barrier_needed (insn);
8395 prev_insn = NULL_RTX;
8396 }
8397 else if (recog_memoized (insn) >= 0)
8398 prev_insn = insn;
8399 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8400 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8401 || asm_noperands (PATTERN (insn)) >= 0);
8402 }
8403 }
8404 }
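
/* Illustrative note (the assembly below is hypothetical, not compiler
   output): each insn_group_barrier emitted above becomes a ";;" stop
   bit in the assembly, which ends the current instruction group, e.g.

     add r14 = r32, r33
     ;;                       // stop bit: the new value of r14 is visible
     ld8 r15 = [r14]  */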
8405
8406 \f
8407
8408 /* If the following function returns a nonzero value, we will use the
8409 DFA insn scheduler's multipass lookahead over that many insns.  */
8410
8411 static int
8412 ia64_first_cycle_multipass_dfa_lookahead (void)
8413 {
8414 return (reload_completed ? 6 : 4);
8415 }
8416
8417 /* The following function initializes `dfa_pre_cycle_insn' and `dfa_stop_insn'.  */
8418
8419 static void
8420 ia64_init_dfa_pre_cycle_insn (void)
8421 {
8422 if (temp_dfa_state == NULL)
8423 {
8424 dfa_state_size = state_size ();
8425 temp_dfa_state = xmalloc (dfa_state_size);
8426 prev_cycle_state = xmalloc (dfa_state_size);
8427 }
8428 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8429 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8430 recog_memoized (dfa_pre_cycle_insn);
8431 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8432 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8433 recog_memoized (dfa_stop_insn);
8434 }
8435
8436 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8437 used by the DFA insn scheduler. */
8438
8439 static rtx
8440 ia64_dfa_pre_cycle_insn (void)
8441 {
8442 return dfa_pre_cycle_insn;
8443 }
8444
8445 /* The following function returns TRUE if PRODUCER (of type ilog or
8446 ld) produces the address for CONSUMER (of type st or stf).  */
8447
8448 int
8449 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8450 {
8451 rtx dest, reg, mem;
8452
8453 gcc_assert (producer && consumer);
8454 dest = ia64_single_set (producer);
8455 gcc_assert (dest);
8456 reg = SET_DEST (dest);
8457 gcc_assert (reg);
8458 if (GET_CODE (reg) == SUBREG)
8459 reg = SUBREG_REG (reg);
8460 gcc_assert (GET_CODE (reg) == REG);
8461
8462 dest = ia64_single_set (consumer);
8463 gcc_assert (dest);
8464 mem = SET_DEST (dest);
8465 gcc_assert (mem && GET_CODE (mem) == MEM);
8466 return reg_mentioned_p (reg, mem);
8467 }
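
/* Illustrative example (hypothetical assembly): for

     add r14 = r32, r33      // producer, class ilog
     st8 [r14] = r35         // consumer, the store address uses r14

   the producer's destination r14 is mentioned in the consumer's address,
   so the bypass latency (rather than the default latency) applies.  */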
8468
8469 /* The following function returns TRUE if PRODUCER (of type ilog or
8470 ld) produces the address for CONSUMER (of type ld or fld).  */
8471
8472 int
8473 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8474 {
8475 rtx dest, src, reg, mem;
8476
8477 gcc_assert (producer && consumer);
8478 dest = ia64_single_set (producer);
8479 gcc_assert (dest);
8480 reg = SET_DEST (dest);
8481 gcc_assert (reg);
8482 if (GET_CODE (reg) == SUBREG)
8483 reg = SUBREG_REG (reg);
8484 gcc_assert (GET_CODE (reg) == REG);
8485
8486 src = ia64_single_set (consumer);
8487 gcc_assert (src);
8488 mem = SET_SRC (src);
8489 gcc_assert (mem);
8490
8491 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8492 mem = XVECEXP (mem, 0, 0);
8493 else if (GET_CODE (mem) == IF_THEN_ELSE)
8494 /* ??? Is this bypass necessary for ld.c? */
8495 {
8496 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8497 mem = XEXP (mem, 1);
8498 }
8499
8500 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8501 mem = XEXP (mem, 0);
8502
8503 if (GET_CODE (mem) == UNSPEC)
8504 {
8505 int c = XINT (mem, 1);
8506
8507 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8508 mem = XVECEXP (mem, 0, 0);
8509 }
8510
8511 /* Note that LO_SUM is used for GOT loads. */
8512 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8513
8514 return reg_mentioned_p (reg, mem);
8515 }
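
/* Illustrative example (hypothetical assembly): for

     ld8 r14 = [r32]         // producer, class ld
     ld8 r15 = [r14]         // consumer, the load address comes from r14

   the bypass applies.  The UNSPEC and IF_THEN_ELSE unwrapping above is
   needed because speculative and check forms (e.g. ld8.a, ld8.s, ld8.sa,
   ld8.c.clr) wrap the memory operand inside their patterns.  */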
8516
8517 /* The following function returns TRUE if INSN produces an address
8518 for a load/store insn.  We will place such insns into an M slot
8519 because that decreases their latency.  */
8520
8521 int
8522 ia64_produce_address_p (rtx insn)
8523 {
8524 return insn->call;
8525 }
8526
8527 \f
8528 /* Emit pseudo-ops for the assembler to describe predicate relations.
8529 At present this assumes that we only consider predicate pairs to
8530 be mutex, and that the assembler can deduce proper values from
8531 straight-line code. */
8532
8533 static void
8534 emit_predicate_relation_info (void)
8535 {
8536 basic_block bb;
8537
8538 FOR_EACH_BB_REVERSE (bb)
8539 {
8540 int r;
8541 rtx head = BB_HEAD (bb);
8542
8543 /* We only need such notes at code labels. */
8544 if (GET_CODE (head) != CODE_LABEL)
8545 continue;
8546 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8547 head = NEXT_INSN (head);
8548
8549 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8550 grabbing the entire block of predicate registers. */
8551 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8552 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
8553 {
8554 rtx p = gen_rtx_REG (BImode, r);
8555 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8556 if (head == BB_END (bb))
8557 BB_END (bb) = n;
8558 head = n;
8559 }
8560 }
8561
8562 /* Look for conditional calls that do not return, and protect predicate
8563 relations around them. Otherwise the assembler will assume the call
8564 returns, and complain about uses of call-clobbered predicates after
8565 the call. */
8566 FOR_EACH_BB_REVERSE (bb)
8567 {
8568 rtx insn = BB_HEAD (bb);
8569
8570 while (1)
8571 {
8572 if (GET_CODE (insn) == CALL_INSN
8573 && GET_CODE (PATTERN (insn)) == COND_EXEC
8574 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8575 {
8576 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8577 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8578 if (BB_HEAD (bb) == insn)
8579 BB_HEAD (bb) = b;
8580 if (BB_END (bb) == insn)
8581 BB_END (bb) = a;
8582 }
8583
8584 if (insn == BB_END (bb))
8585 break;
8586 insn = NEXT_INSN (insn);
8587 }
8588 }
8589 }
8590
8591 /* Perform machine dependent operations on the rtl chain INSNS. */
8592
8593 static void
8594 ia64_reorg (void)
8595 {
8596 /* We are freeing block_for_insn in the toplev to keep compatibility
8597 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8598 compute_bb_for_insn ();
8599
8600 /* If optimizing, we'll have split before scheduling. */
8601 if (optimize == 0)
8602 split_all_insns ();
8603
8604 if (optimize && ia64_flag_schedule_insns2 && dbg_cnt (ia64_sched2))
8605 {
8606 timevar_push (TV_SCHED2);
8607 ia64_final_schedule = 1;
8608
8609 initiate_bundle_states ();
8610 ia64_nop = make_insn_raw (gen_nop ());
8611 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8612 recog_memoized (ia64_nop);
8613 clocks_length = get_max_uid () + 1;
8614 stops_p = xcalloc (1, clocks_length);
8615 if (ia64_tune == PROCESSOR_ITANIUM)
8616 {
8617 clocks = xcalloc (clocks_length, sizeof (int));
8618 add_cycles = xcalloc (clocks_length, sizeof (int));
8619 }
8620 if (ia64_tune == PROCESSOR_ITANIUM2)
8621 {
8622 pos_1 = get_cpu_unit_code ("2_1");
8623 pos_2 = get_cpu_unit_code ("2_2");
8624 pos_3 = get_cpu_unit_code ("2_3");
8625 pos_4 = get_cpu_unit_code ("2_4");
8626 pos_5 = get_cpu_unit_code ("2_5");
8627 pos_6 = get_cpu_unit_code ("2_6");
8628 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8629 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8630 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8631 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8632 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8633 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8634 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8635 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8636 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8637 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8638 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8639 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8640 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8641 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8642 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8643 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8644 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8645 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8646 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8647 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8648 }
8649 else
8650 {
8651 pos_1 = get_cpu_unit_code ("1_1");
8652 pos_2 = get_cpu_unit_code ("1_2");
8653 pos_3 = get_cpu_unit_code ("1_3");
8654 pos_4 = get_cpu_unit_code ("1_4");
8655 pos_5 = get_cpu_unit_code ("1_5");
8656 pos_6 = get_cpu_unit_code ("1_6");
8657 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8658 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8659 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8660 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8661 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8662 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8663 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8664 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8665 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8666 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8667 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8668 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8669 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8670 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8671 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8672 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8673 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8674 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8675 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8676 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8677 }
8678 schedule_ebbs ();
8679 /* We cannot reuse this one because it has been corrupted by the
8680 evil glat. */
8681 finish_bundle_states ();
8682 if (ia64_tune == PROCESSOR_ITANIUM)
8683 {
8684 free (add_cycles);
8685 free (clocks);
8686 }
8687 free (stops_p);
8688 stops_p = NULL;
8689 emit_insn_group_barriers (dump_file);
8690
8691 ia64_final_schedule = 0;
8692 timevar_pop (TV_SCHED2);
8693 }
8694 else
8695 emit_all_insn_group_barriers (dump_file);
8696
8697 df_analyze ();
8698
8699 /* A call must not be the last instruction in a function, so that the
8700 return address stays within the function and unwinding works
8701 properly.  Note that IA-64 differs from dwarf2 on this point.  */
8702 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8703 {
8704 rtx insn;
8705 int saw_stop = 0;
8706
8707 insn = get_last_insn ();
8708 if (! INSN_P (insn))
8709 insn = prev_active_insn (insn);
8710 /* Skip over insns that expand to nothing. */
8711 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8712 {
8713 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8714 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8715 saw_stop = 1;
8716 insn = prev_active_insn (insn);
8717 }
8718 if (GET_CODE (insn) == CALL_INSN)
8719 {
8720 if (! saw_stop)
8721 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8722 emit_insn (gen_break_f ());
8723 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8724 }
8725 }
8726
8727 emit_predicate_relation_info ();
8728
8729 if (ia64_flag_var_tracking)
8730 {
8731 timevar_push (TV_VAR_TRACKING);
8732 variable_tracking_main ();
8733 timevar_pop (TV_VAR_TRACKING);
8734 }
8735 df_finish_pass (false);
8736 }
8737 \f
8738 /* Return true if REGNO is used by the epilogue. */
8739
8740 int
8741 ia64_epilogue_uses (int regno)
8742 {
8743 switch (regno)
8744 {
8745 case R_GR (1):
8746 /* With a call to a function in another module, we will write a new
8747 value to "gp". After returning from such a call, we need to make
8748 sure the function restores the original gp-value, even if the
8749 function itself does not use the gp anymore. */
8750 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8751
8752 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8753 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8754 /* For functions defined with the syscall_linkage attribute, all
8755 input registers are marked as live at all function exits. This
8756 prevents the register allocator from using the input registers,
8757 which in turn makes it possible to restart a system call after
8758 an interrupt without having to save/restore the input registers.
8759 This also prevents kernel data from leaking to application code. */
8760 return lookup_attribute ("syscall_linkage",
8761 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8762
8763 case R_BR (0):
8764 /* Conditional return patterns can't represent the use of `b0' as
8765 the return address, so we force the value live this way. */
8766 return 1;
8767
8768 case AR_PFS_REGNUM:
8769 /* Likewise for ar.pfs, which is used by br.ret. */
8770 return 1;
8771
8772 default:
8773 return 0;
8774 }
8775 }
8776
8777 /* Return true if REGNO is used by the frame unwinder. */
8778
8779 int
8780 ia64_eh_uses (int regno)
8781 {
8782 enum ia64_frame_regs r;
8783
8784 if (! reload_completed)
8785 return 0;
8786
8787 if (regno == 0)
8788 return 0;
8789
8790 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
8791 if (regno == current_frame_info.r[r]
8792 || regno == emitted_frame_related_regs[r])
8793 return 1;
8794
8795 return 0;
8796 }
8797 \f
8798 /* Return true if this goes in small data/bss. */
8799
8800 /* ??? We could also support our own long data here, generating
8801 movl/add/ld8 instead of addl,ld8/ld8.  This makes the code bigger, but
8802 should make it faster because there is one less load.  This would also
8803 cover incomplete types, which can't go in sdata/sbss.  */
8804
8805 static bool
8806 ia64_in_small_data_p (const_tree exp)
8807 {
8808 if (TARGET_NO_SDATA)
8809 return false;
8810
8811 /* We want to merge strings, so we never consider them small data. */
8812 if (TREE_CODE (exp) == STRING_CST)
8813 return false;
8814
8815 /* Functions are never small data. */
8816 if (TREE_CODE (exp) == FUNCTION_DECL)
8817 return false;
8818
8819 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8820 {
8821 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8822
8823 if (strcmp (section, ".sdata") == 0
8824 || strncmp (section, ".sdata.", 7) == 0
8825 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8826 || strcmp (section, ".sbss") == 0
8827 || strncmp (section, ".sbss.", 6) == 0
8828 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8829 return true;
8830 }
8831 else
8832 {
8833 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8834
8835 /* If this is an incomplete type with size 0, then we can't put it
8836 in sdata because it might be too big when completed. */
8837 if (size > 0 && size <= ia64_section_threshold)
8838 return true;
8839 }
8840
8841 return false;
8842 }
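
/* Illustrative examples (hypothetical declarations, assuming
   ia64_section_threshold is at least the size of an int):

     static int counter;                              // small enough -> .sbss
     int x __attribute__ ((section (".sdata"))) = 1;  // explicit small-data section
     static char big[65536];                          // above threshold -> .bss
     const char *s = "hello";                         // the string itself is never small data  */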
8843 \f
8844 /* Output assembly directives for prologue regions. */
8845
8846 /* The current basic block number. */
8847
8848 static bool last_block;
8849
8850 /* True if we need a copy_state command at the start of the next block. */
8851
8852 static bool need_copy_state;
8853
8854 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8855 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8856 #endif
8857
8858 /* Emit a debugging label after a call-frame-related insn. We'd
8859 rather output the label right away, but we'd have to output it
8860 after, not before, the instruction, and the instruction has not
8861 been output yet. So we emit the label after the insn, delete it to
8862 avoid introducing basic blocks, and mark it as preserved, such that
8863 it is still output, given that it is referenced in debug info. */
8864
8865 static const char *
8866 ia64_emit_deleted_label_after_insn (rtx insn)
8867 {
8868 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8869 rtx lb = gen_label_rtx ();
8870 rtx label_insn = emit_label_after (lb, insn);
8871
8872 LABEL_PRESERVE_P (lb) = 1;
8873
8874 delete_insn (label_insn);
8875
8876 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8877
8878 return xstrdup (label);
8879 }
8880
8881 /* Define the CFA after INSN with the steady-state definition. */
8882
8883 static void
8884 ia64_dwarf2out_def_steady_cfa (rtx insn)
8885 {
8886 rtx fp = frame_pointer_needed
8887 ? hard_frame_pointer_rtx
8888 : stack_pointer_rtx;
8889
8890 dwarf2out_def_cfa
8891 (ia64_emit_deleted_label_after_insn (insn),
8892 REGNO (fp),
8893 ia64_initial_elimination_offset
8894 (REGNO (arg_pointer_rtx), REGNO (fp))
8895 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8896 }
8897
8898 /* The generic dwarf2 frame debug info generator does not define a
8899 separate region for the very end of the epilogue, so refrain from
8900 doing so in the IA64-specific code as well. */
8901
8902 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8903
8904 /* The function emits unwind directives for the start of an epilogue. */
8905
8906 static void
8907 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8908 {
8909 /* If this isn't the last block of the function, then we need to label the
8910 current state, and copy it back in at the start of the next block. */
8911
8912 if (!last_block)
8913 {
8914 if (unwind)
8915 fprintf (asm_out_file, "\t.label_state %d\n",
8916 ++cfun->machine->state_num);
8917 need_copy_state = true;
8918 }
8919
8920 if (unwind)
8921 fprintf (asm_out_file, "\t.restore sp\n");
8922 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8923 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8924 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8925 }
8926
8927 /* This function processes a SET pattern looking for specific patterns
8928 which result in emitting an assembly directive required for unwinding. */
8929
8930 static int
8931 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8932 {
8933 rtx src = SET_SRC (pat);
8934 rtx dest = SET_DEST (pat);
8935 int src_regno, dest_regno;
8936
8937 /* Look for the ALLOC insn. */
8938 if (GET_CODE (src) == UNSPEC_VOLATILE
8939 && XINT (src, 1) == UNSPECV_ALLOC
8940 && GET_CODE (dest) == REG)
8941 {
8942 dest_regno = REGNO (dest);
8943
8944 /* If this is the final destination for ar.pfs, then this must
8945 be the alloc in the prologue. */
8946 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
8947 {
8948 if (unwind)
8949 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8950 ia64_dbx_register_number (dest_regno));
8951 }
8952 else
8953 {
8954 /* This must be an alloc before a sibcall. We must drop the
8955 old frame info. The easiest way to drop the old frame
8956 info is to ensure we had a ".restore sp" directive
8957 followed by a new prologue. If the procedure doesn't
8958 have a memory-stack frame, we'll issue a dummy ".restore
8959 sp" now. */
8960 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8961 /* If we haven't done process_epilogue () yet, do it now.  */
8962 process_epilogue (asm_out_file, insn, unwind, frame);
8963 if (unwind)
8964 fprintf (asm_out_file, "\t.prologue\n");
8965 }
8966 return 1;
8967 }
8968
8969 /* Look for SP = .... */
8970 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8971 {
8972 if (GET_CODE (src) == PLUS)
8973 {
8974 rtx op0 = XEXP (src, 0);
8975 rtx op1 = XEXP (src, 1);
8976
8977 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8978
8979 if (INTVAL (op1) < 0)
8980 {
8981 gcc_assert (!frame_pointer_needed);
8982 if (unwind)
8983 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8984 -INTVAL (op1));
8985 if (frame)
8986 ia64_dwarf2out_def_steady_cfa (insn);
8987 }
8988 else
8989 process_epilogue (asm_out_file, insn, unwind, frame);
8990 }
8991 else
8992 {
8993 gcc_assert (GET_CODE (src) == REG
8994 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8995 process_epilogue (asm_out_file, insn, unwind, frame);
8996 }
8997
8998 return 1;
8999 }
9000
9001 /* Register move we need to look at. */
9002 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9003 {
9004 src_regno = REGNO (src);
9005 dest_regno = REGNO (dest);
9006
9007 switch (src_regno)
9008 {
9009 case BR_REG (0):
9010 /* Saving return address pointer. */
9011 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9012 if (unwind)
9013 fprintf (asm_out_file, "\t.save rp, r%d\n",
9014 ia64_dbx_register_number (dest_regno));
9015 return 1;
9016
9017 case PR_REG (0):
9018 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9019 if (unwind)
9020 fprintf (asm_out_file, "\t.save pr, r%d\n",
9021 ia64_dbx_register_number (dest_regno));
9022 return 1;
9023
9024 case AR_UNAT_REGNUM:
9025 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9026 if (unwind)
9027 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9028 ia64_dbx_register_number (dest_regno));
9029 return 1;
9030
9031 case AR_LC_REGNUM:
9032 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9033 if (unwind)
9034 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9035 ia64_dbx_register_number (dest_regno));
9036 return 1;
9037
9038 case STACK_POINTER_REGNUM:
9039 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9040 && frame_pointer_needed);
9041 if (unwind)
9042 fprintf (asm_out_file, "\t.vframe r%d\n",
9043 ia64_dbx_register_number (dest_regno));
9044 if (frame)
9045 ia64_dwarf2out_def_steady_cfa (insn);
9046 return 1;
9047
9048 default:
9049 /* Everything else should indicate being stored to memory. */
9050 gcc_unreachable ();
9051 }
9052 }
9053
9054 /* Memory store we need to look at. */
9055 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9056 {
9057 long off;
9058 rtx base;
9059 const char *saveop;
9060
9061 if (GET_CODE (XEXP (dest, 0)) == REG)
9062 {
9063 base = XEXP (dest, 0);
9064 off = 0;
9065 }
9066 else
9067 {
9068 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9069 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9070 base = XEXP (XEXP (dest, 0), 0);
9071 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9072 }
9073
9074 if (base == hard_frame_pointer_rtx)
9075 {
9076 saveop = ".savepsp";
9077 off = - off;
9078 }
9079 else
9080 {
9081 gcc_assert (base == stack_pointer_rtx);
9082 saveop = ".savesp";
9083 }
9084
9085 src_regno = REGNO (src);
9086 switch (src_regno)
9087 {
9088 case BR_REG (0):
9089 gcc_assert (!current_frame_info.r[reg_save_b0]);
9090 if (unwind)
9091 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9092 return 1;
9093
9094 case PR_REG (0):
9095 gcc_assert (!current_frame_info.r[reg_save_pr]);
9096 if (unwind)
9097 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9098 return 1;
9099
9100 case AR_LC_REGNUM:
9101 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9102 if (unwind)
9103 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9104 return 1;
9105
9106 case AR_PFS_REGNUM:
9107 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9108 if (unwind)
9109 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9110 return 1;
9111
9112 case AR_UNAT_REGNUM:
9113 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9114 if (unwind)
9115 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9116 return 1;
9117
9118 case GR_REG (4):
9119 case GR_REG (5):
9120 case GR_REG (6):
9121 case GR_REG (7):
9122 if (unwind)
9123 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9124 1 << (src_regno - GR_REG (4)));
9125 return 1;
9126
9127 case BR_REG (1):
9128 case BR_REG (2):
9129 case BR_REG (3):
9130 case BR_REG (4):
9131 case BR_REG (5):
9132 if (unwind)
9133 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9134 1 << (src_regno - BR_REG (1)));
9135 return 1;
9136
9137 case FR_REG (2):
9138 case FR_REG (3):
9139 case FR_REG (4):
9140 case FR_REG (5):
9141 if (unwind)
9142 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9143 1 << (src_regno - FR_REG (2)));
9144 return 1;
9145
9146 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9147 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9148 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9149 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9150 if (unwind)
9151 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9152 1 << (src_regno - FR_REG (12)));
9153 return 1;
9154
9155 default:
9156 return 0;
9157 }
9158 }
9159
9160 return 0;
9161 }
9162
9163
9164 /* This function looks at a single insn and emits any directives
9165 required to unwind this insn. */
9166 void
9167 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9168 {
9169 bool unwind = (flag_unwind_tables
9170 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9171 bool frame = dwarf2out_do_frame ();
9172
9173 if (unwind || frame)
9174 {
9175 rtx pat;
9176
9177 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9178 {
9179 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9180
9181 /* Restore unwind state from immediately before the epilogue. */
9182 if (need_copy_state)
9183 {
9184 if (unwind)
9185 {
9186 fprintf (asm_out_file, "\t.body\n");
9187 fprintf (asm_out_file, "\t.copy_state %d\n",
9188 cfun->machine->state_num);
9189 }
9190 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9191 ia64_dwarf2out_def_steady_cfa (insn);
9192 need_copy_state = false;
9193 }
9194 }
9195
9196 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9197 return;
9198
9199 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9200 if (pat)
9201 pat = XEXP (pat, 0);
9202 else
9203 pat = PATTERN (insn);
9204
9205 switch (GET_CODE (pat))
9206 {
9207 case SET:
9208 process_set (asm_out_file, pat, insn, unwind, frame);
9209 break;
9210
9211 case PARALLEL:
9212 {
9213 int par_index;
9214 int limit = XVECLEN (pat, 0);
9215 for (par_index = 0; par_index < limit; par_index++)
9216 {
9217 rtx x = XVECEXP (pat, 0, par_index);
9218 if (GET_CODE (x) == SET)
9219 process_set (asm_out_file, x, insn, unwind, frame);
9220 }
9221 break;
9222 }
9223
9224 default:
9225 gcc_unreachable ();
9226 }
9227 }
9228 }
9229
9230 \f
9231 enum ia64_builtins
9232 {
9233 IA64_BUILTIN_BSP,
9234 IA64_BUILTIN_FLUSHRS
9235 };
9236
9237 void
9238 ia64_init_builtins (void)
9239 {
9240 tree fpreg_type;
9241 tree float80_type;
9242
9243 /* The __fpreg type. */
9244 fpreg_type = make_node (REAL_TYPE);
9245 TYPE_PRECISION (fpreg_type) = 82;
9246 layout_type (fpreg_type);
9247 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9248
9249 /* The __float80 type. */
9250 float80_type = make_node (REAL_TYPE);
9251 TYPE_PRECISION (float80_type) = 80;
9252 layout_type (float80_type);
9253 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9254
9255 /* The __float128 type. */
9256 if (!TARGET_HPUX)
9257 {
9258 tree float128_type = make_node (REAL_TYPE);
9259 TYPE_PRECISION (float128_type) = 128;
9260 layout_type (float128_type);
9261 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9262 }
9263 else
9264 /* Under HPUX, this is a synonym for "long double". */
9265 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9266 "__float128");
9267
9268 #define def_builtin(name, type, code) \
9269 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9270 NULL, NULL_TREE)
9271
9272 def_builtin ("__builtin_ia64_bsp",
9273 build_function_type (ptr_type_node, void_list_node),
9274 IA64_BUILTIN_BSP);
9275
9276 def_builtin ("__builtin_ia64_flushrs",
9277 build_function_type (void_type_node, void_list_node),
9278 IA64_BUILTIN_FLUSHRS);
9279
9280 #undef def_builtin
9281
9282 if (TARGET_HPUX)
9283 {
9284 if (built_in_decls [BUILT_IN_FINITE])
9285 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9286 "_Isfinite");
9287 if (built_in_decls [BUILT_IN_FINITEF])
9288 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9289 "_Isfinitef");
9290 if (built_in_decls [BUILT_IN_FINITEL])
9291 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9292 "_Isfinitef128");
9293 }
9294 }
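/* Hedged usage sketch of the two machine-specific builtins defined
   above (user code, not part of this file; the variable name is
   illustrative):

     void *backing_store = __builtin_ia64_bsp ();
     __builtin_ia64_flushrs ();

   The first reads the current RSE backing store pointer (ar.bsp); the
   second flushes the dirty stacked registers out to the backing store.  */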
9295
9296 rtx
9297 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9298 enum machine_mode mode ATTRIBUTE_UNUSED,
9299 int ignore ATTRIBUTE_UNUSED)
9300 {
9301 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9302 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9303
9304 switch (fcode)
9305 {
9306 case IA64_BUILTIN_BSP:
9307 if (! target || ! register_operand (target, DImode))
9308 target = gen_reg_rtx (DImode);
9309 emit_insn (gen_bsp_value (target));
9310 #ifdef POINTERS_EXTEND_UNSIGNED
9311 target = convert_memory_address (ptr_mode, target);
9312 #endif
9313 return target;
9314
9315 case IA64_BUILTIN_FLUSHRS:
9316 emit_insn (gen_flushrs ());
9317 return const0_rtx;
9318
9319 default:
9320 break;
9321 }
9322
9323 return NULL_RTX;
9324 }
9325
9326 /* For the HP-UX IA64, aggregate parameters are passed in the
9327 most significant bits of the stack slot. */
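/* Illustrative note (an assumption about the big-endian HP-UX ABI, not
   stated in this file): a 3-byte struct passed on the stack occupies
   the most significant bytes of its slot, i.e. it is padded "upward"
   rather than taking the default padding for small BLKmode args.  */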
9328
9329 enum direction
9330 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9331 {
9332 /* Exception to normal case for structures/unions/etc. */
9333
9334 if (type && AGGREGATE_TYPE_P (type)
9335 && int_size_in_bytes (type) < UNITS_PER_WORD)
9336 return upward;
9337
9338 /* Fall back to the default. */
9339 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9340 }
9341
9342 /* Emit text to declare externally defined variables and functions, because
9343 the Intel assembler does not support undefined externals. */
9344
9345 void
9346 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9347 {
9348 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9349 set in order to avoid putting out names that are never really
9350 used. */
9351 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9352 {
9353 /* maybe_assemble_visibility will return 1 if the assembler
9354 visibility directive is output. */
9355 int need_visibility = ((*targetm.binds_local_p) (decl)
9356 && maybe_assemble_visibility (decl));
9357
9358 /* GNU as does not need anything here, but the HP linker does
9359 need something for external functions. */
9360 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9361 && TREE_CODE (decl) == FUNCTION_DECL)
9362 (*targetm.asm_out.globalize_decl_name) (file, decl);
9363 else if (need_visibility && !TARGET_GNU_AS)
9364 (*targetm.asm_out.globalize_label) (file, name);
9365 }
9366 }
9367
9368 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
9369 modes of word_mode and larger. Rename the TFmode libfuncs using the
9370 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
9371 backward compatibility. */
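/* For example (grounded in the table below), a TFmode multiplication
   becomes a call to _U_Qfmpy and a TFmode-to-SImode conversion a call
   to _U_Qfcnvfxt_quad_to_sgl.  */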
9372
9373 static void
9374 ia64_init_libfuncs (void)
9375 {
9376 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9377 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9378 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9379 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9380
9381 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9382 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9383 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9384 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9385 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9386
9387 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9388 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9389 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9390 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9391 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9392 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9393
9394 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9395 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9396 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9397 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9398 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9399
9400 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9401 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9402 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9403 /* HP-UX 11.23 libc does not have a function for unsigned
9404 SImode-to-TFmode conversion. */
9405 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
9406 }
9407
9408 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9409
9410 static void
9411 ia64_hpux_init_libfuncs (void)
9412 {
9413 ia64_init_libfuncs ();
9414
9415 /* The HP SI millicode division and mod functions expect DI arguments.
9416 By turning them off completely we avoid using both libgcc and the
9417 non-standard millicode routines and use the HP DI millicode routines
9418 instead. */
9419
9420 set_optab_libfunc (sdiv_optab, SImode, 0);
9421 set_optab_libfunc (udiv_optab, SImode, 0);
9422 set_optab_libfunc (smod_optab, SImode, 0);
9423 set_optab_libfunc (umod_optab, SImode, 0);
9424
9425 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9426 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9427 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9428 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9429
9430 /* HP-UX libc has TF min/max/abs routines in it. */
9431 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9432 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9433 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9434
9435 /* ia64_expand_compare uses this. */
9436 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9437
9438 /* These should never be used. */
9439 set_optab_libfunc (eq_optab, TFmode, 0);
9440 set_optab_libfunc (ne_optab, TFmode, 0);
9441 set_optab_libfunc (gt_optab, TFmode, 0);
9442 set_optab_libfunc (ge_optab, TFmode, 0);
9443 set_optab_libfunc (lt_optab, TFmode, 0);
9444 set_optab_libfunc (le_optab, TFmode, 0);
9445 }
9446
9447 /* Rename the division and modulus functions in VMS. */
9448
9449 static void
9450 ia64_vms_init_libfuncs (void)
9451 {
9452 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9453 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9454 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9455 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9456 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9457 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9458 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9459 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9460 }
9461
9462 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9463 the HPUX conventions. */
9464
9465 static void
9466 ia64_sysv4_init_libfuncs (void)
9467 {
9468 ia64_init_libfuncs ();
9469
9470 /* These functions are not part of the HPUX TFmode interface. We
9471 use them instead of _U_Qfcmp, which doesn't work the way we
9472 expect. */
9473 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9474 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9475 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9476 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9477 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9478 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9479
9480 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9481 glibc doesn't have them. */
9482 }
9483 \f
9484 /* For HPUX, it is illegal to have relocations in shared segments. */
9485
9486 static int
9487 ia64_hpux_reloc_rw_mask (void)
9488 {
9489 return 3;
9490 }
9491
9492 /* For others, relax this so that relocations to local data go in
9493 read-only segments, but we still cannot allow global relocations
9494 in read-only segments. */
9495
9496 static int
9497 ia64_reloc_rw_mask (void)
9498 {
9499 return flag_pic ? 3 : 2;
9500 }
9501
9502 /* Return the section to use for X. The only special thing we do here
9503 is to honor small data. */
9504
9505 static section *
9506 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9507 unsigned HOST_WIDE_INT align)
9508 {
9509 if (GET_MODE_SIZE (mode) > 0
9510 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9511 && !TARGET_NO_SDATA)
9512 return sdata_section;
9513 else
9514 return default_elf_select_rtx_section (mode, x, align);
9515 }
9516
9517 static unsigned int
9518 ia64_section_type_flags (tree decl, const char *name, int reloc)
9519 {
9520 unsigned int flags = 0;
9521
9522 if (strcmp (name, ".sdata") == 0
9523 || strncmp (name, ".sdata.", 7) == 0
9524 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9525 || strncmp (name, ".sdata2.", 8) == 0
9526 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9527 || strcmp (name, ".sbss") == 0
9528 || strncmp (name, ".sbss.", 6) == 0
9529 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9530 flags = SECTION_SMALL;
9531
9532 flags |= default_section_type_flags (decl, name, reloc);
9533 return flags;
9534 }
9535
9536 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9537 structure type whose return-slot address should be passed in out0,
9538 rather than in r8. */
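/* Illustrative example (an assumption following the Itanium C++ ABI,
   not taken from this file): for

     struct S { S (const S &); ~S (); };
     S f (int);

   the caller passes the address of the return slot as an implicit
   first argument, so it is passed in out0 and the int in out1; a
   trivially copyable BLKmode return value would instead have its
   return slot address passed in r8.  */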
9539
9540 static bool
9541 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9542 {
9543 tree ret_type = TREE_TYPE (fntype);
9544
9545 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9546 as the structure return address parameter, if the return value
9547 type has a non-trivial copy constructor or destructor. It is not
9548 clear if this same convention should be used for other
9549 programming languages. Until G++ 3.4, we incorrectly used r8 for
9550 these return values. */
9551 return (abi_version_at_least (2)
9552 && ret_type
9553 && TYPE_MODE (ret_type) == BLKmode
9554 && TREE_ADDRESSABLE (ret_type)
9555 && strcmp (lang_hooks.name, "GNU C++") == 0);
9556 }
9557
9558 /* Output the assembler code for a thunk function. THUNK_DECL is the
9559 declaration for the thunk function itself, FUNCTION is the decl for
9560 the target function. DELTA is an immediate constant offset to be
9561 added to THIS. If VCALL_OFFSET is nonzero, the word at
9562 *(*this + vcall_offset) should be added to THIS. */
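/* In pseudo-code (an illustrative summary of the code below), the
   emitted thunk computes

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(*this + VCALL_OFFSET);

   and then tail-calls FUNCTION with the adjusted "this".  */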
9563
9564 static void
9565 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9566 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9567 tree function)
9568 {
9569 rtx this, insn, funexp;
9570 unsigned int this_parmno;
9571 unsigned int this_regno;
9572 rtx delta_rtx;
9573
9574 reload_completed = 1;
9575 epilogue_completed = 1;
9576
9577 /* Set things up as ia64_expand_prologue might. */
9578 last_scratch_gr_reg = 15;
9579
9580 memset (&current_frame_info, 0, sizeof (current_frame_info));
9581 current_frame_info.spill_cfa_off = -16;
9582 current_frame_info.n_input_regs = 1;
9583 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9584
9585 /* Mark the end of the (empty) prologue. */
9586 emit_note (NOTE_INSN_PROLOGUE_END);
9587
9588 /* Figure out whether "this" will be the first parameter (the
9589 typical case) or the second parameter (as happens when the
9590 virtual function returns certain class objects). */
9591 this_parmno
9592 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9593 ? 1 : 0);
9594 this_regno = IN_REG (this_parmno);
9595 if (!TARGET_REG_NAMES)
9596 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9597
9598 this = gen_rtx_REG (Pmode, this_regno);
9599
9600 /* Apply the constant offset, if required. */
9601 delta_rtx = GEN_INT (delta);
9602 if (TARGET_ILP32)
9603 {
9604 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9605 REG_POINTER (tmp) = 1;
9606 if (delta && satisfies_constraint_I (delta_rtx))
9607 {
9608 emit_insn (gen_ptr_extend_plus_imm (this, tmp, delta_rtx));
9609 delta = 0;
9610 }
9611 else
9612 emit_insn (gen_ptr_extend (this, tmp));
9613 }
9614 if (delta)
9615 {
9616 if (!satisfies_constraint_I (delta_rtx))
9617 {
9618 rtx tmp = gen_rtx_REG (Pmode, 2);
9619 emit_move_insn (tmp, delta_rtx);
9620 delta_rtx = tmp;
9621 }
9622 emit_insn (gen_adddi3 (this, this, delta_rtx));
9623 }
9624
9625 /* Apply the offset from the vtable, if required. */
9626 if (vcall_offset)
9627 {
9628 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9629 rtx tmp = gen_rtx_REG (Pmode, 2);
9630
9631 if (TARGET_ILP32)
9632 {
9633 rtx t = gen_rtx_REG (ptr_mode, 2);
9634 REG_POINTER (t) = 1;
9635 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
9636 if (satisfies_constraint_I (vcall_offset_rtx))
9637 {
9638 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9639 vcall_offset = 0;
9640 }
9641 else
9642 emit_insn (gen_ptr_extend (tmp, t));
9643 }
9644 else
9645 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9646
9647 if (vcall_offset)
9648 {
9649 if (!satisfies_constraint_J (vcall_offset_rtx))
9650 {
9651 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9652 emit_move_insn (tmp2, vcall_offset_rtx);
9653 vcall_offset_rtx = tmp2;
9654 }
9655 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9656 }
9657
9658 if (TARGET_ILP32)
9659 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9660 else
9661 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9662
9663 emit_insn (gen_adddi3 (this, this, tmp));
9664 }
9665
9666 /* Generate a tail call to the target function. */
9667 if (! TREE_USED (function))
9668 {
9669 assemble_external (function);
9670 TREE_USED (function) = 1;
9671 }
9672 funexp = XEXP (DECL_RTL (function), 0);
9673 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9674 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9675 insn = get_last_insn ();
9676 SIBLING_CALL_P (insn) = 1;
9677
9678 /* Code generation for calls relies on splitting. */
9679 reload_completed = 1;
9680 epilogue_completed = 1;
9681 try_split (PATTERN (insn), insn, 0);
9682
9683 emit_barrier ();
9684
9685 /* Run just enough of rest_of_compilation to get the insns emitted.
9686 There's not really enough bulk here to make other passes such as
9687 instruction scheduling worthwhile. Note that use_thunk calls
9688 assemble_start_function and assemble_end_function. */
9689
9690 insn_locators_alloc ();
9691 emit_all_insn_group_barriers (NULL);
9692 insn = get_insns ();
9693 shorten_branches (insn);
9694 final_start_function (insn, file, 1);
9695 final (insn, file, 1);
9696 final_end_function ();
9697 free_after_compilation (cfun);
9698
9699 reload_completed = 0;
9700 epilogue_completed = 0;
9701 }
9702
9703 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9704
9705 static rtx
9706 ia64_struct_value_rtx (tree fntype,
9707 int incoming ATTRIBUTE_UNUSED)
9708 {
9709 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9710 return NULL_RTX;
9711 return gen_rtx_REG (Pmode, GR_REG (8));
9712 }
9713
9714 static bool
9715 ia64_scalar_mode_supported_p (enum machine_mode mode)
9716 {
9717 switch (mode)
9718 {
9719 case QImode:
9720 case HImode:
9721 case SImode:
9722 case DImode:
9723 case TImode:
9724 return true;
9725
9726 case SFmode:
9727 case DFmode:
9728 case XFmode:
9729 case RFmode:
9730 return true;
9731
9732 case TFmode:
9733 return TARGET_HPUX;
9734
9735 default:
9736 return false;
9737 }
9738 }
9739
9740 static bool
9741 ia64_vector_mode_supported_p (enum machine_mode mode)
9742 {
9743 switch (mode)
9744 {
9745 case V8QImode:
9746 case V4HImode:
9747 case V2SImode:
9748 return true;
9749
9750 case V2SFmode:
9751 return true;
9752
9753 default:
9754 return false;
9755 }
9756 }
9757
9758 /* Implement the FUNCTION_PROFILER macro. */
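/* Summary of the sequence emitted below (illustrative, derived from
   the code itself): alloc leaves the caller's ar.pfs in out0 (r40),
   out1 gets the gp (r1), out2 the return address (b0) and out3 the
   address of the per-call-site LP counter label (or r0 when
   NO_PROFILE_COUNTERS), before branching to _mcount.  */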
9759
9760 void
9761 ia64_output_function_profiler (FILE *file, int labelno)
9762 {
9763 bool indirect_call;
9764
9765 /* If the function needs a static chain and the static chain
9766 register is r15, we use an indirect call so as to bypass
9767 the PLT stub in case the executable is dynamically linked,
9768 because the stub clobbers r15 as per 5.3.6 of the psABI.
9769 We don't need to do that in non-canonical PIC mode. */
9770
9771 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9772 {
9773 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9774 indirect_call = true;
9775 }
9776 else
9777 indirect_call = false;
9778
9779 if (TARGET_GNU_AS)
9780 fputs ("\t.prologue 4, r40\n", file);
9781 else
9782 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9783 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9784
9785 if (NO_PROFILE_COUNTERS)
9786 fputs ("\tmov out3 = r0\n", file);
9787 else
9788 {
9789 char buf[20];
9790 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9791
9792 if (TARGET_AUTO_PIC)
9793 fputs ("\tmovl out3 = @gprel(", file);
9794 else
9795 fputs ("\taddl out3 = @ltoff(", file);
9796 assemble_name (file, buf);
9797 if (TARGET_AUTO_PIC)
9798 fputs (")\n", file);
9799 else
9800 fputs ("), r1\n", file);
9801 }
9802
9803 if (indirect_call)
9804 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9805 fputs ("\t;;\n", file);
9806
9807 fputs ("\t.save rp, r42\n", file);
9808 fputs ("\tmov out2 = b0\n", file);
9809 if (indirect_call)
9810 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9811 fputs ("\t.body\n", file);
9812 fputs ("\tmov out1 = r1\n", file);
9813 if (indirect_call)
9814 {
9815 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9816 fputs ("\tmov b6 = r16\n", file);
9817 fputs ("\tld8 r1 = [r14]\n", file);
9818 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9819 }
9820 else
9821 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
9822 }
9823
9824 static GTY(()) rtx mcount_func_rtx;
9825 static rtx
9826 gen_mcount_func_rtx (void)
9827 {
9828 if (!mcount_func_rtx)
9829 mcount_func_rtx = init_one_libfunc ("_mcount");
9830 return mcount_func_rtx;
9831 }
9832
9833 void
9834 ia64_profile_hook (int labelno)
9835 {
9836 rtx label, ip;
9837
9838 if (NO_PROFILE_COUNTERS)
9839 label = const0_rtx;
9840 else
9841 {
9842 char buf[30];
9843 const char *label_name;
9844 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9845 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9846 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9847 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9848 }
9849 ip = gen_reg_rtx (Pmode);
9850 emit_insn (gen_ip_value (ip));
9851 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9852 VOIDmode, 3,
9853 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9854 ip, Pmode,
9855 label, Pmode);
9856 }
9857
9858 /* Return the mangling of TYPE if it is an extended fundamental type. */
9859
9860 static const char *
9861 ia64_mangle_type (const_tree type)
9862 {
9863 type = TYPE_MAIN_VARIANT (type);
9864
9865 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
9866 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
9867 return NULL;
9868
9869 /* On HP-UX, "long double" is mangled as "e" so __float128 is
9870 mangled as "e". */
9871 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9872 return "g";
9873 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9874 an extended mangling. Elsewhere, "e" is available since long
9875 double is 80 bits. */
9876 if (TYPE_MODE (type) == XFmode)
9877 return TARGET_HPUX ? "u9__float80" : "e";
9878 if (TYPE_MODE (type) == RFmode)
9879 return "u7__fpreg";
9880 return NULL;
9881 }
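/* Illustrative manglings (an assumption following the Itanium C++ ABI
   vendor-extended-type rule, not taken from this file): with the
   strings above, "void f (__fpreg);" mangles as _Z1fu7__fpreg, and on
   HP-UX "void g (__float80);" mangles as _Z1gu9__float80.  */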
9882
9883 /* Return the diagnostic message string if conversion from FROMTYPE to
9884 TOTYPE is not allowed, NULL otherwise. */
9885 static const char *
9886 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
9887 {
9888 /* Reject nontrivial conversion to or from __fpreg. */
9889 if (TYPE_MODE (fromtype) == RFmode
9890 && TYPE_MODE (totype) != RFmode
9891 && TYPE_MODE (totype) != VOIDmode)
9892 return N_("invalid conversion from %<__fpreg%>");
9893 if (TYPE_MODE (totype) == RFmode
9894 && TYPE_MODE (fromtype) != RFmode)
9895 return N_("invalid conversion to %<__fpreg%>");
9896 return NULL;
9897 }
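/* Illustrative diagnostics (assumption): given "__fpreg f;", the
   initialization "double d = f;" triggers the conversion error above,
   while an expression such as "f + 1.0" is rejected by
   ia64_invalid_binary_op below.  */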
9898
9899 /* Return the diagnostic message string if the unary operation OP is
9900 not permitted on TYPE, NULL otherwise. */
9901 static const char *
9902 ia64_invalid_unary_op (int op, const_tree type)
9903 {
9904 /* Reject operations on __fpreg other than unary + or &. */
9905 if (TYPE_MODE (type) == RFmode
9906 && op != CONVERT_EXPR
9907 && op != ADDR_EXPR)
9908 return N_("invalid operation on %<__fpreg%>");
9909 return NULL;
9910 }
9911
9912 /* Return the diagnostic message string if the binary operation OP is
9913 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9914 static const char *
9915 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
9916 {
9917 /* Reject operations on __fpreg. */
9918 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9919 return N_("invalid operation on %<__fpreg%>");
9920 return NULL;
9921 }
9922
9923 /* Implement overriding of the optimization options. */
9924 void
9925 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9926 int size ATTRIBUTE_UNUSED)
9927 {
9928 /* Let the scheduler form additional regions. */
9929 set_param_value ("max-sched-extend-regions-iters", 2);
9930
9931 /* Set the default values for cache-related parameters. */
9932 set_param_value ("simultaneous-prefetches", 6);
9933 set_param_value ("l1-cache-line-size", 32);
9934
9935 }
9936
9937 /* HP-UX version_id attribute.
9938 For object foo, if the version_id is set to 1234, put out an alias
9939 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
9940 other than an alias statement because it is an illegal symbol name. */
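/* Hedged usage sketch (the declaration below is illustrative and not
   taken from this file):

     extern int foo (void) __attribute__ ((version_id ("1234")));

   References to foo are then redirected to the versioned symbol
   "foo{1234}" through the emitted .alias directive.  */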
9941
9942 static tree
9943 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9944 tree name ATTRIBUTE_UNUSED,
9945 tree args,
9946 int flags ATTRIBUTE_UNUSED,
9947 bool *no_add_attrs)
9948 {
9949 tree arg = TREE_VALUE (args);
9950
9951 if (TREE_CODE (arg) != STRING_CST)
9952 {
9953 error ("version attribute is not a string");
9954 *no_add_attrs = true;
9955 return NULL_TREE;
9956 }
9957 return NULL_TREE;
9958 }
9959
9960 /* Target hook for c_mode_for_suffix. */
9961
9962 static enum machine_mode
9963 ia64_c_mode_for_suffix (char suffix)
9964 {
9965 if (suffix == 'q')
9966 return TFmode;
9967 if (suffix == 'w')
9968 return XFmode;
9969
9970 return VOIDmode;
9971 }
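/* For example (illustrative constants, not from this file), with the
   mapping above the literal 1.5q has type __float128 (TFmode) and
   1.5w has type __float80 (XFmode) in GNU C on this target.  */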
9972
9973 #include "gt-ia64.h"