gcc/config/ia64/ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "stor-layout.h"
30 #include "calls.h"
31 #include "varasm.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "hashtab.h"
44 #include "hash-set.h"
45 #include "vec.h"
46 #include "machmode.h"
47 #include "input.h"
48 #include "function.h"
49 #include "ggc.h"
50 #include "basic-block.h"
51 #include "libfuncs.h"
52 #include "diagnostic-core.h"
53 #include "sched-int.h"
54 #include "timevar.h"
55 #include "target.h"
56 #include "target-def.h"
57 #include "common/common-target.h"
58 #include "tm_p.h"
59 #include "hash-table.h"
60 #include "langhooks.h"
61 #include "basic-block.h"
62 #include "tree-ssa-alias.h"
63 #include "internal-fn.h"
64 #include "gimple-fold.h"
65 #include "tree-eh.h"
66 #include "gimple-expr.h"
67 #include "is-a.h"
68 #include "gimple.h"
69 #include "gimplify.h"
70 #include "intl.h"
71 #include "df.h"
72 #include "debug.h"
73 #include "params.h"
74 #include "dbgcnt.h"
75 #include "tm-constrs.h"
76 #include "sel-sched.h"
77 #include "reload.h"
78 #include "opts.h"
79 #include "dumpfile.h"
80 #include "builtins.h"
81
82 /* This is used for communication between ASM_OUTPUT_LABEL and
83 ASM_OUTPUT_LABELREF. */
84 int ia64_asm_output_label = 0;
85
86 /* Register names for ia64_expand_prologue. */
87 static const char * const ia64_reg_numbers[96] =
88 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
89 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
90 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
91 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
92 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
93 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
94 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
95 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
96 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
97 "r104","r105","r106","r107","r108","r109","r110","r111",
98 "r112","r113","r114","r115","r116","r117","r118","r119",
99 "r120","r121","r122","r123","r124","r125","r126","r127"};
100
101 /* ??? These strings could be shared with REGISTER_NAMES. */
102 static const char * const ia64_input_reg_names[8] =
103 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
104
105 /* ??? These strings could be shared with REGISTER_NAMES. */
106 static const char * const ia64_local_reg_names[80] =
107 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
108 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
109 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
110 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
111 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
112 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
113 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
114 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
115 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
116 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
117
118 /* ??? These strings could be shared with REGISTER_NAMES. */
119 static const char * const ia64_output_reg_names[8] =
120 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
121
122 /* Variables which are this size or smaller are put in the sdata/sbss
123 sections. */
124
125 unsigned int ia64_section_threshold;
126
127 /* The following variable is used by the DFA insn scheduler. The value is
128 TRUE if we do insn bundling instead of insn scheduling. */
129 int bundling_p = 0;
130
131 enum ia64_frame_regs
132 {
133 reg_fp,
134 reg_save_b0,
135 reg_save_pr,
136 reg_save_ar_pfs,
137 reg_save_ar_unat,
138 reg_save_ar_lc,
139 reg_save_gp,
140 number_of_ia64_frame_regs
141 };
142
143 /* Structure to be filled in by ia64_compute_frame_size with register
144 save masks and offsets for the current function. */
145
146 struct ia64_frame_info
147 {
148 HOST_WIDE_INT total_size; /* size of the stack frame, not including
149 the caller's scratch area. */
150 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
151 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
152 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
153 HARD_REG_SET mask; /* mask of saved registers. */
154 unsigned int gr_used_mask; /* mask of registers in use as gr spill
155 registers or long-term scratches. */
156 int n_spilled; /* number of spilled registers. */
157 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
158 int n_input_regs; /* number of input registers used. */
159 int n_local_regs; /* number of local registers used. */
160 int n_output_regs; /* number of output registers used. */
161 int n_rotate_regs; /* number of rotating registers used. */
162
163 char need_regstk; /* true if a .regstk directive needed. */
164 char initialized; /* true if the data is finalized. */
165 };
166
167 /* Current frame information calculated by ia64_compute_frame_size. */
168 static struct ia64_frame_info current_frame_info;
169 /* The actual registers that are emitted. */
170 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
171 \f
172 static int ia64_first_cycle_multipass_dfa_lookahead (void);
173 static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
174 static void ia64_init_dfa_pre_cycle_insn (void);
175 static rtx ia64_dfa_pre_cycle_insn (void);
176 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
177 static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
178 static void ia64_h_i_d_extended (void);
179 static void * ia64_alloc_sched_context (void);
180 static void ia64_init_sched_context (void *, bool);
181 static void ia64_set_sched_context (void *);
182 static void ia64_clear_sched_context (void *);
183 static void ia64_free_sched_context (void *);
184 static int ia64_mode_to_int (enum machine_mode);
185 static void ia64_set_sched_flags (spec_info_t);
186 static ds_t ia64_get_insn_spec_ds (rtx_insn *);
187 static ds_t ia64_get_insn_checked_ds (rtx_insn *);
188 static bool ia64_skip_rtx_p (const_rtx);
189 static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
190 static bool ia64_needs_block_p (ds_t);
191 static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
192 static int ia64_spec_check_p (rtx);
193 static int ia64_spec_check_src_p (rtx);
194 static rtx gen_tls_get_addr (void);
195 static rtx gen_thread_pointer (void);
196 static int find_gr_spill (enum ia64_frame_regs, int);
197 static int next_scratch_gr_reg (void);
198 static void mark_reg_gr_used_mask (rtx, void *);
199 static void ia64_compute_frame_size (HOST_WIDE_INT);
200 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
201 static void finish_spill_pointers (void);
202 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
203 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
204 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
205 static rtx gen_movdi_x (rtx, rtx, rtx);
206 static rtx gen_fr_spill_x (rtx, rtx, rtx);
207 static rtx gen_fr_restore_x (rtx, rtx, rtx);
208
209 static void ia64_option_override (void);
210 static bool ia64_can_eliminate (const int, const int);
211 static enum machine_mode hfa_element_mode (const_tree, bool);
212 static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
213 tree, int *, int);
214 static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
215 tree, bool);
216 static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
217 const_tree, bool, bool);
218 static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
219 const_tree, bool);
220 static rtx ia64_function_incoming_arg (cumulative_args_t,
221 enum machine_mode, const_tree, bool);
222 static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
223 const_tree, bool);
224 static unsigned int ia64_function_arg_boundary (enum machine_mode,
225 const_tree);
226 static bool ia64_function_ok_for_sibcall (tree, tree);
227 static bool ia64_return_in_memory (const_tree, const_tree);
228 static rtx ia64_function_value (const_tree, const_tree, bool);
229 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
230 static bool ia64_function_value_regno_p (const unsigned int);
231 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
232 reg_class_t);
233 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
234 bool);
235 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
236 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
237 static void fix_range (const char *);
238 static struct machine_function * ia64_init_machine_status (void);
239 static void emit_insn_group_barriers (FILE *);
240 static void emit_all_insn_group_barriers (FILE *);
241 static void final_emit_insn_group_barriers (FILE *);
242 static void emit_predicate_relation_info (void);
243 static void ia64_reorg (void);
244 static bool ia64_in_small_data_p (const_tree);
245 static void process_epilogue (FILE *, rtx, bool, bool);
246
247 static bool ia64_assemble_integer (rtx, unsigned int, int);
248 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
249 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
250 static void ia64_output_function_end_prologue (FILE *);
251
252 static void ia64_print_operand (FILE *, rtx, int);
253 static void ia64_print_operand_address (FILE *, rtx);
254 static bool ia64_print_operand_punct_valid_p (unsigned char code);
255
256 static int ia64_issue_rate (void);
257 static int ia64_adjust_cost_2 (rtx_insn *, int, rtx_insn *, int, dw_t);
258 static void ia64_sched_init (FILE *, int, int);
259 static void ia64_sched_init_global (FILE *, int, int);
260 static void ia64_sched_finish_global (FILE *, int);
261 static void ia64_sched_finish (FILE *, int);
262 static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
263 static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
264 static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
265 static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
266
267 static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
268 static void ia64_asm_emit_except_personality (rtx);
269 static void ia64_asm_init_sections (void);
270
271 static enum unwind_info_type ia64_debug_unwind_info (void);
272
273 static struct bundle_state *get_free_bundle_state (void);
274 static void free_bundle_state (struct bundle_state *);
275 static void initiate_bundle_states (void);
276 static void finish_bundle_states (void);
277 static int insert_bundle_state (struct bundle_state *);
278 static void initiate_bundle_state_table (void);
279 static void finish_bundle_state_table (void);
280 static int try_issue_nops (struct bundle_state *, int);
281 static int try_issue_insn (struct bundle_state *, rtx);
282 static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
283 int, int);
284 static int get_max_pos (state_t);
285 static int get_template (state_t, int);
286
287 static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
288 static bool important_for_bundling_p (rtx_insn *);
289 static bool unknown_for_bundling_p (rtx_insn *);
290 static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
291
292 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
293 HOST_WIDE_INT, tree);
294 static void ia64_file_start (void);
295 static void ia64_globalize_decl_name (FILE *, tree);
296
297 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
298 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
299 static section *ia64_select_rtx_section (enum machine_mode, rtx,
300 unsigned HOST_WIDE_INT);
301 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
302 ATTRIBUTE_UNUSED;
303 static unsigned int ia64_section_type_flags (tree, const char *, int);
304 static void ia64_init_libfuncs (void)
305 ATTRIBUTE_UNUSED;
306 static void ia64_hpux_init_libfuncs (void)
307 ATTRIBUTE_UNUSED;
308 static void ia64_sysv4_init_libfuncs (void)
309 ATTRIBUTE_UNUSED;
310 static void ia64_vms_init_libfuncs (void)
311 ATTRIBUTE_UNUSED;
312 static void ia64_soft_fp_init_libfuncs (void)
313 ATTRIBUTE_UNUSED;
314 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
315 ATTRIBUTE_UNUSED;
316 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
317 ATTRIBUTE_UNUSED;
318
319 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
320 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
321 static void ia64_encode_section_info (tree, rtx, int);
322 static rtx ia64_struct_value_rtx (tree, int);
323 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
324 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
325 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
326 static bool ia64_libgcc_floating_mode_supported_p (enum machine_mode mode);
327 static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
328 static bool ia64_legitimate_address_p (enum machine_mode, rtx, bool);
329 static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
330 static const char *ia64_mangle_type (const_tree);
331 static const char *ia64_invalid_conversion (const_tree, const_tree);
332 static const char *ia64_invalid_unary_op (int, const_tree);
333 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
334 static enum machine_mode ia64_c_mode_for_suffix (char);
335 static void ia64_trampoline_init (rtx, tree, rtx);
336 static void ia64_override_options_after_change (void);
337 static bool ia64_member_type_forces_blk (const_tree, enum machine_mode);
338
339 static tree ia64_builtin_decl (unsigned, bool);
340
341 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
342 static enum machine_mode ia64_get_reg_raw_mode (int regno);
343 static section * ia64_hpux_function_section (tree, enum node_frequency,
344 bool, bool);
345
346 static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
347 const unsigned char *sel);
348
349 #define MAX_VECT_LEN 8
350
351 struct expand_vec_perm_d
352 {
353 rtx target, op0, op1;
354 unsigned char perm[MAX_VECT_LEN];
355 enum machine_mode vmode;
356 unsigned char nelt;
357 bool one_operand_p;
358 bool testing_p;
359 };
360
361 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
362
363 \f
364 /* Table of valid machine attributes. */
365 static const struct attribute_spec ia64_attribute_table[] =
366 {
367 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
368 affects_type_identity } */
369 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
370 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
371 false },
372 #if TARGET_ABI_OPEN_VMS
373 { "common_object", 1, 1, true, false, false,
374 ia64_vms_common_object_attribute, false },
375 #endif
376 { "version_id", 1, 1, true, false, false,
377 ia64_handle_version_id_attribute, false },
378 { NULL, 0, 0, false, false, false, NULL, false }
379 };
380
381 /* Initialize the GCC target structure. */
382 #undef TARGET_ATTRIBUTE_TABLE
383 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
384
385 #undef TARGET_INIT_BUILTINS
386 #define TARGET_INIT_BUILTINS ia64_init_builtins
387
388 #undef TARGET_EXPAND_BUILTIN
389 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
390
391 #undef TARGET_BUILTIN_DECL
392 #define TARGET_BUILTIN_DECL ia64_builtin_decl
393
394 #undef TARGET_ASM_BYTE_OP
395 #define TARGET_ASM_BYTE_OP "\tdata1\t"
396 #undef TARGET_ASM_ALIGNED_HI_OP
397 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
398 #undef TARGET_ASM_ALIGNED_SI_OP
399 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
400 #undef TARGET_ASM_ALIGNED_DI_OP
401 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
402 #undef TARGET_ASM_UNALIGNED_HI_OP
403 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
404 #undef TARGET_ASM_UNALIGNED_SI_OP
405 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
406 #undef TARGET_ASM_UNALIGNED_DI_OP
407 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
408 #undef TARGET_ASM_INTEGER
409 #define TARGET_ASM_INTEGER ia64_assemble_integer
410
411 #undef TARGET_OPTION_OVERRIDE
412 #define TARGET_OPTION_OVERRIDE ia64_option_override
413
414 #undef TARGET_ASM_FUNCTION_PROLOGUE
415 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
416 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
417 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
418 #undef TARGET_ASM_FUNCTION_EPILOGUE
419 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
420
421 #undef TARGET_PRINT_OPERAND
422 #define TARGET_PRINT_OPERAND ia64_print_operand
423 #undef TARGET_PRINT_OPERAND_ADDRESS
424 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
425 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
426 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
427
428 #undef TARGET_IN_SMALL_DATA_P
429 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
430
431 #undef TARGET_SCHED_ADJUST_COST_2
432 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
433 #undef TARGET_SCHED_ISSUE_RATE
434 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
435 #undef TARGET_SCHED_VARIABLE_ISSUE
436 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
437 #undef TARGET_SCHED_INIT
438 #define TARGET_SCHED_INIT ia64_sched_init
439 #undef TARGET_SCHED_FINISH
440 #define TARGET_SCHED_FINISH ia64_sched_finish
441 #undef TARGET_SCHED_INIT_GLOBAL
442 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
443 #undef TARGET_SCHED_FINISH_GLOBAL
444 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
445 #undef TARGET_SCHED_REORDER
446 #define TARGET_SCHED_REORDER ia64_sched_reorder
447 #undef TARGET_SCHED_REORDER2
448 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
449
450 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
451 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
452
453 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
454 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
455
456 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
457 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
458 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
459 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
460
461 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
462 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
463 ia64_first_cycle_multipass_dfa_lookahead_guard
464
465 #undef TARGET_SCHED_DFA_NEW_CYCLE
466 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
467
468 #undef TARGET_SCHED_H_I_D_EXTENDED
469 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
470
471 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
472 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
473
474 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
475 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
476
477 #undef TARGET_SCHED_SET_SCHED_CONTEXT
478 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
479
480 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
481 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
482
483 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
484 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
485
486 #undef TARGET_SCHED_SET_SCHED_FLAGS
487 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
488
489 #undef TARGET_SCHED_GET_INSN_SPEC_DS
490 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
491
492 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
493 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
494
495 #undef TARGET_SCHED_SPECULATE_INSN
496 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
497
498 #undef TARGET_SCHED_NEEDS_BLOCK_P
499 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
500
501 #undef TARGET_SCHED_GEN_SPEC_CHECK
502 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
503
504 #undef TARGET_SCHED_SKIP_RTX_P
505 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
506
507 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
508 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
509 #undef TARGET_ARG_PARTIAL_BYTES
510 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
511 #undef TARGET_FUNCTION_ARG
512 #define TARGET_FUNCTION_ARG ia64_function_arg
513 #undef TARGET_FUNCTION_INCOMING_ARG
514 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
515 #undef TARGET_FUNCTION_ARG_ADVANCE
516 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
517 #undef TARGET_FUNCTION_ARG_BOUNDARY
518 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
519
520 #undef TARGET_ASM_OUTPUT_MI_THUNK
521 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
522 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
523 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
524
525 #undef TARGET_ASM_FILE_START
526 #define TARGET_ASM_FILE_START ia64_file_start
527
528 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
529 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
530
531 #undef TARGET_REGISTER_MOVE_COST
532 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
533 #undef TARGET_MEMORY_MOVE_COST
534 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
535 #undef TARGET_RTX_COSTS
536 #define TARGET_RTX_COSTS ia64_rtx_costs
537 #undef TARGET_ADDRESS_COST
538 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
539
540 #undef TARGET_UNSPEC_MAY_TRAP_P
541 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
542
543 #undef TARGET_MACHINE_DEPENDENT_REORG
544 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
545
546 #undef TARGET_ENCODE_SECTION_INFO
547 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
548
549 #undef TARGET_SECTION_TYPE_FLAGS
550 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
551
552 #ifdef HAVE_AS_TLS
553 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
554 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
555 #endif
556
557 /* ??? Investigate. */
558 #if 0
559 #undef TARGET_PROMOTE_PROTOTYPES
560 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
561 #endif
562
563 #undef TARGET_FUNCTION_VALUE
564 #define TARGET_FUNCTION_VALUE ia64_function_value
565 #undef TARGET_LIBCALL_VALUE
566 #define TARGET_LIBCALL_VALUE ia64_libcall_value
567 #undef TARGET_FUNCTION_VALUE_REGNO_P
568 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
569
570 #undef TARGET_STRUCT_VALUE_RTX
571 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
572 #undef TARGET_RETURN_IN_MEMORY
573 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
574 #undef TARGET_SETUP_INCOMING_VARARGS
575 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
576 #undef TARGET_STRICT_ARGUMENT_NAMING
577 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
578 #undef TARGET_MUST_PASS_IN_STACK
579 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
580 #undef TARGET_GET_RAW_RESULT_MODE
581 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
582 #undef TARGET_GET_RAW_ARG_MODE
583 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
584
585 #undef TARGET_MEMBER_TYPE_FORCES_BLK
586 #define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
587
588 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
589 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
590
591 #undef TARGET_ASM_UNWIND_EMIT
592 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
593 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
594 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
595 #undef TARGET_ASM_INIT_SECTIONS
596 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
597
598 #undef TARGET_DEBUG_UNWIND_INFO
599 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
600
601 #undef TARGET_SCALAR_MODE_SUPPORTED_P
602 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
603 #undef TARGET_VECTOR_MODE_SUPPORTED_P
604 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
605
606 #undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
607 #define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \
608 ia64_libgcc_floating_mode_supported_p
609
610 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
611 in an order different from the specified program order. */
612 #undef TARGET_RELAXED_ORDERING
613 #define TARGET_RELAXED_ORDERING true
614
615 #undef TARGET_LEGITIMATE_CONSTANT_P
616 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
617 #undef TARGET_LEGITIMATE_ADDRESS_P
618 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
619
620 #undef TARGET_CANNOT_FORCE_CONST_MEM
621 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
622
623 #undef TARGET_MANGLE_TYPE
624 #define TARGET_MANGLE_TYPE ia64_mangle_type
625
626 #undef TARGET_INVALID_CONVERSION
627 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
628 #undef TARGET_INVALID_UNARY_OP
629 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
630 #undef TARGET_INVALID_BINARY_OP
631 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
632
633 #undef TARGET_C_MODE_FOR_SUFFIX
634 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
635
636 #undef TARGET_CAN_ELIMINATE
637 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
638
639 #undef TARGET_TRAMPOLINE_INIT
640 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
641
642 #undef TARGET_CAN_USE_DOLOOP_P
643 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
644 #undef TARGET_INVALID_WITHIN_DOLOOP
645 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
646
647 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
648 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
649
650 #undef TARGET_PREFERRED_RELOAD_CLASS
651 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
652
653 #undef TARGET_DELAY_SCHED2
654 #define TARGET_DELAY_SCHED2 true
655
656 /* Variable tracking should be run after all optimizations which
657 change order of insns. It also needs a valid CFG. */
658 #undef TARGET_DELAY_VARTRACK
659 #define TARGET_DELAY_VARTRACK true
660
661 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
662 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
663
664 struct gcc_target targetm = TARGET_INITIALIZER;
665 \f
666 typedef enum
667 {
668 ADDR_AREA_NORMAL, /* normal address area */
669 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
670 }
671 ia64_addr_area;
672
673 static GTY(()) tree small_ident1;
674 static GTY(()) tree small_ident2;
675
676 static void
677 init_idents (void)
678 {
679 if (small_ident1 == 0)
680 {
681 small_ident1 = get_identifier ("small");
682 small_ident2 = get_identifier ("__small__");
683 }
684 }
685
686 /* Retrieve the address area that has been chosen for the given decl. */
687
688 static ia64_addr_area
689 ia64_get_addr_area (tree decl)
690 {
691 tree model_attr;
692
693 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
694 if (model_attr)
695 {
696 tree id;
697
698 init_idents ();
699 id = TREE_VALUE (TREE_VALUE (model_attr));
700 if (id == small_ident1 || id == small_ident2)
701 return ADDR_AREA_SMALL;
702 }
703 return ADDR_AREA_NORMAL;
704 }
705
706 static tree
707 ia64_handle_model_attribute (tree *node, tree name, tree args,
708 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
709 {
710 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
711 ia64_addr_area area;
712 tree arg, decl = *node;
713
714 init_idents ();
715 arg = TREE_VALUE (args);
716 if (arg == small_ident1 || arg == small_ident2)
717 {
718 addr_area = ADDR_AREA_SMALL;
719 }
720 else
721 {
722 warning (OPT_Wattributes, "invalid argument of %qE attribute",
723 name);
724 *no_add_attrs = true;
725 }
726
727 switch (TREE_CODE (decl))
728 {
729 case VAR_DECL:
730 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
731 == FUNCTION_DECL)
732 && !TREE_STATIC (decl))
733 {
734 error_at (DECL_SOURCE_LOCATION (decl),
735 "an address area attribute cannot be specified for "
736 "local variables");
737 *no_add_attrs = true;
738 }
739 area = ia64_get_addr_area (decl);
740 if (area != ADDR_AREA_NORMAL && addr_area != area)
741 {
742 error ("address area of %q+D conflicts with previous "
743 "declaration", decl);
744 *no_add_attrs = true;
745 }
746 break;
747
748 case FUNCTION_DECL:
749 error_at (DECL_SOURCE_LOCATION (decl),
750 "address area attribute cannot be specified for "
751 "functions");
752 *no_add_attrs = true;
753 break;
754
755 default:
756 warning (OPT_Wattributes, "%qE attribute ignored",
757 name);
758 *no_add_attrs = true;
759 break;
760 }
761
762 return NULL_TREE;
763 }
764
765 /* Part of the low level implementation of DEC Ada pragma Common_Object which
766 enables the shared use of variables stored in overlaid linker areas
767 corresponding to the use of Fortran COMMON. */
768
769 static tree
770 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
771 int flags ATTRIBUTE_UNUSED,
772 bool *no_add_attrs)
773 {
774 tree decl = *node;
775 tree id;
776
777 gcc_assert (DECL_P (decl));
778
779 DECL_COMMON (decl) = 1;
780 id = TREE_VALUE (args);
781 if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
782 {
783 error ("%qE attribute requires a string constant argument", name);
784 *no_add_attrs = true;
785 return NULL_TREE;
786 }
787 return NULL_TREE;
788 }
789
790 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
791
792 void
793 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
794 unsigned HOST_WIDE_INT size,
795 unsigned int align)
796 {
797 tree attr = DECL_ATTRIBUTES (decl);
798
799 if (attr)
800 attr = lookup_attribute ("common_object", attr);
801 if (attr)
802 {
803 tree id = TREE_VALUE (TREE_VALUE (attr));
804 const char *name;
805
806 if (TREE_CODE (id) == IDENTIFIER_NODE)
807 name = IDENTIFIER_POINTER (id);
808 else if (TREE_CODE (id) == STRING_CST)
809 name = TREE_STRING_POINTER (id);
810 else
811 abort ();
812
813 fprintf (file, "\t.vms_common\t\"%s\",", name);
814 }
815 else
816 fprintf (file, "%s", COMMON_ASM_OP);
817
818 /* Code from elfos.h. */
819 assemble_name (file, name);
820 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u",
821 size, align / BITS_PER_UNIT);
822
823 fputc ('\n', file);
824 }
825
826 static void
827 ia64_encode_addr_area (tree decl, rtx symbol)
828 {
829 int flags;
830
831 flags = SYMBOL_REF_FLAGS (symbol);
832 switch (ia64_get_addr_area (decl))
833 {
834 case ADDR_AREA_NORMAL: break;
835 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
836 default: gcc_unreachable ();
837 }
838 SYMBOL_REF_FLAGS (symbol) = flags;
839 }
840
841 static void
842 ia64_encode_section_info (tree decl, rtx rtl, int first)
843 {
844 default_encode_section_info (decl, rtl, first);
845
846 /* Careful not to prod global register variables. */
847 if (TREE_CODE (decl) == VAR_DECL
848 && GET_CODE (DECL_RTL (decl)) == MEM
849 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
850 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
851 ia64_encode_addr_area (decl, XEXP (rtl, 0));
852 }
853 \f
854 /* Return 1 if the operands of a move are ok. */
855
856 int
857 ia64_move_ok (rtx dst, rtx src)
858 {
859 /* If we're under init_recog_no_volatile, we'll not be able to use
860 memory_operand. So check the code directly and don't worry about
861 the validity of the underlying address, which should have been
862 checked elsewhere anyway. */
863 if (GET_CODE (dst) != MEM)
864 return 1;
865 if (GET_CODE (src) == MEM)
866 return 0;
867 if (register_operand (src, VOIDmode))
868 return 1;
869
870 /* Otherwise, this must be a constant, and it must be either 0, 0.0 or 1.0. */
871 if (INTEGRAL_MODE_P (GET_MODE (dst)))
872 return src == const0_rtx;
873 else
874 return satisfies_constraint_G (src);
875 }
876
877 /* Return 1 if the operands are ok for a floating point load pair. */
878
879 int
880 ia64_load_pair_ok (rtx dst, rtx src)
881 {
882 /* ??? There is a thinko in the implementation of the "x" constraint and the
883 FP_REGS class. The constraint will also reject (reg f30:TI) so we must
884 also return false for it. */
885 if (GET_CODE (dst) != REG
886 || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
887 return 0;
888 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
889 return 0;
890 switch (GET_CODE (XEXP (src, 0)))
891 {
892 case REG:
893 case POST_INC:
894 break;
895 case POST_DEC:
896 return 0;
897 case POST_MODIFY:
898 {
899 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
900
901 if (GET_CODE (adjust) != CONST_INT
902 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
903 return 0;
904 }
905 break;
906 default:
907 abort ();
908 }
909 return 1;
910 }
911
912 int
913 addp4_optimize_ok (rtx op1, rtx op2)
914 {
915 return (basereg_operand (op1, GET_MODE(op1)) !=
916 basereg_operand (op2, GET_MODE(op2)));
917 }
918
919 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
920 Return the length of the field, or <= 0 on failure. */
921
922 int
923 ia64_depz_field_mask (rtx rop, rtx rshift)
924 {
925 unsigned HOST_WIDE_INT op = INTVAL (rop);
926 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
927
928 /* Get rid of the zero bits we're shifting in. */
929 op >>= shift;
930
931 /* We must now have a solid block of 1's at bit 0. */
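  /* For example, op = 0x1f8 with shift = 3: after the shift op = 0x3f,
     a solid block of six 1's, and exact_log2 (0x3f + 1) = 6, so the
     deposited field is 6 bits wide.  */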
932 return exact_log2 (op + 1);
933 }
934
935 /* Return the TLS model to use for ADDR. */
936
937 static enum tls_model
938 tls_symbolic_operand_type (rtx addr)
939 {
940 enum tls_model tls_kind = TLS_MODEL_NONE;
941
942 if (GET_CODE (addr) == CONST)
943 {
944 if (GET_CODE (XEXP (addr, 0)) == PLUS
945 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
946 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
947 }
948 else if (GET_CODE (addr) == SYMBOL_REF)
949 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
950
951 return tls_kind;
952 }
953
954 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
955 as a base register. */
956
957 static inline bool
958 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
959 {
960 if (strict
961 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
962 return true;
963 else if (!strict
964 && (GENERAL_REGNO_P (REGNO (reg))
965 || !HARD_REGISTER_P (reg)))
966 return true;
967 else
968 return false;
969 }
970
971 static bool
972 ia64_legitimate_address_reg (const_rtx reg, bool strict)
973 {
974 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
975 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
976 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
977 return true;
978
979 return false;
980 }
981
982 static bool
983 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
984 {
985 if (GET_CODE (disp) == PLUS
986 && rtx_equal_p (reg, XEXP (disp, 0))
987 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
988 || (CONST_INT_P (XEXP (disp, 1))
989 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
990 return true;
991
992 return false;
993 }
994
995 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
996
997 static bool
998 ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
999 rtx x, bool strict)
1000 {
1001 if (ia64_legitimate_address_reg (x, strict))
1002 return true;
1003 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
1004 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1005 && XEXP (x, 0) != arg_pointer_rtx)
1006 return true;
1007 else if (GET_CODE (x) == POST_MODIFY
1008 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1009 && XEXP (x, 0) != arg_pointer_rtx
1010 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
1011 return true;
1012 else
1013 return false;
1014 }
1015
1016 /* Return true if X is a constant that is valid for some immediate
1017 field in an instruction. */
1018
1019 static bool
1020 ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
1021 {
1022 switch (GET_CODE (x))
1023 {
1024 case CONST_INT:
1025 case LABEL_REF:
1026 return true;
1027
1028 case CONST_DOUBLE:
1029 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1030 return true;
1031 return satisfies_constraint_G (x);
1032
1033 case CONST:
1034 case SYMBOL_REF:
1035 /* ??? Short term workaround for PR 28490. We must make the code here
1036 match the code in ia64_expand_move and move_operand, even though they
1037 are both technically wrong. */
1038 if (tls_symbolic_operand_type (x) == 0)
1039 {
1040 HOST_WIDE_INT addend = 0;
1041 rtx op = x;
1042
1043 if (GET_CODE (op) == CONST
1044 && GET_CODE (XEXP (op, 0)) == PLUS
1045 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1046 {
1047 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1048 op = XEXP (XEXP (op, 0), 0);
1049 }
1050
1051 if (any_offset_symbol_operand (op, mode)
1052 || function_operand (op, mode))
1053 return true;
1054 if (aligned_offset_symbol_operand (op, mode))
1055 return (addend & 0x3fff) == 0;
1056 return false;
1057 }
1058 return false;
1059
1060 case CONST_VECTOR:
1061 if (mode == V2SFmode)
1062 return satisfies_constraint_Y (x);
1063
1064 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1065 && GET_MODE_SIZE (mode) <= 8);
1066
1067 default:
1068 return false;
1069 }
1070 }
1071
1072 /* Don't allow TLS addresses to get spilled to memory. */
1073
1074 static bool
1075 ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
1076 {
1077 if (mode == RFmode)
1078 return true;
1079 return tls_symbolic_operand_type (x) != 0;
1080 }
1081
1082 /* Expand a symbolic constant load. */
1083
1084 bool
1085 ia64_expand_load_address (rtx dest, rtx src)
1086 {
1087 gcc_assert (GET_CODE (dest) == REG);
1088
1089 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1090 having to pointer-extend the value afterward. Other forms of address
1091 computation below are also more natural to compute as 64-bit quantities.
1092 If we've been given an SImode destination register, change it. */
1093 if (GET_MODE (dest) != Pmode)
1094 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1095 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1096
1097 if (TARGET_NO_PIC)
1098 return false;
1099 if (small_addr_symbolic_operand (src, VOIDmode))
1100 return false;
1101
1102 if (TARGET_AUTO_PIC)
1103 emit_insn (gen_load_gprel64 (dest, src));
1104 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1105 emit_insn (gen_load_fptr (dest, src));
1106 else if (sdata_symbolic_operand (src, VOIDmode))
1107 emit_insn (gen_load_gprel (dest, src));
1108 else
1109 {
1110 HOST_WIDE_INT addend = 0;
1111 rtx tmp;
1112
1113 /* We did split constant offsets in ia64_expand_move, and we did try
1114 to keep them split in move_operand, but we also allowed reload to
1115 rematerialize arbitrary constants rather than spill the value to
1116 the stack and reload it. So we have to be prepared here to split
1117 them apart again. */
1118 if (GET_CODE (src) == CONST)
1119 {
1120 HOST_WIDE_INT hi, lo;
1121
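  /* Split the offset into hi + lo, where lo is the low 14 bits taken as
     a signed value: e.g. an offset of 0x12000 gives lo = -0x2000 and
     hi = 0x14000.  The hi part stays attached to the symbol and the lo
     part is added back separately below.  */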
1122 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1123 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1124 hi = hi - lo;
1125
1126 if (lo != 0)
1127 {
1128 addend = lo;
1129 src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
1130 }
1131 }
1132
1133 tmp = gen_rtx_HIGH (Pmode, src);
1134 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1135 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1136
1137 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1138 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1139
1140 if (addend)
1141 {
1142 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1143 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1144 }
1145 }
1146
1147 return true;
1148 }
1149
1150 static GTY(()) rtx gen_tls_tga;
1151 static rtx
1152 gen_tls_get_addr (void)
1153 {
1154 if (!gen_tls_tga)
1155 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1156 return gen_tls_tga;
1157 }
1158
1159 static GTY(()) rtx thread_pointer_rtx;
1160 static rtx
1161 gen_thread_pointer (void)
1162 {
1163 if (!thread_pointer_rtx)
1164 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1165 return thread_pointer_rtx;
1166 }
1167
1168 static rtx
1169 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1170 rtx orig_op1, HOST_WIDE_INT addend)
1171 {
1172 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
1173 rtx_insn *insns;
1174 rtx orig_op0 = op0;
1175 HOST_WIDE_INT addend_lo, addend_hi;
1176
1177 switch (tls_kind)
1178 {
1179 case TLS_MODEL_GLOBAL_DYNAMIC:
1180 start_sequence ();
1181
1182 tga_op1 = gen_reg_rtx (Pmode);
1183 emit_insn (gen_load_dtpmod (tga_op1, op1));
1184
1185 tga_op2 = gen_reg_rtx (Pmode);
1186 emit_insn (gen_load_dtprel (tga_op2, op1));
1187
1188 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1189 LCT_CONST, Pmode, 2, tga_op1,
1190 Pmode, tga_op2, Pmode);
1191
1192 insns = get_insns ();
1193 end_sequence ();
1194
1195 if (GET_MODE (op0) != Pmode)
1196 op0 = tga_ret;
1197 emit_libcall_block (insns, op0, tga_ret, op1);
1198 break;
1199
1200 case TLS_MODEL_LOCAL_DYNAMIC:
1201 /* ??? This isn't the completely proper way to do local-dynamic.
1202 If the call to __tls_get_addr is used only by a single symbol,
1203 then we should (somehow) move the dtprel to the second arg
1204 to avoid the extra add. */
1205 start_sequence ();
1206
1207 tga_op1 = gen_reg_rtx (Pmode);
1208 emit_insn (gen_load_dtpmod (tga_op1, op1));
1209
1210 tga_op2 = const0_rtx;
1211
1212 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1213 LCT_CONST, Pmode, 2, tga_op1,
1214 Pmode, tga_op2, Pmode);
1215
1216 insns = get_insns ();
1217 end_sequence ();
1218
1219 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1220 UNSPEC_LD_BASE);
1221 tmp = gen_reg_rtx (Pmode);
1222 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1223
1224 if (!register_operand (op0, Pmode))
1225 op0 = gen_reg_rtx (Pmode);
1226 if (TARGET_TLS64)
1227 {
1228 emit_insn (gen_load_dtprel (op0, op1));
1229 emit_insn (gen_adddi3 (op0, tmp, op0));
1230 }
1231 else
1232 emit_insn (gen_add_dtprel (op0, op1, tmp));
1233 break;
1234
1235 case TLS_MODEL_INITIAL_EXEC:
1236 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1237 addend_hi = addend - addend_lo;
1238
1239 op1 = plus_constant (Pmode, op1, addend_hi);
1240 addend = addend_lo;
1241
1242 tmp = gen_reg_rtx (Pmode);
1243 emit_insn (gen_load_tprel (tmp, op1));
1244
1245 if (!register_operand (op0, Pmode))
1246 op0 = gen_reg_rtx (Pmode);
1247 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1248 break;
1249
1250 case TLS_MODEL_LOCAL_EXEC:
1251 if (!register_operand (op0, Pmode))
1252 op0 = gen_reg_rtx (Pmode);
1253
1254 op1 = orig_op1;
1255 addend = 0;
1256 if (TARGET_TLS64)
1257 {
1258 emit_insn (gen_load_tprel (op0, op1));
1259 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1260 }
1261 else
1262 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1263 break;
1264
1265 default:
1266 gcc_unreachable ();
1267 }
1268
1269 if (addend)
1270 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1271 orig_op0, 1, OPTAB_DIRECT);
1272 if (orig_op0 == op0)
1273 return NULL_RTX;
1274 if (GET_MODE (orig_op0) == Pmode)
1275 return op0;
1276 return gen_lowpart (GET_MODE (orig_op0), op0);
1277 }
1278
1279 rtx
1280 ia64_expand_move (rtx op0, rtx op1)
1281 {
1282 enum machine_mode mode = GET_MODE (op0);
1283
1284 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1285 op1 = force_reg (mode, op1);
1286
1287 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1288 {
1289 HOST_WIDE_INT addend = 0;
1290 enum tls_model tls_kind;
1291 rtx sym = op1;
1292
1293 if (GET_CODE (op1) == CONST
1294 && GET_CODE (XEXP (op1, 0)) == PLUS
1295 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1296 {
1297 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1298 sym = XEXP (XEXP (op1, 0), 0);
1299 }
1300
1301 tls_kind = tls_symbolic_operand_type (sym);
1302 if (tls_kind)
1303 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1304
1305 if (any_offset_symbol_operand (sym, mode))
1306 addend = 0;
1307 else if (aligned_offset_symbol_operand (sym, mode))
1308 {
1309 HOST_WIDE_INT addend_lo, addend_hi;
1310
1311 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1312 addend_hi = addend - addend_lo;
1313
1314 if (addend_lo != 0)
1315 {
1316 op1 = plus_constant (mode, sym, addend_hi);
1317 addend = addend_lo;
1318 }
1319 else
1320 addend = 0;
1321 }
1322 else
1323 op1 = sym;
1324
1325 if (reload_completed)
1326 {
1327 /* We really should have taken care of this offset earlier. */
1328 gcc_assert (addend == 0);
1329 if (ia64_expand_load_address (op0, op1))
1330 return NULL_RTX;
1331 }
1332
1333 if (addend)
1334 {
1335 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1336
1337 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1338
1339 op1 = expand_simple_binop (mode, PLUS, subtarget,
1340 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1341 if (op0 == op1)
1342 return NULL_RTX;
1343 }
1344 }
1345
1346 return op1;
1347 }
1348
1349 /* Split a move from OP1 to OP0 conditional on COND. */
1350
1351 void
1352 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1353 {
1354 rtx_insn *insn, *first = get_last_insn ();
1355
1356 emit_move_insn (op0, op1);
1357
1358 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1359 if (INSN_P (insn))
1360 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1361 PATTERN (insn));
1362 }
1363
1364 /* Split a post-reload TImode or TFmode reference into two DImode
1365 components. This is made extra difficult by the fact that we do
1366 not get any scratch registers to work with, because reload cannot
1367 be prevented from giving us a scratch that overlaps the register
1368 pair involved. So instead, when addressing memory, we tweak the
1369 pointer register up and back down with POST_INCs. Or up and not
1370 back down when we can get away with it.
1371
1372 REVERSED is true when the loads must be done in reversed order
1373 (high word first) for correctness. DEAD is true when the pointer
1374 dies with the second insn we generate and therefore the second
1375 address must not carry a postmodify.
1376
1377 May return an insn which is to be emitted after the moves. */
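/* For instance, with a plain register base and no reversal, a TImode
   load from (mem (reg R)) becomes a DImode load through (post_inc R)
   followed by a DImode load through (post_dec R) (or a plain load at
   offset 8 when R is dead), leaving R unchanged afterwards.  */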
1378
1379 static rtx
1380 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1381 {
1382 rtx fixup = 0;
1383
1384 switch (GET_CODE (in))
1385 {
1386 case REG:
1387 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1388 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1389 break;
1390
1391 case CONST_INT:
1392 case CONST_DOUBLE:
1393 /* Cannot occur reversed. */
1394 gcc_assert (!reversed);
1395
1396 if (GET_MODE (in) != TFmode)
1397 split_double (in, &out[0], &out[1]);
1398 else
1399 /* split_double does not understand how to split a TFmode
1400 quantity into a pair of DImode constants. */
1401 {
1402 REAL_VALUE_TYPE r;
1403 unsigned HOST_WIDE_INT p[2];
1404 long l[4]; /* TFmode is 128 bits */
1405
1406 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1407 real_to_target (l, &r, TFmode);
1408
1409 if (FLOAT_WORDS_BIG_ENDIAN)
1410 {
1411 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1412 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1413 }
1414 else
1415 {
1416 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1417 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1418 }
1419 out[0] = GEN_INT (p[0]);
1420 out[1] = GEN_INT (p[1]);
1421 }
1422 break;
1423
1424 case MEM:
1425 {
1426 rtx base = XEXP (in, 0);
1427 rtx offset;
1428
1429 switch (GET_CODE (base))
1430 {
1431 case REG:
1432 if (!reversed)
1433 {
1434 out[0] = adjust_automodify_address
1435 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1436 out[1] = adjust_automodify_address
1437 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1438 }
1439 else
1440 {
1441 /* Reversal requires a pre-increment, which can only
1442 be done as a separate insn. */
1443 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1444 out[0] = adjust_automodify_address
1445 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1446 out[1] = adjust_address (in, DImode, 0);
1447 }
1448 break;
1449
1450 case POST_INC:
1451 gcc_assert (!reversed && !dead);
1452
1453 /* Just do the increment in two steps. */
1454 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1455 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1456 break;
1457
1458 case POST_DEC:
1459 gcc_assert (!reversed && !dead);
1460
1461 /* Add 8, subtract 24. */
1462 base = XEXP (base, 0);
1463 out[0] = adjust_automodify_address
1464 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1465 out[1] = adjust_automodify_address
1466 (in, DImode,
1467 gen_rtx_POST_MODIFY (Pmode, base,
1468 plus_constant (Pmode, base, -24)),
1469 8);
1470 break;
1471
1472 case POST_MODIFY:
1473 gcc_assert (!reversed && !dead);
1474
1475 /* Extract and adjust the modification. This case is
1476 trickier than the others, because we might have an
1477 index register, or we might have a combined offset that
1478 doesn't fit a signed 9-bit displacement field. We can
1479 assume the incoming expression is already legitimate. */
1480 offset = XEXP (base, 1);
1481 base = XEXP (base, 0);
1482
1483 out[0] = adjust_automodify_address
1484 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1485
1486 if (GET_CODE (XEXP (offset, 1)) == REG)
1487 {
1488 /* Can't adjust the postmodify to match. Emit the
1489 original, then a separate addition insn. */
1490 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1491 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1492 }
1493 else
1494 {
1495 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1496 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1497 {
1498 /* Again the postmodify cannot be made to match,
1499 but in this case it's more efficient to get rid
1500 of the postmodify entirely and fix up with an
1501 add insn. */
1502 out[1] = adjust_automodify_address (in, DImode, base, 8);
1503 fixup = gen_adddi3
1504 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1505 }
1506 else
1507 {
1508 /* Combined offset still fits in the displacement field.
1509 (We cannot overflow it at the high end.) */
1510 out[1] = adjust_automodify_address
1511 (in, DImode, gen_rtx_POST_MODIFY
1512 (Pmode, base, gen_rtx_PLUS
1513 (Pmode, base,
1514 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1515 8);
1516 }
1517 }
1518 break;
1519
1520 default:
1521 gcc_unreachable ();
1522 }
1523 break;
1524 }
1525
1526 default:
1527 gcc_unreachable ();
1528 }
1529
1530 return fixup;
1531 }
1532
1533 /* Split a TImode or TFmode move instruction after reload.
1534 This is used by *movtf_internal and *movti_internal. */
1535 void
1536 ia64_split_tmode_move (rtx operands[])
1537 {
1538 rtx in[2], out[2], insn;
1539 rtx fixup[2];
1540 bool dead = false;
1541 bool reversed = false;
1542
1543 /* It is possible for reload to decide to overwrite a pointer with
1544 the value it points to. In that case we have to do the loads in
1545 the appropriate order so that the pointer is not destroyed too
1546 early. Also we must not generate a postmodify for that second
1547 load, or rws_access_regno will die. And we must not generate a
1548 postmodify for the second load if the destination register
1549 overlaps with the base register. */
1550 if (GET_CODE (operands[1]) == MEM
1551 && reg_overlap_mentioned_p (operands[0], operands[1]))
1552 {
1553 rtx base = XEXP (operands[1], 0);
1554 while (GET_CODE (base) != REG)
1555 base = XEXP (base, 0);
1556
1557 if (REGNO (base) == REGNO (operands[0]))
1558 reversed = true;
1559
1560 if (refers_to_regno_p (REGNO (operands[0]),
1561 REGNO (operands[0])+2,
1562 base, 0))
1563 dead = true;
1564 }
1565 /* Another reason to do the moves in reversed order is if the first
1566 element of the target register pair is also the second element of
1567 the source register pair. */
1568 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1569 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1570 reversed = true;
1571
1572 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1573 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1574
1575 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1576 if (GET_CODE (EXP) == MEM \
1577 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1578 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1579 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1580 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1581
1582 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1583 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1584 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1585
1586 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1587 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1588 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1589
1590 if (fixup[0])
1591 emit_insn (fixup[0]);
1592 if (fixup[1])
1593 emit_insn (fixup[1]);
1594
1595 #undef MAYBE_ADD_REG_INC_NOTE
1596 }
1597
1598 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1599 through memory plus an extra GR scratch register. Except that you can
1600 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1601 SECONDARY_RELOAD_CLASS, but not both.
1602
1603 We got into problems in the first place by allowing a construct like
1604 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1605 This solution attempts to prevent this situation from occurring. When
1606 we see something like the above, we spill the inner register to memory. */
1607
1608 static rtx
1609 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1610 {
1611 if (GET_CODE (in) == SUBREG
1612 && GET_MODE (SUBREG_REG (in)) == TImode
1613 && GET_CODE (SUBREG_REG (in)) == REG)
1614 {
1615 rtx memt = assign_stack_temp (TImode, 16);
1616 emit_move_insn (memt, SUBREG_REG (in));
1617 return adjust_address (memt, mode, 0);
1618 }
1619 else if (force && GET_CODE (in) == REG)
1620 {
1621 rtx memx = assign_stack_temp (mode, 16);
1622 emit_move_insn (memx, in);
1623 return memx;
1624 }
1625 else
1626 return in;
1627 }
1628
1629 /* Expand the movxf or movrf pattern (MODE says which) with the given
1630 OPERANDS, returning true if the pattern should then invoke
1631 DONE. */
1632
1633 bool
1634 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1635 {
1636 rtx op0 = operands[0];
1637
1638 if (GET_CODE (op0) == SUBREG)
1639 op0 = SUBREG_REG (op0);
1640
1641 /* We must support XFmode loads into general registers for stdarg/vararg,
1642 unprototyped calls, and a rare case where a long double is passed as
1643 an argument after a float HFA fills the FP registers. We split them into
1644 DImode loads for convenience. We also need to support XFmode stores
1645 for the last case. This case does not happen for stdarg/vararg routines,
1646 because we do a block store to memory of unnamed arguments. */
1647
1648 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1649 {
1650 rtx out[2];
1651
1652 /* We're hoping to transform everything that deals with XFmode
1653 quantities and GR registers early in the compiler. */
1654 gcc_assert (can_create_pseudo_p ());
1655
1656 /* Struct to register can just use TImode instead. */
1657 if ((GET_CODE (operands[1]) == SUBREG
1658 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1659 || (GET_CODE (operands[1]) == REG
1660 && GR_REGNO_P (REGNO (operands[1]))))
1661 {
1662 rtx op1 = operands[1];
1663
1664 if (GET_CODE (op1) == SUBREG)
1665 op1 = SUBREG_REG (op1);
1666 else
1667 op1 = gen_rtx_REG (TImode, REGNO (op1));
1668
1669 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1670 return true;
1671 }
1672
1673 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1674 {
1675 /* Don't word-swap when reading in the constant. */
1676 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1677 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1678 0, mode));
1679 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1680 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1681 0, mode));
1682 return true;
1683 }
1684
1685 /* If the quantity is in a register not known to be GR, spill it. */
1686 if (register_operand (operands[1], mode))
1687 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1688
1689 gcc_assert (GET_CODE (operands[1]) == MEM);
1690
1691 /* Don't word-swap when reading in the value. */
1692 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1693 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1694
1695 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1696 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1697 return true;
1698 }
1699
1700 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1701 {
1702 /* We're hoping to transform everything that deals with XFmode
1703 quantities and GR registers early in the compiler. */
1704 gcc_assert (can_create_pseudo_p ());
1705
1706 /* Op0 can't be a GR_REG here, as that case is handled above.
1707 If op0 is a register, then we spill op1, so that we now have a
1708 MEM operand. This requires creating an XFmode subreg of a TImode reg
1709 to force the spill. */
1710 if (register_operand (operands[0], mode))
1711 {
1712 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1713 op1 = gen_rtx_SUBREG (mode, op1, 0);
1714 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1715 }
1716
1717 else
1718 {
1719 rtx in[2];
1720
1721 gcc_assert (GET_CODE (operands[0]) == MEM);
1722
1723 /* Don't word-swap when writing out the value. */
1724 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1725 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1726
1727 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1728 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1729 return true;
1730 }
1731 }
1732
1733 if (!reload_in_progress && !reload_completed)
1734 {
1735 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1736
1737 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1738 {
1739 rtx memt, memx, in = operands[1];
1740 if (CONSTANT_P (in))
1741 in = validize_mem (force_const_mem (mode, in));
1742 if (GET_CODE (in) == MEM)
1743 memt = adjust_address (in, TImode, 0);
1744 else
1745 {
1746 memt = assign_stack_temp (TImode, 16);
1747 memx = adjust_address (memt, mode, 0);
1748 emit_move_insn (memx, in);
1749 }
1750 emit_move_insn (op0, memt);
1751 return true;
1752 }
1753
1754 if (!ia64_move_ok (operands[0], operands[1]))
1755 operands[1] = force_reg (mode, operands[1]);
1756 }
1757
1758 return false;
1759 }
1760
1761 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1762 with the expression that holds the compare result (in VOIDmode). */
1763
1764 static GTY(()) rtx cmptf_libfunc;
1765
1766 void
1767 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1768 {
1769 enum rtx_code code = GET_CODE (*expr);
1770 rtx cmp;
1771
1772 /* If we have a BImode input, then we already have a compare result, and
1773 do not need to emit another comparison. */
1774 if (GET_MODE (*op0) == BImode)
1775 {
1776 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1777 cmp = *op0;
1778 }
1779 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1780 magic number as its third argument indicating what to do.
1781 The return value is an integer to be compared against zero. */
1782 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1783 {
1784 enum qfcmp_magic {
1785 QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
1786 QCMP_UNORD = 2,
1787 QCMP_EQ = 4,
1788 QCMP_LT = 8,
1789 QCMP_GT = 16
1790 };
1791 int magic;
1792 enum rtx_code ncode;
1793 rtx ret, insns;
1794
1795 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1796 switch (code)
1797 {
1798 /* 1 = equal, 0 = not equal. Equality operators do
1799 not raise FP_INVALID when given a NaN operand. */
1800 case EQ: magic = QCMP_EQ; ncode = NE; break;
1801 case NE: magic = QCMP_EQ; ncode = EQ; break;
1802 /* isunordered() from C99. */
1803 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1804 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1805 /* Relational operators raise FP_INVALID when given
1806 a NaN operand. */
1807 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1808 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1809 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1810 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1811 /* Unordered relational operators do not raise FP_INVALID
1812 when given a NaN operand. */
1813 case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
1814 case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1815 case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
1816 case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1817 /* Not supported. */
1818 case UNEQ:
1819 case LTGT:
1820 default: gcc_unreachable ();
1821 }
1822
1823 start_sequence ();
1824
1825 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1826 *op0, TFmode, *op1, TFmode,
1827 GEN_INT (magic), DImode);
1828 cmp = gen_reg_rtx (BImode);
1829 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1830 gen_rtx_fmt_ee (ncode, BImode,
1831 ret, const0_rtx)));
1832
1833 insns = get_insns ();
1834 end_sequence ();
1835
1836 emit_libcall_block (insns, cmp, cmp,
1837 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1838 code = NE;
1839 }
1840 else
1841 {
1842 cmp = gen_reg_rtx (BImode);
1843 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1844 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1845 code = NE;
1846 }
1847
1848 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1849 *op0 = cmp;
1850 *op1 = const0_rtx;
1851 }
1852
1853 /* Generate an integral vector comparison. Return true if the condition has
1854 been reversed, and so the sense of the comparison should be inverted. */
1855
1856 static bool
1857 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1858 rtx dest, rtx op0, rtx op1)
1859 {
1860 bool negate = false;
1861 rtx x;
1862
1863 /* Canonicalize the comparison to EQ, GT, GTU. */
1864 switch (code)
1865 {
1866 case EQ:
1867 case GT:
1868 case GTU:
1869 break;
1870
1871 case NE:
1872 case LE:
1873 case LEU:
1874 code = reverse_condition (code);
1875 negate = true;
1876 break;
1877
1878 case GE:
1879 case GEU:
1880 code = reverse_condition (code);
1881 negate = true;
1882 /* FALLTHRU */
1883
1884 case LT:
1885 case LTU:
1886 code = swap_condition (code);
1887 x = op0, op0 = op1, op1 = x;
1888 break;
1889
1890 default:
1891 gcc_unreachable ();
1892 }
1893
1894 /* Unsigned parallel compare is not supported by the hardware. Play some
1895 tricks to turn this into a signed comparison against 0. */
1896 if (code == GTU)
1897 {
1898 switch (mode)
1899 {
1900 case V2SImode:
1901 {
1902 rtx t1, t2, mask;
1903
1904 /* Subtract (-(INT MAX) - 1) from both operands to make
1905 them signed. */
1906 mask = GEN_INT (0x80000000);
1907 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1908 mask = force_reg (mode, mask);
1909 t1 = gen_reg_rtx (mode);
1910 emit_insn (gen_subv2si3 (t1, op0, mask));
1911 t2 = gen_reg_rtx (mode);
1912 emit_insn (gen_subv2si3 (t2, op1, mask));
1913 op0 = t1;
1914 op1 = t2;
1915 code = GT;
1916 }
1917 break;
1918
1919 case V8QImode:
1920 case V4HImode:
1921 /* Perform a parallel unsigned saturating subtraction. */
1922 x = gen_reg_rtx (mode);
1923 emit_insn (gen_rtx_SET (VOIDmode, x,
1924 gen_rtx_US_MINUS (mode, op0, op1)));
1925
1926 code = EQ;
1927 op0 = x;
1928 op1 = CONST0_RTX (mode);
1929 negate = !negate;
1930 break;
1931
1932 default:
1933 gcc_unreachable ();
1934 }
1935 }
1936
1937 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1938 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1939
1940 return negate;
1941 }
1942
1943 /* Emit an integral vector conditional move. */
1944
1945 void
1946 ia64_expand_vecint_cmov (rtx operands[])
1947 {
1948 enum machine_mode mode = GET_MODE (operands[0]);
1949 enum rtx_code code = GET_CODE (operands[3]);
1950 bool negate;
1951 rtx cmp, x, ot, of;
1952
1953 cmp = gen_reg_rtx (mode);
1954 negate = ia64_expand_vecint_compare (code, mode, cmp,
1955 operands[4], operands[5]);
1956
1957 ot = operands[1+negate];
1958 of = operands[2-negate];
1959
1960 if (ot == CONST0_RTX (mode))
1961 {
1962 if (of == CONST0_RTX (mode))
1963 {
1964 emit_move_insn (operands[0], ot);
1965 return;
1966 }
1967
1968 x = gen_rtx_NOT (mode, cmp);
1969 x = gen_rtx_AND (mode, x, of);
1970 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1971 }
1972 else if (of == CONST0_RTX (mode))
1973 {
1974 x = gen_rtx_AND (mode, cmp, ot);
1975 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1976 }
1977 else
1978 {
1979 rtx t, f;
1980
1981 t = gen_reg_rtx (mode);
1982 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1983 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1984
1985 f = gen_reg_rtx (mode);
1986 x = gen_rtx_NOT (mode, cmp);
1987 x = gen_rtx_AND (mode, x, operands[2-negate]);
1988 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1989
1990 x = gen_rtx_IOR (mode, t, f);
1991 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1992 }
1993 }
1994
1995 /* Emit an integral vector min or max operation. Return true if all done. */
1996
1997 bool
1998 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1999 rtx operands[])
2000 {
2001 rtx xops[6];
2002
2003 /* These four combinations are supported directly. */
2004 if (mode == V8QImode && (code == UMIN || code == UMAX))
2005 return false;
2006 if (mode == V4HImode && (code == SMIN || code == SMAX))
2007 return false;
2008
2009 /* This combination can be implemented with only saturating subtraction. */
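/* umax (a, b) == (a -us b) + b: the saturating difference is a - b when
   a >= b and 0 otherwise.  */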
2010 if (mode == V4HImode && code == UMAX)
2011 {
2012 rtx x, tmp = gen_reg_rtx (mode);
2013
2014 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2015 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
2016
2017 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2018 return true;
2019 }
2020
2021 /* Everything else implemented via vector comparisons. */
2022 xops[0] = operands[0];
2023 xops[4] = xops[1] = operands[1];
2024 xops[5] = xops[2] = operands[2];
2025
2026 switch (code)
2027 {
2028 case UMIN:
2029 code = LTU;
2030 break;
2031 case UMAX:
2032 code = GTU;
2033 break;
2034 case SMIN:
2035 code = LT;
2036 break;
2037 case SMAX:
2038 code = GT;
2039 break;
2040 default:
2041 gcc_unreachable ();
2042 }
2043 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2044
2045 ia64_expand_vecint_cmov (xops);
2046 return true;
2047 }
2048
2049 /* The vectors LO and HI each contain N halves of a double-wide vector.
2050 Reassemble either the first N/2 or the second N/2 elements. */
2051
2052 void
2053 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2054 {
2055 enum machine_mode vmode = GET_MODE (lo);
2056 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2057 struct expand_vec_perm_d d;
2058 bool ok;
2059
2060 d.target = gen_lowpart (vmode, out);
2061 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2062 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2063 d.vmode = vmode;
2064 d.nelt = nelt;
2065 d.one_operand_p = false;
2066 d.testing_p = false;
2067
2068 high = (highp ? nelt / 2 : 0);
2069 for (i = 0; i < nelt / 2; ++i)
2070 {
2071 d.perm[i * 2] = i + high;
2072 d.perm[i * 2 + 1] = i + high + nelt;
2073 }
2074
2075 ok = ia64_expand_vec_perm_const_1 (&d);
2076 gcc_assert (ok);
2077 }
2078
2079 /* Return the vector to pair with VEC when widening: zero if UNSIGNEDP, otherwise the elementwise sign mask of VEC (-1 or 0 per element). */
2080
2081 static rtx
2082 ia64_unpack_sign (rtx vec, bool unsignedp)
2083 {
2084 enum machine_mode mode = GET_MODE (vec);
2085 rtx zero = CONST0_RTX (mode);
2086
2087 if (unsignedp)
2088 return zero;
2089 else
2090 {
2091 rtx sign = gen_reg_rtx (mode);
2092 bool neg;
2093
2094 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2095 gcc_assert (!neg);
2096
2097 return sign;
2098 }
2099 }
2100
2101 /* Emit an integral vector unpack operation. */
2102
2103 void
2104 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2105 {
2106 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2107 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2108 }
2109
2110 /* Emit an integral vector widening sum operation. */
2111
2112 void
2113 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2114 {
2115 enum machine_mode wmode;
2116 rtx l, h, t, sign;
2117
2118 sign = ia64_unpack_sign (operands[1], unsignedp);
2119
2120 wmode = GET_MODE (operands[0]);
2121 l = gen_reg_rtx (wmode);
2122 h = gen_reg_rtx (wmode);
2123
2124 ia64_unpack_assemble (l, operands[1], sign, false);
2125 ia64_unpack_assemble (h, operands[1], sign, true);
2126
2127 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2128 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2129 if (t != operands[0])
2130 emit_move_insn (operands[0], t);
2131 }
2132
2133 /* Emit the appropriate sequence for a call. */
2134
2135 void
2136 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2137 int sibcall_p)
2138 {
2139 rtx insn, b0;
2140
2141 addr = XEXP (addr, 0);
2142 addr = convert_memory_address (DImode, addr);
2143 b0 = gen_rtx_REG (DImode, R_BR (0));
2144
2145 /* ??? Should do this for functions known to bind local too. */
2146 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2147 {
2148 if (sibcall_p)
2149 insn = gen_sibcall_nogp (addr);
2150 else if (! retval)
2151 insn = gen_call_nogp (addr, b0);
2152 else
2153 insn = gen_call_value_nogp (retval, addr, b0);
2154 insn = emit_call_insn (insn);
2155 }
2156 else
2157 {
2158 if (sibcall_p)
2159 insn = gen_sibcall_gp (addr);
2160 else if (! retval)
2161 insn = gen_call_gp (addr, b0);
2162 else
2163 insn = gen_call_value_gp (retval, addr, b0);
2164 insn = emit_call_insn (insn);
2165
2166 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2167 }
2168
2169 if (sibcall_p)
2170 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2171
2172 if (TARGET_ABI_OPEN_VMS)
2173 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2174 gen_rtx_REG (DImode, GR_REG (25)));
2175 }
2176
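/* Record that frame register R has now been used in emitted code, and
   verify that every use agrees on which hard register it was assigned.  */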
2177 static void
2178 reg_emitted (enum ia64_frame_regs r)
2179 {
2180 if (emitted_frame_related_regs[r] == 0)
2181 emitted_frame_related_regs[r] = current_frame_info.r[r];
2182 else
2183 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2184 }
2185
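/* Return the hard register assigned to frame register R, noting that it
   has now been emitted.  */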
2186 static int
2187 get_reg (enum ia64_frame_regs r)
2188 {
2189 reg_emitted (r);
2190 return current_frame_info.r[r];
2191 }
2192
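/* Return true if hard register REGNO has been used for any of the
   frame-related registers.  */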
2193 static bool
2194 is_emitted (int regno)
2195 {
2196 unsigned int r;
2197
2198 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2199 if (emitted_frame_related_regs[r] == regno)
2200 return true;
2201 return false;
2202 }
2203
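/* Emit code to restore the global pointer, either by copying it from the
   general register chosen by reg_save_gp or by reloading it from its slot
   in the memory spill area.  */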
2204 void
2205 ia64_reload_gp (void)
2206 {
2207 rtx tmp;
2208
2209 if (current_frame_info.r[reg_save_gp])
2210 {
2211 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2212 }
2213 else
2214 {
2215 HOST_WIDE_INT offset;
2216 rtx offset_r;
2217
2218 offset = (current_frame_info.spill_cfa_off
2219 + current_frame_info.spill_size);
2220 if (frame_pointer_needed)
2221 {
2222 tmp = hard_frame_pointer_rtx;
2223 offset = -offset;
2224 }
2225 else
2226 {
2227 tmp = stack_pointer_rtx;
2228 offset = current_frame_info.total_size - offset;
2229 }
2230
2231 offset_r = GEN_INT (offset);
2232 if (satisfies_constraint_I (offset_r))
2233 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2234 else
2235 {
2236 emit_move_insn (pic_offset_table_rtx, offset_r);
2237 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2238 pic_offset_table_rtx, tmp));
2239 }
2240
2241 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2242 }
2243
2244 emit_move_insn (pic_offset_table_rtx, tmp);
2245 }
2246
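/* Split a call after reload.  If ADDR is a general register it holds the
   address of a function descriptor, so load the entry point into SCRATCH_B
   (via SCRATCH_R) and the new gp, then emit the call (returning through
   RETADDR) and reload our own gp afterwards when needed.  */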
2247 void
2248 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2249 rtx scratch_b, int noreturn_p, int sibcall_p)
2250 {
2251 rtx insn;
2252 bool is_desc = false;
2253
2254 /* If we find we're calling through a register, then we're actually
2255 calling through a descriptor, so load up the values. */
2256 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2257 {
2258 rtx tmp;
2259 bool addr_dead_p;
2260
2261 /* ??? We are currently constrained to *not* use peep2, because
2262 we can legitimately change the global lifetime of the GP
2263 (in the form of killing where previously live). This is
2264 because a call through a descriptor doesn't use the previous
2265 value of the GP, while a direct call does, and we do not
2266 commit to either form until the split here.
2267
2268 That said, this means that we lack precise life info for
2269 whether ADDR is dead after this call. This is not terribly
2270 important, since we can fix things up essentially for free
2271 with the POST_DEC below, but it's nice to not use it when we
2272 can immediately tell it's not necessary. */
2273 addr_dead_p = ((noreturn_p || sibcall_p
2274 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2275 REGNO (addr)))
2276 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2277
2278 /* Load the code address into scratch_b. */
2279 tmp = gen_rtx_POST_INC (Pmode, addr);
2280 tmp = gen_rtx_MEM (Pmode, tmp);
2281 emit_move_insn (scratch_r, tmp);
2282 emit_move_insn (scratch_b, scratch_r);
2283
2284 /* Load the GP address. If ADDR is not dead here, then we must
2285 revert the change made above via the POST_INCREMENT. */
2286 if (!addr_dead_p)
2287 tmp = gen_rtx_POST_DEC (Pmode, addr);
2288 else
2289 tmp = addr;
2290 tmp = gen_rtx_MEM (Pmode, tmp);
2291 emit_move_insn (pic_offset_table_rtx, tmp);
2292
2293 is_desc = true;
2294 addr = scratch_b;
2295 }
2296
2297 if (sibcall_p)
2298 insn = gen_sibcall_nogp (addr);
2299 else if (retval)
2300 insn = gen_call_value_nogp (retval, addr, retaddr);
2301 else
2302 insn = gen_call_nogp (addr, retaddr);
2303 emit_call_insn (insn);
2304
2305 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2306 ia64_reload_gp ();
2307 }
2308
2309 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2310
2311 This differs from the generic code in that we know about the zero-extending
2312 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2313 also know that ld.acq+cmpxchg.rel equals a full barrier.
2314
2315 The loop we want to generate looks like
2316
2317 cmp_reg = mem;
2318 label:
2319 old_reg = cmp_reg;
2320 new_reg = cmp_reg op val;
2321 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2322 if (cmp_reg != old_reg)
2323 goto label;
2324
2325 Note that we only do the plain load from memory once. Subsequent
2326 iterations use the value loaded by the compare-and-swap pattern. */
2327
2328 void
2329 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2330 rtx old_dst, rtx new_dst, enum memmodel model)
2331 {
2332 enum machine_mode mode = GET_MODE (mem);
2333 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2334 enum insn_code icode;
2335
2336 /* Special case for using fetchadd. */
2337 if ((mode == SImode || mode == DImode)
2338 && (code == PLUS || code == MINUS)
2339 && fetchadd_operand (val, mode))
2340 {
2341 if (code == MINUS)
2342 val = GEN_INT (-INTVAL (val));
2343
2344 if (!old_dst)
2345 old_dst = gen_reg_rtx (mode);
2346
2347 switch (model)
2348 {
2349 case MEMMODEL_ACQ_REL:
2350 case MEMMODEL_SEQ_CST:
2351 emit_insn (gen_memory_barrier ());
2352 /* FALLTHRU */
2353 case MEMMODEL_RELAXED:
2354 case MEMMODEL_ACQUIRE:
2355 case MEMMODEL_CONSUME:
2356 if (mode == SImode)
2357 icode = CODE_FOR_fetchadd_acq_si;
2358 else
2359 icode = CODE_FOR_fetchadd_acq_di;
2360 break;
2361 case MEMMODEL_RELEASE:
2362 if (mode == SImode)
2363 icode = CODE_FOR_fetchadd_rel_si;
2364 else
2365 icode = CODE_FOR_fetchadd_rel_di;
2366 break;
2367
2368 default:
2369 gcc_unreachable ();
2370 }
2371
2372 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2373
2374 if (new_dst)
2375 {
2376 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2377 true, OPTAB_WIDEN);
2378 if (new_reg != new_dst)
2379 emit_move_insn (new_dst, new_reg);
2380 }
2381 return;
2382 }
2383
2384 /* Because of the volatile mem read, we get an ld.acq, which is the
2385 front half of the full barrier. The end half is the cmpxchg.rel.
2386 For relaxed and release memory models, we don't need this, but we
2387 don't bother trying to prevent it either. */
2388 gcc_assert (model == MEMMODEL_RELAXED
2389 || model == MEMMODEL_RELEASE
2390 || MEM_VOLATILE_P (mem));
2391
2392 old_reg = gen_reg_rtx (DImode);
2393 cmp_reg = gen_reg_rtx (DImode);
2394 label = gen_label_rtx ();
2395
2396 if (mode != DImode)
2397 {
2398 val = simplify_gen_subreg (DImode, val, mode, 0);
2399 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2400 }
2401 else
2402 emit_move_insn (cmp_reg, mem);
2403
2404 emit_label (label);
2405
2406 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2407 emit_move_insn (old_reg, cmp_reg);
2408 emit_move_insn (ar_ccv, cmp_reg);
2409
2410 if (old_dst)
2411 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2412
2413 new_reg = cmp_reg;
2414 if (code == NOT)
2415 {
2416 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2417 true, OPTAB_DIRECT);
2418 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2419 }
2420 else
2421 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2422 true, OPTAB_DIRECT);
2423
2424 if (mode != DImode)
2425 new_reg = gen_lowpart (mode, new_reg);
2426 if (new_dst)
2427 emit_move_insn (new_dst, new_reg);
2428
2429 switch (model)
2430 {
2431 case MEMMODEL_RELAXED:
2432 case MEMMODEL_ACQUIRE:
2433 case MEMMODEL_CONSUME:
2434 switch (mode)
2435 {
2436 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2437 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2438 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2439 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2440 default:
2441 gcc_unreachable ();
2442 }
2443 break;
2444
2445 case MEMMODEL_RELEASE:
2446 case MEMMODEL_ACQ_REL:
2447 case MEMMODEL_SEQ_CST:
2448 switch (mode)
2449 {
2450 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2451 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2452 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2453 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2454 default:
2455 gcc_unreachable ();
2456 }
2457 break;
2458
2459 default:
2460 gcc_unreachable ();
2461 }
2462
2463 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2464
2465 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2466 }
2467 \f
2468 /* Begin the assembly file. */
2469
2470 static void
2471 ia64_file_start (void)
2472 {
2473 default_file_start ();
2474 emit_safe_across_calls ();
2475 }
2476
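/* Emit a .pred.safe_across_calls directive listing the ranges of predicate
   registers that are preserved across calls under the current
   configuration.  */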
2477 void
2478 emit_safe_across_calls (void)
2479 {
2480 unsigned int rs, re;
2481 int out_state;
2482
2483 rs = 1;
2484 out_state = 0;
2485 while (1)
2486 {
2487 while (rs < 64 && call_used_regs[PR_REG (rs)])
2488 rs++;
2489 if (rs >= 64)
2490 break;
2491 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2492 continue;
2493 if (out_state == 0)
2494 {
2495 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2496 out_state = 1;
2497 }
2498 else
2499 fputc (',', asm_out_file);
2500 if (re == rs + 1)
2501 fprintf (asm_out_file, "p%u", rs);
2502 else
2503 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2504 rs = re + 1;
2505 }
2506 if (out_state)
2507 fputc ('\n', asm_out_file);
2508 }
2509
2510 /* Globalize a declaration. */
2511
2512 static void
2513 ia64_globalize_decl_name (FILE * stream, tree decl)
2514 {
2515 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2516 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2517 if (version_attr)
2518 {
2519 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2520 const char *p = TREE_STRING_POINTER (v);
2521 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2522 }
2523 targetm.asm_out.globalize_label (stream, name);
2524 if (TREE_CODE (decl) == FUNCTION_DECL)
2525 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2526 }
2527
2528 /* Helper function for ia64_compute_frame_size: find an appropriate general
2529 register to spill the special register R to. The bits for GR0 to GR31
2530 already allocated by this routine are tracked in current_frame_info.gr_used_mask.
2531 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2532
2533 static int
2534 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2535 {
2536 int regno;
2537
2538 if (emitted_frame_related_regs[r] != 0)
2539 {
2540 regno = emitted_frame_related_regs[r];
2541 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2542 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2543 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2544 else if (crtl->is_leaf
2545 && regno >= GR_REG (1) && regno <= GR_REG (31))
2546 current_frame_info.gr_used_mask |= 1 << regno;
2547
2548 return regno;
2549 }
2550
2551 /* If this is a leaf function, first try an otherwise unused
2552 call-clobbered register. */
2553 if (crtl->is_leaf)
2554 {
2555 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2556 if (! df_regs_ever_live_p (regno)
2557 && call_used_regs[regno]
2558 && ! fixed_regs[regno]
2559 && ! global_regs[regno]
2560 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2561 && ! is_emitted (regno))
2562 {
2563 current_frame_info.gr_used_mask |= 1 << regno;
2564 return regno;
2565 }
2566 }
2567
2568 if (try_locals)
2569 {
2570 regno = current_frame_info.n_local_regs;
2571 /* If there is a frame pointer, then we can't use loc79, because
2572 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2573 reg_name switching code in ia64_expand_prologue. */
2574 while (regno < (80 - frame_pointer_needed))
2575 if (! is_emitted (LOC_REG (regno++)))
2576 {
2577 current_frame_info.n_local_regs = regno;
2578 return LOC_REG (regno - 1);
2579 }
2580 }
2581
2582 /* Failed to find a general register to spill to. Must use stack. */
2583 return 0;
2584 }
2585
2586 /* In order to make for nice schedules, we try to allocate every temporary
2587 to a different register. We must of course stay away from call-saved,
2588 fixed, and global registers. We must also stay away from registers
2589 allocated in current_frame_info.gr_used_mask, since those include regs
2590 used all through the prologue.
2591
2592 Any register allocated here must be used immediately. The idea is to
2593 aid scheduling, not to solve data flow problems. */
2594
2595 static int last_scratch_gr_reg;
2596
2597 static int
2598 next_scratch_gr_reg (void)
2599 {
2600 int i, regno;
2601
2602 for (i = 0; i < 32; ++i)
2603 {
2604 regno = (last_scratch_gr_reg + i + 1) & 31;
2605 if (call_used_regs[regno]
2606 && ! fixed_regs[regno]
2607 && ! global_regs[regno]
2608 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2609 {
2610 last_scratch_gr_reg = regno;
2611 return regno;
2612 }
2613 }
2614
2615 /* There must be _something_ available. */
2616 gcc_unreachable ();
2617 }
2618
2619 /* Helper function for ia64_compute_frame_size, called through
2620 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2621
2622 static void
2623 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2624 {
2625 unsigned int regno = REGNO (reg);
2626 if (regno < 32)
2627 {
2628 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2629 for (i = 0; i < n; ++i)
2630 current_frame_info.gr_used_mask |= 1 << (regno + i);
2631 }
2632 }
2633
2634
2635 /* Compute the frame layout of the current function and record it in
2636 current_frame_info. SIZE is the number of bytes of space needed for
2637 local variables. */
2638
2639 static void
2640 ia64_compute_frame_size (HOST_WIDE_INT size)
2641 {
2642 HOST_WIDE_INT total_size;
2643 HOST_WIDE_INT spill_size = 0;
2644 HOST_WIDE_INT extra_spill_size = 0;
2645 HOST_WIDE_INT pretend_args_size;
2646 HARD_REG_SET mask;
2647 int n_spilled = 0;
2648 int spilled_gr_p = 0;
2649 int spilled_fr_p = 0;
2650 unsigned int regno;
2651 int min_regno;
2652 int max_regno;
2653 int i;
2654
2655 if (current_frame_info.initialized)
2656 return;
2657
2658 memset (&current_frame_info, 0, sizeof current_frame_info);
2659 CLEAR_HARD_REG_SET (mask);
2660
2661 /* Don't allocate scratches to the return register. */
2662 diddle_return_value (mark_reg_gr_used_mask, NULL);
2663
2664 /* Don't allocate scratches to the EH scratch registers. */
2665 if (cfun->machine->ia64_eh_epilogue_sp)
2666 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2667 if (cfun->machine->ia64_eh_epilogue_bsp)
2668 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2669
2670 /* Static stack checking uses r2 and r3. */
2671 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
2672 current_frame_info.gr_used_mask |= 0xc;
2673
2674 /* Find the size of the register stack frame. We have only 80 local
2675 registers, because we reserve 8 for the inputs and 8 for the
2676 outputs. */
2677
2678 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2679 since we'll be adjusting that down later. */
2680 regno = LOC_REG (78) + ! frame_pointer_needed;
2681 for (; regno >= LOC_REG (0); regno--)
2682 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2683 break;
2684 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2685
2686 /* For functions marked with the syscall_linkage attribute, we must mark
2687 all eight input registers as in use, so that locals aren't visible to
2688 the caller. */
2689
2690 if (cfun->machine->n_varargs > 0
2691 || lookup_attribute ("syscall_linkage",
2692 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2693 current_frame_info.n_input_regs = 8;
2694 else
2695 {
2696 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2697 if (df_regs_ever_live_p (regno))
2698 break;
2699 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2700 }
2701
2702 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2703 if (df_regs_ever_live_p (regno))
2704 break;
2705 i = regno - OUT_REG (0) + 1;
2706
2707 #ifndef PROFILE_HOOK
2708 /* When -p profiling, we need one output register for the mcount argument.
2709 Likewise for -a profiling for the bb_init_func argument. For -ax
2710 profiling, we need two output registers for the two bb_init_trace_func
2711 arguments. */
2712 if (crtl->profile)
2713 i = MAX (i, 1);
2714 #endif
2715 current_frame_info.n_output_regs = i;
2716
2717 /* ??? No rotating register support yet. */
2718 current_frame_info.n_rotate_regs = 0;
2719
2720 /* Discover which registers need spilling, and how much room that
2721 will take. Begin with floating point and general registers,
2722 which will always wind up on the stack. */
2723
2724 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2725 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2726 {
2727 SET_HARD_REG_BIT (mask, regno);
2728 spill_size += 16;
2729 n_spilled += 1;
2730 spilled_fr_p = 1;
2731 }
2732
2733 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2734 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2735 {
2736 SET_HARD_REG_BIT (mask, regno);
2737 spill_size += 8;
2738 n_spilled += 1;
2739 spilled_gr_p = 1;
2740 }
2741
2742 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2743 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2744 {
2745 SET_HARD_REG_BIT (mask, regno);
2746 spill_size += 8;
2747 n_spilled += 1;
2748 }
2749
2750 /* Now come all special registers that might get saved in other
2751 general registers. */
2752
2753 if (frame_pointer_needed)
2754 {
2755 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2756 /* If we did not get a register, then we take LOC79. This is guaranteed
2757 to be free, even if regs_ever_live is already set, because this is
2758 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2759 as we don't count loc79 above. */
2760 if (current_frame_info.r[reg_fp] == 0)
2761 {
2762 current_frame_info.r[reg_fp] = LOC_REG (79);
2763 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2764 }
2765 }
2766
2767 if (! crtl->is_leaf)
2768 {
2769 /* Emit a save of BR0 if we call other functions. Do this even
2770 if this function doesn't return, as EH depends on this to be
2771 able to unwind the stack. */
2772 SET_HARD_REG_BIT (mask, BR_REG (0));
2773
2774 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2775 if (current_frame_info.r[reg_save_b0] == 0)
2776 {
2777 extra_spill_size += 8;
2778 n_spilled += 1;
2779 }
2780
2781 /* Similarly for ar.pfs. */
2782 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2783 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2784 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2785 {
2786 extra_spill_size += 8;
2787 n_spilled += 1;
2788 }
2789
2790 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2791 registers are clobbered, so we fall back to the stack. */
2792 current_frame_info.r[reg_save_gp]
2793 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2794 if (current_frame_info.r[reg_save_gp] == 0)
2795 {
2796 SET_HARD_REG_BIT (mask, GR_REG (1));
2797 spill_size += 8;
2798 n_spilled += 1;
2799 }
2800 }
2801 else
2802 {
2803 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2804 {
2805 SET_HARD_REG_BIT (mask, BR_REG (0));
2806 extra_spill_size += 8;
2807 n_spilled += 1;
2808 }
2809
2810 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2811 {
2812 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2813 current_frame_info.r[reg_save_ar_pfs]
2814 = find_gr_spill (reg_save_ar_pfs, 1);
2815 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2816 {
2817 extra_spill_size += 8;
2818 n_spilled += 1;
2819 }
2820 }
2821 }
2822
2823 /* Unwind descriptor hackery: things are most efficient if we allocate
2824 consecutive GR save registers for RP, PFS, FP in that order. However,
2825 it is absolutely critical that FP get the only hard register that's
2826 guaranteed to be free, so we allocated it first. If all three did
2827 happen to be allocated hard regs, and are consecutive, rearrange them
2828 into the preferred order now.
2829
2830 If we have already emitted code for any of those registers,
2831 then it's already too late to change. */
2832 min_regno = MIN (current_frame_info.r[reg_fp],
2833 MIN (current_frame_info.r[reg_save_b0],
2834 current_frame_info.r[reg_save_ar_pfs]));
2835 max_regno = MAX (current_frame_info.r[reg_fp],
2836 MAX (current_frame_info.r[reg_save_b0],
2837 current_frame_info.r[reg_save_ar_pfs]));
2838 if (min_regno > 0
2839 && min_regno + 2 == max_regno
2840 && (current_frame_info.r[reg_fp] == min_regno + 1
2841 || current_frame_info.r[reg_save_b0] == min_regno + 1
2842 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2843 && (emitted_frame_related_regs[reg_save_b0] == 0
2844 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2845 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2846 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2847 && (emitted_frame_related_regs[reg_fp] == 0
2848 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2849 {
2850 current_frame_info.r[reg_save_b0] = min_regno;
2851 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2852 current_frame_info.r[reg_fp] = min_regno + 2;
2853 }
2854
2855 /* See if we need to store the predicate register block. */
2856 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2857 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2858 break;
2859 if (regno <= PR_REG (63))
2860 {
2861 SET_HARD_REG_BIT (mask, PR_REG (0));
2862 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2863 if (current_frame_info.r[reg_save_pr] == 0)
2864 {
2865 extra_spill_size += 8;
2866 n_spilled += 1;
2867 }
2868
2869 /* ??? Mark them all as used so that register renaming and such
2870 are free to use them. */
2871 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2872 df_set_regs_ever_live (regno, true);
2873 }
2874
2875 /* If we're forced to use st8.spill, we're forced to save and restore
2876 ar.unat as well. The check for existing liveness allows inline asm
2877 to touch ar.unat. */
2878 if (spilled_gr_p || cfun->machine->n_varargs
2879 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2880 {
2881 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2882 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2883 current_frame_info.r[reg_save_ar_unat]
2884 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2885 if (current_frame_info.r[reg_save_ar_unat] == 0)
2886 {
2887 extra_spill_size += 8;
2888 n_spilled += 1;
2889 }
2890 }
2891
2892 if (df_regs_ever_live_p (AR_LC_REGNUM))
2893 {
2894 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2895 current_frame_info.r[reg_save_ar_lc]
2896 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2897 if (current_frame_info.r[reg_save_ar_lc] == 0)
2898 {
2899 extra_spill_size += 8;
2900 n_spilled += 1;
2901 }
2902 }
2903
2904 /* If we have an odd number of words of pretend arguments written to
2905 the stack, then the FR save area will be unaligned. We round the
2906 size of this area up to keep things 16 byte aligned. */
2907 if (spilled_fr_p)
2908 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2909 else
2910 pretend_args_size = crtl->args.pretend_args_size;
2911
2912 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2913 + crtl->outgoing_args_size);
2914 total_size = IA64_STACK_ALIGN (total_size);
2915
2916 /* We always use the 16-byte scratch area provided by the caller, but
2917 if we are a leaf function, there's no one to which we need to provide
2918 a scratch area. However, if the function allocates dynamic stack space,
2919 the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
2920 so we need to cope. */
2921 if (crtl->is_leaf && !cfun->calls_alloca)
2922 total_size = MAX (0, total_size - 16);
2923
2924 current_frame_info.total_size = total_size;
2925 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2926 current_frame_info.spill_size = spill_size;
2927 current_frame_info.extra_spill_size = extra_spill_size;
2928 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2929 current_frame_info.n_spilled = n_spilled;
2930 current_frame_info.initialized = reload_completed;
2931 }
2932
2933 /* Worker function for TARGET_CAN_ELIMINATE. */
2934
2935 bool
2936 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2937 {
2938 return (to == BR_REG (0) ? crtl->is_leaf : true);
2939 }
2940
2941 /* Compute the initial difference between the specified pair of registers. */
2942
2943 HOST_WIDE_INT
2944 ia64_initial_elimination_offset (int from, int to)
2945 {
2946 HOST_WIDE_INT offset;
2947
2948 ia64_compute_frame_size (get_frame_size ());
2949 switch (from)
2950 {
2951 case FRAME_POINTER_REGNUM:
2952 switch (to)
2953 {
2954 case HARD_FRAME_POINTER_REGNUM:
2955 offset = -current_frame_info.total_size;
2956 if (!crtl->is_leaf || cfun->calls_alloca)
2957 offset += 16 + crtl->outgoing_args_size;
2958 break;
2959
2960 case STACK_POINTER_REGNUM:
2961 offset = 0;
2962 if (!crtl->is_leaf || cfun->calls_alloca)
2963 offset += 16 + crtl->outgoing_args_size;
2964 break;
2965
2966 default:
2967 gcc_unreachable ();
2968 }
2969 break;
2970
2971 case ARG_POINTER_REGNUM:
2972 /* Arguments start above the 16 byte save area, unless stdarg,
2973 in which case we store through the 16 byte save area. */
2974 switch (to)
2975 {
2976 case HARD_FRAME_POINTER_REGNUM:
2977 offset = 16 - crtl->args.pretend_args_size;
2978 break;
2979
2980 case STACK_POINTER_REGNUM:
2981 offset = (current_frame_info.total_size
2982 + 16 - crtl->args.pretend_args_size);
2983 break;
2984
2985 default:
2986 gcc_unreachable ();
2987 }
2988 break;
2989
2990 default:
2991 gcc_unreachable ();
2992 }
2993
2994 return offset;
2995 }
2996
2997 /* If there are more than a trivial number of register spills, we use
2998 two interleaved iterators so that we can get two memory references
2999 per insn group.
3000
3001 In order to simplify things in the prologue and epilogue expanders,
3002 we use helper functions to fix up the memory references after the
3003 fact with the appropriate offsets to a POST_MODIFY memory mode.
3004 The following data structure tracks the state of the two iterators
3005 while insns are being emitted. */
3006
3007 struct spill_fill_data
3008 {
3009 rtx_insn *init_after; /* point at which to emit initializations */
3010 rtx init_reg[2]; /* initial base register */
3011 rtx iter_reg[2]; /* the iterator registers */
3012 rtx *prev_addr[2]; /* address of last memory use */
3013 rtx_insn *prev_insn[2]; /* the insn corresponding to prev_addr */
3014 HOST_WIDE_INT prev_off[2]; /* last offset */
3015 int n_iter; /* number of iterators in use */
3016 int next_iter; /* next iterator to use */
3017 unsigned int save_gr_used_mask;
3018 };
3019
3020 static struct spill_fill_data spill_fill_data;
3021
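/* Initialize the spill iterators for a sequence of N_SPILLS saves or
   restores that starts from INIT_REG at CFA offset CFA_OFF.  A second
   iterator is used when there are more than two spills.  */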
3022 static void
3023 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3024 {
3025 int i;
3026
3027 spill_fill_data.init_after = get_last_insn ();
3028 spill_fill_data.init_reg[0] = init_reg;
3029 spill_fill_data.init_reg[1] = init_reg;
3030 spill_fill_data.prev_addr[0] = NULL;
3031 spill_fill_data.prev_addr[1] = NULL;
3032 spill_fill_data.prev_insn[0] = NULL;
3033 spill_fill_data.prev_insn[1] = NULL;
3034 spill_fill_data.prev_off[0] = cfa_off;
3035 spill_fill_data.prev_off[1] = cfa_off;
3036 spill_fill_data.next_iter = 0;
3037 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3038
3039 spill_fill_data.n_iter = 1 + (n_spills > 2);
3040 for (i = 0; i < spill_fill_data.n_iter; ++i)
3041 {
3042 int regno = next_scratch_gr_reg ();
3043 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3044 current_frame_info.gr_used_mask |= 1 << regno;
3045 }
3046 }
3047
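/* Release the scratch registers reserved for the spill iterators.  */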
3048 static void
3049 finish_spill_pointers (void)
3050 {
3051 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3052 }
3053
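/* Return a MEM, addressed through the next iterator register, with which
   REG can be saved or restored at CFA offset CFA_OFF.  Where possible the
   previous memory reference is rewritten to a POST_MODIFY address instead
   of emitting a separate add.  */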
3054 static rtx
3055 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3056 {
3057 int iter = spill_fill_data.next_iter;
3058 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3059 rtx disp_rtx = GEN_INT (disp);
3060 rtx mem;
3061
3062 if (spill_fill_data.prev_addr[iter])
3063 {
3064 if (satisfies_constraint_N (disp_rtx))
3065 {
3066 *spill_fill_data.prev_addr[iter]
3067 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3068 gen_rtx_PLUS (DImode,
3069 spill_fill_data.iter_reg[iter],
3070 disp_rtx));
3071 add_reg_note (spill_fill_data.prev_insn[iter],
3072 REG_INC, spill_fill_data.iter_reg[iter]);
3073 }
3074 else
3075 {
3076 /* ??? Could use register post_modify for loads. */
3077 if (!satisfies_constraint_I (disp_rtx))
3078 {
3079 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3080 emit_move_insn (tmp, disp_rtx);
3081 disp_rtx = tmp;
3082 }
3083 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3084 spill_fill_data.iter_reg[iter], disp_rtx));
3085 }
3086 }
3087 /* Micro-optimization: if we've created a frame pointer, it's at
3088 CFA 0, which may allow the real iterator to be initialized lower,
3089 slightly increasing parallelism. Also, if there are few saves
3090 it may eliminate the iterator entirely. */
3091 else if (disp == 0
3092 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3093 && frame_pointer_needed)
3094 {
3095 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3096 set_mem_alias_set (mem, get_varargs_alias_set ());
3097 return mem;
3098 }
3099 else
3100 {
3101 rtx seq;
3102 rtx_insn *insn;
3103
3104 if (disp == 0)
3105 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3106 spill_fill_data.init_reg[iter]);
3107 else
3108 {
3109 start_sequence ();
3110
3111 if (!satisfies_constraint_I (disp_rtx))
3112 {
3113 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3114 emit_move_insn (tmp, disp_rtx);
3115 disp_rtx = tmp;
3116 }
3117
3118 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3119 spill_fill_data.init_reg[iter],
3120 disp_rtx));
3121
3122 seq = get_insns ();
3123 end_sequence ();
3124 }
3125
3126 /* Be careful in case this is the first insn in a sequence. */
3127 if (spill_fill_data.init_after)
3128 insn = emit_insn_after (seq, spill_fill_data.init_after);
3129 else
3130 {
3131 rtx_insn *first = get_insns ();
3132 if (first)
3133 insn = emit_insn_before (seq, first);
3134 else
3135 insn = emit_insn (seq);
3136 }
3137 spill_fill_data.init_after = insn;
3138 }
3139
3140 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3141
3142 /* ??? Not all of the spills are for varargs, but some of them are.
3143 The rest of the spills belong in an alias set of their own. But
3144 it doesn't actually hurt to include them here. */
3145 set_mem_alias_set (mem, get_varargs_alias_set ());
3146
3147 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3148 spill_fill_data.prev_off[iter] = cfa_off;
3149
3150 if (++iter >= spill_fill_data.n_iter)
3151 iter = 0;
3152 spill_fill_data.next_iter = iter;
3153
3154 return mem;
3155 }
3156
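/* Emit a spill of REG to CFA offset CFA_OFF using MOVE_FN.  If FRAME_REG is
   non-null, mark the insn frame related and attach a REG_CFA_OFFSET note
   recording the save of FRAME_REG for the unwinder.  */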
3157 static void
3158 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3159 rtx frame_reg)
3160 {
3161 int iter = spill_fill_data.next_iter;
3162 rtx mem;
3163 rtx_insn *insn;
3164
3165 mem = spill_restore_mem (reg, cfa_off);
3166 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3167 spill_fill_data.prev_insn[iter] = insn;
3168
3169 if (frame_reg)
3170 {
3171 rtx base;
3172 HOST_WIDE_INT off;
3173
3174 RTX_FRAME_RELATED_P (insn) = 1;
3175
3176 /* Don't even pretend that the unwind code can intuit its way
3177 through a pair of interleaved post_modify iterators. Just
3178 provide the correct answer. */
3179
3180 if (frame_pointer_needed)
3181 {
3182 base = hard_frame_pointer_rtx;
3183 off = - cfa_off;
3184 }
3185 else
3186 {
3187 base = stack_pointer_rtx;
3188 off = current_frame_info.total_size - cfa_off;
3189 }
3190
3191 add_reg_note (insn, REG_CFA_OFFSET,
3192 gen_rtx_SET (VOIDmode,
3193 gen_rtx_MEM (GET_MODE (reg),
3194 plus_constant (Pmode,
3195 base, off)),
3196 frame_reg));
3197 }
3198 }
3199
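/* Emit a restore of REG from CFA offset CFA_OFF using MOVE_FN.  */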
3200 static void
3201 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3202 {
3203 int iter = spill_fill_data.next_iter;
3204 rtx_insn *insn;
3205
3206 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3207 GEN_INT (cfa_off)));
3208 spill_fill_data.prev_insn[iter] = insn;
3209 }
3210
3211 /* Wrapper functions that discard the CONST_INT spill offset. These
3212 exist so that we can give gr_spill/gr_fill the offset they need and
3213 use a consistent function interface. */
3214
3215 static rtx
3216 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3217 {
3218 return gen_movdi (dest, src);
3219 }
3220
3221 static rtx
3222 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3223 {
3224 return gen_fr_spill (dest, src);
3225 }
3226
3227 static rtx
3228 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3229 {
3230 return gen_fr_restore (dest, src);
3231 }
3232
3233 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
3234
3235 /* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
3236 #define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
3237
3238 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
3239 inclusive. These are offsets from the current stack pointer. BS_SIZE
3240 is the size of the backing store. ??? This clobbers r2 and r3. */
3241
3242 static void
3243 ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
3244 int bs_size)
3245 {
3246 rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
3247 rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
3248 rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
3249
3250 /* On the IA-64 there is a second stack in memory, namely the Backing Store
3251 of the Register Stack Engine. We also need to probe it after checking
3252 that the 2 stacks don't overlap. */
3253 emit_insn (gen_bsp_value (r3));
3254 emit_move_insn (r2, GEN_INT (-(first + size)));
3255
3256 /* Compare current value of BSP and SP registers. */
3257 emit_insn (gen_rtx_SET (VOIDmode, p6,
3258 gen_rtx_fmt_ee (LTU, BImode,
3259 r3, stack_pointer_rtx)));
3260
3261 /* Compute the address of the probe for the Backing Store (which grows
3262 towards higher addresses). We probe only at the first offset of
3263 the next page because some OSes (e.g. Linux/ia64) only extend the
3264 backing store when this specific address is hit (but generate a SEGV
3265 on other addresses). Page size is the worst case (4KB). The reserve
3266 size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
3267 Also compute the address of the last probe for the memory stack
3268 (which grows towards lower addresses). */
3269 emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
3270 emit_insn (gen_rtx_SET (VOIDmode, r2,
3271 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3272
3273 /* Compare them and raise SEGV if the former has topped the latter. */
3274 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3275 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3276 gen_rtx_SET (VOIDmode, p6,
3277 gen_rtx_fmt_ee (GEU, BImode,
3278 r3, r2))));
3279 emit_insn (gen_rtx_SET (VOIDmode,
3280 gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
3281 const0_rtx),
3282 const0_rtx));
3283 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3284 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3285 gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
3286 GEN_INT (11))));
3287
3288 /* Probe the Backing Store if necessary. */
3289 if (bs_size > 0)
3290 emit_stack_probe (r3);
3291
3292 /* Probe the memory stack if necessary. */
3293 if (size == 0)
3294 ;
3295
3296 /* See if we have a constant small number of probes to generate. If so,
3297 that's the easy case. */
3298 else if (size <= PROBE_INTERVAL)
3299 emit_stack_probe (r2);
3300
3301 /* The run-time loop is made up of 8 insns in the generic case while this
3302 compile-time loop is made up of 5+2*(n-2) insns for n intervals. */
3303 else if (size <= 4 * PROBE_INTERVAL)
3304 {
3305 HOST_WIDE_INT i;
3306
3307 emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
3308 emit_insn (gen_rtx_SET (VOIDmode, r2,
3309 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3310 emit_stack_probe (r2);
3311
3312 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
3313 it exceeds SIZE. If only two probes are needed, this will not
3314 generate any code. Then probe at FIRST + SIZE. */
3315 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
3316 {
3317 emit_insn (gen_rtx_SET (VOIDmode, r2,
3318 plus_constant (Pmode, r2, -PROBE_INTERVAL)));
3319 emit_stack_probe (r2);
3320 }
3321
3322 emit_insn (gen_rtx_SET (VOIDmode, r2,
3323 plus_constant (Pmode, r2,
3324 (i - PROBE_INTERVAL) - size)));
3325 emit_stack_probe (r2);
3326 }
3327
3328 /* Otherwise, do the same as above, but in a loop. Note that we must be
3329 extra careful with variables wrapping around because we might be at
3330 the very top (or the very bottom) of the address space and we have
3331 to be able to handle this case properly; in particular, we use an
3332 equality test for the loop condition. */
3333 else
3334 {
3335 HOST_WIDE_INT rounded_size;
3336
3337 emit_move_insn (r2, GEN_INT (-first));
3338
3339
3340 /* Step 1: round SIZE to the previous multiple of the interval. */
3341
3342 rounded_size = size & -PROBE_INTERVAL;
3343
3344
3345 /* Step 2: compute initial and final value of the loop counter. */
3346
3347 /* TEST_ADDR = SP + FIRST. */
3348 emit_insn (gen_rtx_SET (VOIDmode, r2,
3349 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3350
3351 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
3352 if (rounded_size > (1 << 21))
3353 {
3354 emit_move_insn (r3, GEN_INT (-rounded_size));
3355 emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
3356 }
3357 else
3358 emit_insn (gen_rtx_SET (VOIDmode, r3,
3359 gen_rtx_PLUS (Pmode, r2,
3360 GEN_INT (-rounded_size))));
3361
3362
3363 /* Step 3: the loop
3364
3365 while (TEST_ADDR != LAST_ADDR)
3366 {
3367 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
3368 probe at TEST_ADDR
3369 }
3370
3371 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
3372 until it is equal to ROUNDED_SIZE. */
3373
3374 emit_insn (gen_probe_stack_range (r2, r2, r3));
3375
3376
3377 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
3378 that SIZE is equal to ROUNDED_SIZE. */
3379
3380 /* TEMP = SIZE - ROUNDED_SIZE. */
3381 if (size != rounded_size)
3382 {
3383 emit_insn (gen_rtx_SET (VOIDmode, r2,
3384 plus_constant (Pmode, r2,
3385 rounded_size - size)));
3386 emit_stack_probe (r2);
3387 }
3388 }
3389
3390 /* Make sure nothing is scheduled before we are done. */
3391 emit_insn (gen_blockage ());
3392 }
3393
3394 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
3395 absolute addresses. */
3396
3397 const char *
3398 output_probe_stack_range (rtx reg1, rtx reg2)
3399 {
3400 static int labelno = 0;
3401 char loop_lab[32], end_lab[32];
3402 rtx xops[3];
3403
3404 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
3405 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
3406
3407 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
3408
3409 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
3410 xops[0] = reg1;
3411 xops[1] = reg2;
3412 xops[2] = gen_rtx_REG (BImode, PR_REG (6));
3413 output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
3414 fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
3415 assemble_name_raw (asm_out_file, end_lab);
3416 fputc ('\n', asm_out_file);
3417
3418 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
3419 xops[1] = GEN_INT (-PROBE_INTERVAL);
3420 output_asm_insn ("addl %0 = %1, %0", xops);
3421 fputs ("\t;;\n", asm_out_file);
3422
3423 /* Probe at TEST_ADDR and branch. */
3424 output_asm_insn ("probe.w.fault %0, 0", xops);
3425 fprintf (asm_out_file, "\tbr ");
3426 assemble_name_raw (asm_out_file, loop_lab);
3427 fputc ('\n', asm_out_file);
3428
3429 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
3430
3431 return "";
3432 }
3433
3434 /* Called after register allocation to add any instructions needed for the
3435 prologue. Using a prologue insn is favored compared to putting all of the
3436 instructions in output_function_prologue(), since it allows the scheduler
3437 to intermix instructions with the saves of the caller saved registers. In
3438 some cases, it might be necessary to emit a barrier instruction as the last
3439 insn to prevent such scheduling.
3440
3441 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3442 so that the debug info generation code can handle them properly.
3443
3444 The register save area is laid out like so:
3445 cfa+16
3446 [ varargs spill area ]
3447 [ fr register spill area ]
3448 [ br register spill area ]
3449 [ ar register spill area ]
3450 [ pr register spill area ]
3451 [ gr register spill area ] */
3452
3453 /* ??? We get inefficient code when the frame size is larger than can fit in an
3454 adds instruction. */
3455
3456 void
3457 ia64_expand_prologue (void)
3458 {
3459 rtx_insn *insn;
3460 rtx ar_pfs_save_reg, ar_unat_save_reg;
3461 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3462 rtx reg, alt_reg;
3463
3464 ia64_compute_frame_size (get_frame_size ());
3465 last_scratch_gr_reg = 15;
3466
3467 if (flag_stack_usage_info)
3468 current_function_static_stack_size = current_frame_info.total_size;
3469
3470 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3471 {
3472 HOST_WIDE_INT size = current_frame_info.total_size;
3473 int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
3474 + current_frame_info.n_local_regs);
3475
3476 if (crtl->is_leaf && !cfun->calls_alloca)
3477 {
3478 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
3479 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3480 size - STACK_CHECK_PROTECT,
3481 bs_size);
3482 else if (size + bs_size > STACK_CHECK_PROTECT)
3483 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
3484 }
3485 else if (size + bs_size > 0)
3486 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
3487 }
3488
3489 if (dump_file)
3490 {
3491 fprintf (dump_file, "ia64 frame related registers "
3492 "recorded in current_frame_info.r[]:\n");
3493 #define PRINTREG(a) if (current_frame_info.r[a]) \
3494 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3495 PRINTREG(reg_fp);
3496 PRINTREG(reg_save_b0);
3497 PRINTREG(reg_save_pr);
3498 PRINTREG(reg_save_ar_pfs);
3499 PRINTREG(reg_save_ar_unat);
3500 PRINTREG(reg_save_ar_lc);
3501 PRINTREG(reg_save_gp);
3502 #undef PRINTREG
3503 }
3504
3505 /* If there is no epilogue, then we don't need some prologue insns.
3506 We need to avoid emitting the dead prologue insns, because flow
3507 will complain about them. */
3508 if (optimize)
3509 {
3510 edge e;
3511 edge_iterator ei;
3512
3513 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
3514 if ((e->flags & EDGE_FAKE) == 0
3515 && (e->flags & EDGE_FALLTHRU) != 0)
3516 break;
3517 epilogue_p = (e != NULL);
3518 }
3519 else
3520 epilogue_p = 1;
3521
3522 /* Set the local, input, and output register names. We need to do this
3523 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3524 half. If we use in/loc/out register names, then we get assembler errors
3525 in crtn.S because there is no alloc insn or regstk directive in there. */
3526 if (! TARGET_REG_NAMES)
3527 {
3528 int inputs = current_frame_info.n_input_regs;
3529 int locals = current_frame_info.n_local_regs;
3530 int outputs = current_frame_info.n_output_regs;
3531
3532 for (i = 0; i < inputs; i++)
3533 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3534 for (i = 0; i < locals; i++)
3535 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3536 for (i = 0; i < outputs; i++)
3537 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3538 }
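
/* As a purely illustrative example: with 2 input, 3 local and 2 output
   registers, the loops above make the assembler see the raw stacked names,
   so in0/in1 print as r32/r33, loc0..loc2 as r34..r36, and out0/out1 as
   r37/r38.  */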
3539
3540 /* Set the frame pointer register name. The regnum is logically loc79,
3541 but of course we'll not have allocated that many locals. Rather than
3542 worrying about renumbering the existing rtxs, we adjust the name. */
3543 /* ??? This code means that we can never use one local register when
3544 there is a frame pointer. loc79 gets wasted in this case, as it is
3545 renamed to a register that will never be used. See also the try_locals
3546 code in find_gr_spill. */
3547 if (current_frame_info.r[reg_fp])
3548 {
3549 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3550 reg_names[HARD_FRAME_POINTER_REGNUM]
3551 = reg_names[current_frame_info.r[reg_fp]];
3552 reg_names[current_frame_info.r[reg_fp]] = tmp;
3553 }
3554
3555 /* We don't need an alloc instruction if we've used no outputs or locals. */
3556 if (current_frame_info.n_local_regs == 0
3557 && current_frame_info.n_output_regs == 0
3558 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3559 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3560 {
3561 /* If there is no alloc, but there are input registers used, then we
3562 need a .regstk directive. */
3563 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3564 ar_pfs_save_reg = NULL_RTX;
3565 }
3566 else
3567 {
3568 current_frame_info.need_regstk = 0;
3569
3570 if (current_frame_info.r[reg_save_ar_pfs])
3571 {
3572 regno = current_frame_info.r[reg_save_ar_pfs];
3573 reg_emitted (reg_save_ar_pfs);
3574 }
3575 else
3576 regno = next_scratch_gr_reg ();
3577 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3578
3579 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3580 GEN_INT (current_frame_info.n_input_regs),
3581 GEN_INT (current_frame_info.n_local_regs),
3582 GEN_INT (current_frame_info.n_output_regs),
3583 GEN_INT (current_frame_info.n_rotate_regs)));
3584 if (current_frame_info.r[reg_save_ar_pfs])
3585 {
3586 RTX_FRAME_RELATED_P (insn) = 1;
3587 add_reg_note (insn, REG_CFA_REGISTER,
3588 gen_rtx_SET (VOIDmode,
3589 ar_pfs_save_reg,
3590 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3591 }
3592 }
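
/* As a hypothetical example of the insn just emitted: for a frame with
   2 input, 3 local, 2 output and no rotating registers, with ar.pfs saved
   in loc0 (r34), the alloc assembles to something like

	alloc r34 = ar.pfs, 2, 3, 2, 0

   The save register is whatever current_frame_info.r[reg_save_ar_pfs] or
   next_scratch_gr_reg supplied above.  */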
3593
3594 /* Set up frame pointer, stack pointer, and spill iterators. */
3595
3596 n_varargs = cfun->machine->n_varargs;
3597 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3598 stack_pointer_rtx, 0);
3599
3600 if (frame_pointer_needed)
3601 {
3602 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3603 RTX_FRAME_RELATED_P (insn) = 1;
3604
3605 /* Force the unwind info to recognize this as defining a new CFA,
3606 rather than some temp register setup. */
3607 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3608 }
3609
3610 if (current_frame_info.total_size != 0)
3611 {
3612 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3613 rtx offset;
3614
3615 if (satisfies_constraint_I (frame_size_rtx))
3616 offset = frame_size_rtx;
3617 else
3618 {
3619 regno = next_scratch_gr_reg ();
3620 offset = gen_rtx_REG (DImode, regno);
3621 emit_move_insn (offset, frame_size_rtx);
3622 }
3623
3624 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3625 stack_pointer_rtx, offset));
3626
3627 if (! frame_pointer_needed)
3628 {
3629 RTX_FRAME_RELATED_P (insn) = 1;
3630 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3631 gen_rtx_SET (VOIDmode,
3632 stack_pointer_rtx,
3633 gen_rtx_PLUS (DImode,
3634 stack_pointer_rtx,
3635 frame_size_rtx)));
3636 }
3637
3638 /* ??? At this point we must generate a magic insn that appears to
3639 modify the stack pointer, the frame pointer, and all spill
3640 iterators. This would allow the most scheduling freedom. For
3641 now, just hard stop. */
3642 emit_insn (gen_blockage ());
3643 }
3644
3645 /* Must copy out ar.unat before doing any integer spills. */
3646 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3647 {
3648 if (current_frame_info.r[reg_save_ar_unat])
3649 {
3650 ar_unat_save_reg
3651 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3652 reg_emitted (reg_save_ar_unat);
3653 }
3654 else
3655 {
3656 alt_regno = next_scratch_gr_reg ();
3657 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3658 current_frame_info.gr_used_mask |= 1 << alt_regno;
3659 }
3660
3661 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3662 insn = emit_move_insn (ar_unat_save_reg, reg);
3663 if (current_frame_info.r[reg_save_ar_unat])
3664 {
3665 RTX_FRAME_RELATED_P (insn) = 1;
3666 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3667 }
3668
3669 /* Even if we're not going to generate an epilogue, we still
3670 need to save the register so that EH works. */
3671 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3672 emit_insn (gen_prologue_use (ar_unat_save_reg));
3673 }
3674 else
3675 ar_unat_save_reg = NULL_RTX;
3676
3677 /* Spill all varargs registers. Do this before spilling any GR registers,
3678 since we want the UNAT bits for the GR registers to override the UNAT
3679 bits from varargs, which we don't care about. */
3680
3681 cfa_off = -16;
3682 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3683 {
3684 reg = gen_rtx_REG (DImode, regno);
3685 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3686 }
3687
3688 /* Locate the bottom of the register save area. */
3689 cfa_off = (current_frame_info.spill_cfa_off
3690 + current_frame_info.spill_size
3691 + current_frame_info.extra_spill_size);
3692
3693 /* Save the predicate register block either in a register or in memory. */
3694 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3695 {
3696 reg = gen_rtx_REG (DImode, PR_REG (0));
3697 if (current_frame_info.r[reg_save_pr] != 0)
3698 {
3699 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3700 reg_emitted (reg_save_pr);
3701 insn = emit_move_insn (alt_reg, reg);
3702
3703 /* ??? Denote pr spill/fill by a DImode move that modifies all
3704 64 hard registers. */
3705 RTX_FRAME_RELATED_P (insn) = 1;
3706 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3707
3708 /* Even if we're not going to generate an epilogue, we still
3709 need to save the register so that EH works. */
3710 if (! epilogue_p)
3711 emit_insn (gen_prologue_use (alt_reg));
3712 }
3713 else
3714 {
3715 alt_regno = next_scratch_gr_reg ();
3716 alt_reg = gen_rtx_REG (DImode, alt_regno);
3717 insn = emit_move_insn (alt_reg, reg);
3718 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3719 cfa_off -= 8;
3720 }
3721 }
3722
3723 /* Handle AR regs in numerical order. All of them get special handling. */
3724 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3725 && current_frame_info.r[reg_save_ar_unat] == 0)
3726 {
3727 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3728 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3729 cfa_off -= 8;
3730 }
3731
3732 /* The alloc insn already copied ar.pfs into a general register. The
3733 only thing we have to do now is copy that register to a stack slot
3734 if we have not allocated a local register for the job. */
3735 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3736 && current_frame_info.r[reg_save_ar_pfs] == 0)
3737 {
3738 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3739 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3740 cfa_off -= 8;
3741 }
3742
3743 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3744 {
3745 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3746 if (current_frame_info.r[reg_save_ar_lc] != 0)
3747 {
3748 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3749 reg_emitted (reg_save_ar_lc);
3750 insn = emit_move_insn (alt_reg, reg);
3751 RTX_FRAME_RELATED_P (insn) = 1;
3752 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3753
3754 /* Even if we're not going to generate an epilogue, we still
3755 need to save the register so that EH works. */
3756 if (! epilogue_p)
3757 emit_insn (gen_prologue_use (alt_reg));
3758 }
3759 else
3760 {
3761 alt_regno = next_scratch_gr_reg ();
3762 alt_reg = gen_rtx_REG (DImode, alt_regno);
3763 emit_move_insn (alt_reg, reg);
3764 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3765 cfa_off -= 8;
3766 }
3767 }
3768
3769 /* Save the return pointer. */
3770 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3771 {
3772 reg = gen_rtx_REG (DImode, BR_REG (0));
3773 if (current_frame_info.r[reg_save_b0] != 0)
3774 {
3775 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3776 reg_emitted (reg_save_b0);
3777 insn = emit_move_insn (alt_reg, reg);
3778 RTX_FRAME_RELATED_P (insn) = 1;
3779 add_reg_note (insn, REG_CFA_REGISTER,
3780 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3781
3782 /* Even if we're not going to generate an epilogue, we still
3783 need to save the register so that EH works. */
3784 if (! epilogue_p)
3785 emit_insn (gen_prologue_use (alt_reg));
3786 }
3787 else
3788 {
3789 alt_regno = next_scratch_gr_reg ();
3790 alt_reg = gen_rtx_REG (DImode, alt_regno);
3791 emit_move_insn (alt_reg, reg);
3792 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3793 cfa_off -= 8;
3794 }
3795 }
3796
3797 if (current_frame_info.r[reg_save_gp])
3798 {
3799 reg_emitted (reg_save_gp);
3800 insn = emit_move_insn (gen_rtx_REG (DImode,
3801 current_frame_info.r[reg_save_gp]),
3802 pic_offset_table_rtx);
3803 }
3804
3805 /* We should now be at the base of the gr/br/fr spill area. */
3806 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3807 + current_frame_info.spill_size));
3808
3809 /* Spill all general registers. */
3810 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3811 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3812 {
3813 reg = gen_rtx_REG (DImode, regno);
3814 do_spill (gen_gr_spill, reg, cfa_off, reg);
3815 cfa_off -= 8;
3816 }
3817
3818 /* Spill the rest of the BR registers. */
3819 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3820 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3821 {
3822 alt_regno = next_scratch_gr_reg ();
3823 alt_reg = gen_rtx_REG (DImode, alt_regno);
3824 reg = gen_rtx_REG (DImode, regno);
3825 emit_move_insn (alt_reg, reg);
3826 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3827 cfa_off -= 8;
3828 }
3829
3830 /* Align the frame and spill all FR registers. */
3831 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3832 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3833 {
3834 gcc_assert (!(cfa_off & 15));
3835 reg = gen_rtx_REG (XFmode, regno);
3836 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3837 cfa_off -= 16;
3838 }
3839
3840 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3841
3842 finish_spill_pointers ();
3843 }
3844
3845 /* Output the textual info surrounding the prologue. */
3846
3847 void
3848 ia64_start_function (FILE *file, const char *fnname,
3849 tree decl ATTRIBUTE_UNUSED)
3850 {
3851 #if TARGET_ABI_OPEN_VMS
3852 vms_start_function (fnname);
3853 #endif
3854
3855 fputs ("\t.proc ", file);
3856 assemble_name (file, fnname);
3857 fputc ('\n', file);
3858 ASM_OUTPUT_LABEL (file, fnname);
3859 }
3860
3861 /* Called after register allocation to add any instructions needed for the
3862 epilogue. Using an epilogue insn is favored compared to putting all of the
3863 instructions in output_function_epilogue(), since it allows the scheduler
3864 to intermix instructions with the restores of the call-saved registers. In
3865 some cases, it might be necessary to emit a barrier instruction as the last
3866 insn to prevent such scheduling. */
3867
3868 void
3869 ia64_expand_epilogue (int sibcall_p)
3870 {
3871 rtx_insn *insn;
3872 rtx reg, alt_reg, ar_unat_save_reg;
3873 int regno, alt_regno, cfa_off;
3874
3875 ia64_compute_frame_size (get_frame_size ());
3876
3877 /* If there is a frame pointer, then we use it instead of the stack
3878 pointer, so that the stack pointer does not need to be valid when
3879 the epilogue starts. See EXIT_IGNORE_STACK. */
3880 if (frame_pointer_needed)
3881 setup_spill_pointers (current_frame_info.n_spilled,
3882 hard_frame_pointer_rtx, 0);
3883 else
3884 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3885 current_frame_info.total_size);
3886
3887 if (current_frame_info.total_size != 0)
3888 {
3889 /* ??? At this point we must generate a magic insn that appears to
3890 modify the spill iterators and the frame pointer. This would
3891 allow the most scheduling freedom. For now, just hard stop. */
3892 emit_insn (gen_blockage ());
3893 }
3894
3895 /* Locate the bottom of the register save area. */
3896 cfa_off = (current_frame_info.spill_cfa_off
3897 + current_frame_info.spill_size
3898 + current_frame_info.extra_spill_size);
3899
3900 /* Restore the predicate registers. */
3901 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3902 {
3903 if (current_frame_info.r[reg_save_pr] != 0)
3904 {
3905 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3906 reg_emitted (reg_save_pr);
3907 }
3908 else
3909 {
3910 alt_regno = next_scratch_gr_reg ();
3911 alt_reg = gen_rtx_REG (DImode, alt_regno);
3912 do_restore (gen_movdi_x, alt_reg, cfa_off);
3913 cfa_off -= 8;
3914 }
3915 reg = gen_rtx_REG (DImode, PR_REG (0));
3916 emit_move_insn (reg, alt_reg);
3917 }
3918
3919 /* Restore the application registers. */
3920
3921 /* Load the saved unat from the stack, but do not restore it until
3922 after the GRs have been restored. */
3923 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3924 {
3925 if (current_frame_info.r[reg_save_ar_unat] != 0)
3926 {
3927 ar_unat_save_reg
3928 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3929 reg_emitted (reg_save_ar_unat);
3930 }
3931 else
3932 {
3933 alt_regno = next_scratch_gr_reg ();
3934 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3935 current_frame_info.gr_used_mask |= 1 << alt_regno;
3936 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3937 cfa_off -= 8;
3938 }
3939 }
3940 else
3941 ar_unat_save_reg = NULL_RTX;
3942
3943 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3944 {
3945 reg_emitted (reg_save_ar_pfs);
3946 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3947 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3948 emit_move_insn (reg, alt_reg);
3949 }
3950 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3951 {
3952 alt_regno = next_scratch_gr_reg ();
3953 alt_reg = gen_rtx_REG (DImode, alt_regno);
3954 do_restore (gen_movdi_x, alt_reg, cfa_off);
3955 cfa_off -= 8;
3956 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3957 emit_move_insn (reg, alt_reg);
3958 }
3959
3960 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3961 {
3962 if (current_frame_info.r[reg_save_ar_lc] != 0)
3963 {
3964 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3965 reg_emitted (reg_save_ar_lc);
3966 }
3967 else
3968 {
3969 alt_regno = next_scratch_gr_reg ();
3970 alt_reg = gen_rtx_REG (DImode, alt_regno);
3971 do_restore (gen_movdi_x, alt_reg, cfa_off);
3972 cfa_off -= 8;
3973 }
3974 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3975 emit_move_insn (reg, alt_reg);
3976 }
3977
3978 /* Restore the return pointer. */
3979 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3980 {
3981 if (current_frame_info.r[reg_save_b0] != 0)
3982 {
3983 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3984 reg_emitted (reg_save_b0);
3985 }
3986 else
3987 {
3988 alt_regno = next_scratch_gr_reg ();
3989 alt_reg = gen_rtx_REG (DImode, alt_regno);
3990 do_restore (gen_movdi_x, alt_reg, cfa_off);
3991 cfa_off -= 8;
3992 }
3993 reg = gen_rtx_REG (DImode, BR_REG (0));
3994 emit_move_insn (reg, alt_reg);
3995 }
3996
3997 /* We should now be at the base of the gr/br/fr spill area. */
3998 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3999 + current_frame_info.spill_size));
4000
4001 /* The GP may be stored on the stack in the prologue, but it's
4002 never restored in the epilogue. Skip the stack slot. */
4003 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
4004 cfa_off -= 8;
4005
4006 /* Restore all general registers. */
4007 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
4008 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4009 {
4010 reg = gen_rtx_REG (DImode, regno);
4011 do_restore (gen_gr_restore, reg, cfa_off);
4012 cfa_off -= 8;
4013 }
4014
4015 /* Restore the branch registers. */
4016 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
4017 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4018 {
4019 alt_regno = next_scratch_gr_reg ();
4020 alt_reg = gen_rtx_REG (DImode, alt_regno);
4021 do_restore (gen_movdi_x, alt_reg, cfa_off);
4022 cfa_off -= 8;
4023 reg = gen_rtx_REG (DImode, regno);
4024 emit_move_insn (reg, alt_reg);
4025 }
4026
4027 /* Restore floating point registers. */
4028 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
4029 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4030 {
4031 gcc_assert (!(cfa_off & 15));
4032 reg = gen_rtx_REG (XFmode, regno);
4033 do_restore (gen_fr_restore_x, reg, cfa_off);
4034 cfa_off -= 16;
4035 }
4036
4037 /* Restore ar.unat for real. */
4038 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
4039 {
4040 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
4041 emit_move_insn (reg, ar_unat_save_reg);
4042 }
4043
4044 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
4045
4046 finish_spill_pointers ();
4047
4048 if (current_frame_info.total_size
4049 || cfun->machine->ia64_eh_epilogue_sp
4050 || frame_pointer_needed)
4051 {
4052 /* ??? At this point we must generate a magic insn that appears to
4053 modify the spill iterators, the stack pointer, and the frame
4054 pointer. This would allow the most scheduling freedom. For now,
4055 just hard stop. */
4056 emit_insn (gen_blockage ());
4057 }
4058
4059 if (cfun->machine->ia64_eh_epilogue_sp)
4060 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
4061 else if (frame_pointer_needed)
4062 {
4063 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
4064 RTX_FRAME_RELATED_P (insn) = 1;
4065 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
4066 }
4067 else if (current_frame_info.total_size)
4068 {
4069 rtx offset, frame_size_rtx;
4070
4071 frame_size_rtx = GEN_INT (current_frame_info.total_size);
4072 if (satisfies_constraint_I (frame_size_rtx))
4073 offset = frame_size_rtx;
4074 else
4075 {
4076 regno = next_scratch_gr_reg ();
4077 offset = gen_rtx_REG (DImode, regno);
4078 emit_move_insn (offset, frame_size_rtx);
4079 }
4080
4081 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
4082 offset));
4083
4084 RTX_FRAME_RELATED_P (insn) = 1;
4085 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4086 gen_rtx_SET (VOIDmode,
4087 stack_pointer_rtx,
4088 gen_rtx_PLUS (DImode,
4089 stack_pointer_rtx,
4090 frame_size_rtx)));
4091 }
4092
4093 if (cfun->machine->ia64_eh_epilogue_bsp)
4094 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
4095
4096 if (! sibcall_p)
4097 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
4098 else
4099 {
4100 int fp = GR_REG (2);
4101 /* We need a throw-away register here; r0 and r1 are reserved,
4102 so r2 is the first available call-clobbered register. If
4103 there was a frame_pointer register, we may have swapped the
4104 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
4105 sure we're using the string "r2" when emitting the register
4106 name for the assembler. */
4107 if (current_frame_info.r[reg_fp]
4108 && current_frame_info.r[reg_fp] == GR_REG (2))
4109 fp = HARD_FRAME_POINTER_REGNUM;
4110
4111 /* We must emit an alloc to force the input registers to become output
4112 registers. Otherwise, if the callee tries to pass its parameters
4113 through to another call without an intervening alloc, then these
4114 values get lost. */
4115 /* ??? We don't need to preserve all input registers. We only need to
4116 preserve those input registers used as arguments to the sibling call.
4117 It is unclear how to compute that number here. */
4118 if (current_frame_info.n_input_regs != 0)
4119 {
4120 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
4121
4122 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
4123 const0_rtx, const0_rtx,
4124 n_inputs, const0_rtx));
4125 RTX_FRAME_RELATED_P (insn) = 1;
4126
4127 /* ??? We need to mark the alloc as frame-related so that it gets
4128 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
4129 But there's nothing dwarf2 related to be done wrt the register
4130 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
4131 the empty parallel means dwarf2out will not see anything. */
4132 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4133 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
4134 }
4135 }
4136 }
4137
4138 /* Return 1 if br.ret can do all the work required to return from a
4139 function. */
4140
4141 int
4142 ia64_direct_return (void)
4143 {
4144 if (reload_completed && ! frame_pointer_needed)
4145 {
4146 ia64_compute_frame_size (get_frame_size ());
4147
4148 return (current_frame_info.total_size == 0
4149 && current_frame_info.n_spilled == 0
4150 && current_frame_info.r[reg_save_b0] == 0
4151 && current_frame_info.r[reg_save_pr] == 0
4152 && current_frame_info.r[reg_save_ar_pfs] == 0
4153 && current_frame_info.r[reg_save_ar_unat] == 0
4154 && current_frame_info.r[reg_save_ar_lc] == 0);
4155 }
4156 return 0;
4157 }
4158
4159 /* Return the magic cookie that we use to hold the return address
4160 during early compilation. */
4161
4162 rtx
4163 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
4164 {
4165 if (count != 0)
4166 return NULL;
4167 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
4168 }
4169
4170 /* Split this value after reload, now that we know where the return
4171 address is saved. */
4172
4173 void
4174 ia64_split_return_addr_rtx (rtx dest)
4175 {
4176 rtx src;
4177
4178 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
4179 {
4180 if (current_frame_info.r[reg_save_b0] != 0)
4181 {
4182 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
4183 reg_emitted (reg_save_b0);
4184 }
4185 else
4186 {
4187 HOST_WIDE_INT off;
4188 unsigned int regno;
4189 rtx off_r;
4190
4191 /* Compute offset from CFA for BR0. */
4192 /* ??? Must be kept in sync with ia64_expand_prologue. */
4193 off = (current_frame_info.spill_cfa_off
4194 + current_frame_info.spill_size);
4195 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4196 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4197 off -= 8;
4198
4199 /* Convert the CFA offset to a register-based offset. */
4200 if (frame_pointer_needed)
4201 src = hard_frame_pointer_rtx;
4202 else
4203 {
4204 src = stack_pointer_rtx;
4205 off += current_frame_info.total_size;
4206 }
4207
4208 /* Load address into scratch register. */
4209 off_r = GEN_INT (off);
4210 if (satisfies_constraint_I (off_r))
4211 emit_insn (gen_adddi3 (dest, src, off_r));
4212 else
4213 {
4214 emit_move_insn (dest, off_r);
4215 emit_insn (gen_adddi3 (dest, src, dest));
4216 }
4217
4218 src = gen_rtx_MEM (Pmode, dest);
4219 }
4220 }
4221 else
4222 src = gen_rtx_REG (DImode, BR_REG (0));
4223
4224 emit_move_insn (dest, src);
4225 }
4226
4227 int
4228 ia64_hard_regno_rename_ok (int from, int to)
4229 {
4230 /* Don't clobber any of the registers we reserved for the prologue. */
4231 unsigned int r;
4232
4233 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4234 if (to == current_frame_info.r[r]
4235 || from == current_frame_info.r[r]
4236 || to == emitted_frame_related_regs[r]
4237 || from == emitted_frame_related_regs[r])
4238 return 0;
4239
4240 /* Don't use output registers outside the register frame. */
4241 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4242 return 0;
4243
4244 /* Retain even/oddness on predicate register pairs. */
4245 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4246 return (from & 1) == (to & 1);
4247
4248 return 1;
4249 }
4250
4251 /* Target hook for assembling integer objects. Handle word-sized
4252 aligned objects and detect the cases when @fptr is needed. */
4253
4254 static bool
4255 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4256 {
4257 if (size == POINTER_SIZE / BITS_PER_UNIT
4258 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4259 && GET_CODE (x) == SYMBOL_REF
4260 && SYMBOL_REF_FUNCTION_P (x))
4261 {
4262 static const char * const directive[2][2] = {
4263 /* 64-bit pointer */ /* 32-bit pointer */
4264 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4265 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4266 };
4267 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4268 output_addr_const (asm_out_file, x);
4269 fputs (")\n", asm_out_file);
4270 return true;
4271 }
4272 return default_assemble_integer (x, size, aligned_p);
4273 }
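
/* For example, for a 64-bit aligned pointer to a function `foo' (a
   placeholder name), the code above emits

	data8	@fptr(foo)

   while an unaligned 32-bit pointer would use data4.ua instead.  */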
4274
4275 /* Emit the function prologue. */
4276
4277 static void
4278 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4279 {
4280 int mask, grsave, grsave_prev;
4281
4282 if (current_frame_info.need_regstk)
4283 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4284 current_frame_info.n_input_regs,
4285 current_frame_info.n_local_regs,
4286 current_frame_info.n_output_regs,
4287 current_frame_info.n_rotate_regs);
4288
4289 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4290 return;
4291
4292 /* Emit the .prologue directive. */
4293
4294 mask = 0;
4295 grsave = grsave_prev = 0;
4296 if (current_frame_info.r[reg_save_b0] != 0)
4297 {
4298 mask |= 8;
4299 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4300 }
4301 if (current_frame_info.r[reg_save_ar_pfs] != 0
4302 && (grsave_prev == 0
4303 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4304 {
4305 mask |= 4;
4306 if (grsave_prev == 0)
4307 grsave = current_frame_info.r[reg_save_ar_pfs];
4308 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4309 }
4310 if (current_frame_info.r[reg_fp] != 0
4311 && (grsave_prev == 0
4312 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4313 {
4314 mask |= 2;
4315 if (grsave_prev == 0)
4316 grsave = HARD_FRAME_POINTER_REGNUM;
4317 grsave_prev = current_frame_info.r[reg_fp];
4318 }
4319 if (current_frame_info.r[reg_save_pr] != 0
4320 && (grsave_prev == 0
4321 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4322 {
4323 mask |= 1;
4324 if (grsave_prev == 0)
4325 grsave = current_frame_info.r[reg_save_pr];
4326 }
4327
4328 if (mask && TARGET_GNU_AS)
4329 fprintf (file, "\t.prologue %d, %d\n", mask,
4330 ia64_dbx_register_number (grsave));
4331 else
4332 fputs ("\t.prologue\n", file);
4333
4334 /* Emit a .spill directive, if necessary, to relocate the base of
4335 the register spill area. */
4336 if (current_frame_info.spill_cfa_off != -16)
4337 fprintf (file, "\t.spill %ld\n",
4338 (long) (current_frame_info.spill_cfa_off
4339 + current_frame_info.spill_size));
4340 }
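
/* Illustrative output only: when b0 and ar.pfs have been saved in two
   consecutive general registers, the mask computed above is 8|4 == 12 and
   grsave is the first register of the pair, so with GNU as this prints

	.prologue 12, <grsave>

   where <grsave> stands for the number returned by
   ia64_dbx_register_number for that register.  */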
4341
4342 /* Emit the .body directive at the scheduled end of the prologue. */
4343
4344 static void
4345 ia64_output_function_end_prologue (FILE *file)
4346 {
4347 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4348 return;
4349
4350 fputs ("\t.body\n", file);
4351 }
4352
4353 /* Emit the function epilogue. */
4354
4355 static void
4356 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4357 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4358 {
4359 int i;
4360
4361 if (current_frame_info.r[reg_fp])
4362 {
4363 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4364 reg_names[HARD_FRAME_POINTER_REGNUM]
4365 = reg_names[current_frame_info.r[reg_fp]];
4366 reg_names[current_frame_info.r[reg_fp]] = tmp;
4367 reg_emitted (reg_fp);
4368 }
4369 if (! TARGET_REG_NAMES)
4370 {
4371 for (i = 0; i < current_frame_info.n_input_regs; i++)
4372 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4373 for (i = 0; i < current_frame_info.n_local_regs; i++)
4374 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4375 for (i = 0; i < current_frame_info.n_output_regs; i++)
4376 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4377 }
4378
4379 current_frame_info.initialized = 0;
4380 }
4381
4382 int
4383 ia64_dbx_register_number (int regno)
4384 {
4385 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4386 from its home at loc79 to something inside the register frame. We
4387 must perform the same renumbering here for the debug info. */
4388 if (current_frame_info.r[reg_fp])
4389 {
4390 if (regno == HARD_FRAME_POINTER_REGNUM)
4391 regno = current_frame_info.r[reg_fp];
4392 else if (regno == current_frame_info.r[reg_fp])
4393 regno = HARD_FRAME_POINTER_REGNUM;
4394 }
4395
4396 if (IN_REGNO_P (regno))
4397 return 32 + regno - IN_REG (0);
4398 else if (LOC_REGNO_P (regno))
4399 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4400 else if (OUT_REGNO_P (regno))
4401 return (32 + current_frame_info.n_input_regs
4402 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4403 else
4404 return regno;
4405 }
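
/* Worked example (illustrative values): with 2 input and 3 local
   registers, in1 maps to 32 + 1 == 33, loc0 to 32 + 2 + 0 == 34, and
   out0 to 32 + 2 + 3 + 0 == 37; registers outside the stacked partition
   fall through the final else and are returned unchanged.  */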
4406
4407 /* Implement TARGET_TRAMPOLINE_INIT.
4408
4409 The trampoline should set the static chain pointer to value placed
4410 into the trampoline and should branch to the specified routine.
4411 To make the normal indirect-subroutine calling convention work,
4412 the trampoline must look like a function descriptor, with the first
4413 word being the target address and the second being the target's
4414 global pointer.
4415
4416 We abuse the concept of a global pointer by arranging for it
4417 to point to the data we need to load. The complete trampoline
4418 has the following form:
4419
4420 +-------------------+ \
4421 TRAMP: | __ia64_trampoline | |
4422 +-------------------+ > fake function descriptor
4423 | TRAMP+16 | |
4424 +-------------------+ /
4425 | target descriptor |
4426 +-------------------+
4427 | static link |
4428 +-------------------+
4429 */
4430
4431 static void
4432 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4433 {
4434 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4435 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4436
4437 /* The Intel assembler requires that the global __ia64_trampoline symbol
4438 be declared explicitly. */
4439 if (!TARGET_GNU_AS)
4440 {
4441 static bool declared_ia64_trampoline = false;
4442
4443 if (!declared_ia64_trampoline)
4444 {
4445 declared_ia64_trampoline = true;
4446 (*targetm.asm_out.globalize_label) (asm_out_file,
4447 "__ia64_trampoline");
4448 }
4449 }
4450
4451 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4452 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4453 fnaddr = convert_memory_address (Pmode, fnaddr);
4454 static_chain = convert_memory_address (Pmode, static_chain);
4455
4456 /* Load up our iterator. */
4457 addr_reg = copy_to_reg (addr);
4458 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4459
4460 /* The first two words are the fake descriptor:
4461 __ia64_trampoline, ADDR+16. */
4462 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4463 if (TARGET_ABI_OPEN_VMS)
4464 {
4465 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4466 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4467 relocation against function symbols to make it identical to the
4468 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4469 strict ELF and dereference to get the bare code address. */
4470 rtx reg = gen_reg_rtx (Pmode);
4471 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4472 emit_move_insn (reg, tramp);
4473 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4474 tramp = reg;
4475 }
4476 emit_move_insn (m_tramp, tramp);
4477 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4478 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4479
4480 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
4481 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4482 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4483
4484 /* The third word is the target descriptor. */
4485 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4486 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4487 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4488
4489 /* The fourth word is the static chain. */
4490 emit_move_insn (m_tramp, static_chain);
4491 }
4492 \f
4493 /* Do any needed setup for a variadic function. CUM has not been updated
4494 for the last named argument which has type TYPE and mode MODE.
4495
4496 We generate the actual spill instructions during prologue generation. */
4497
4498 static void
4499 ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
4500 tree type, int * pretend_size,
4501 int second_time ATTRIBUTE_UNUSED)
4502 {
4503 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4504
4505 /* Skip the current argument. */
4506 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4507
4508 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4509 {
4510 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4511 *pretend_size = n * UNITS_PER_WORD;
4512 cfun->machine->n_varargs = n;
4513 }
4514 }
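
/* Example (assuming MAX_ARGUMENT_SLOTS == 8 and UNITS_PER_WORD == 8): for
   a variadic function whose named arguments fill the first two slots,
   next_cum.words is 2 after the advance above, so n == 6,
   *pretend_size == 48, and the prologue will spill the last six incoming
   argument registers into the varargs save area.  */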
4515
4516 /* Check whether TYPE is a homogeneous floating point aggregate. If
4517 it is, return the mode of the floating point type that appears
4518 in all leaves. If it is not, return VOIDmode.
4519
4520 An aggregate is a homogeneous floating point aggregate if all
4521 fields/elements in it have the same floating point type (e.g.,
4522 SFmode). 128-bit quad-precision floats are excluded.
4523
4524 Variable sized aggregates should never arrive here, since we should
4525 have already decided to pass them by reference. Top-level zero-sized
4526 aggregates are excluded because our parallels crash the middle-end. */
4527
4528 static enum machine_mode
4529 hfa_element_mode (const_tree type, bool nested)
4530 {
4531 enum machine_mode element_mode = VOIDmode;
4532 enum machine_mode mode;
4533 enum tree_code code = TREE_CODE (type);
4534 int know_element_mode = 0;
4535 tree t;
4536
4537 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4538 return VOIDmode;
4539
4540 switch (code)
4541 {
4542 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4543 case BOOLEAN_TYPE: case POINTER_TYPE:
4544 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4545 case LANG_TYPE: case FUNCTION_TYPE:
4546 return VOIDmode;
4547
4548 /* Fortran complex types are supposed to be HFAs, so we need to handle
4549 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4550 types though. */
4551 case COMPLEX_TYPE:
4552 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4553 && TYPE_MODE (type) != TCmode)
4554 return GET_MODE_INNER (TYPE_MODE (type));
4555 else
4556 return VOIDmode;
4557
4558 case REAL_TYPE:
4559 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4560 mode if this is contained within an aggregate. */
4561 if (nested && TYPE_MODE (type) != TFmode)
4562 return TYPE_MODE (type);
4563 else
4564 return VOIDmode;
4565
4566 case ARRAY_TYPE:
4567 return hfa_element_mode (TREE_TYPE (type), 1);
4568
4569 case RECORD_TYPE:
4570 case UNION_TYPE:
4571 case QUAL_UNION_TYPE:
4572 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4573 {
4574 if (TREE_CODE (t) != FIELD_DECL)
4575 continue;
4576
4577 mode = hfa_element_mode (TREE_TYPE (t), 1);
4578 if (know_element_mode)
4579 {
4580 if (mode != element_mode)
4581 return VOIDmode;
4582 }
4583 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4584 return VOIDmode;
4585 else
4586 {
4587 know_element_mode = 1;
4588 element_mode = mode;
4589 }
4590 }
4591 return element_mode;
4592
4593 default:
4594 /* If we reach here, we probably have some front-end specific type
4595 that the backend doesn't know about. This can happen via the
4596 aggregate_value_p call in init_function_start. All we can do is
4597 ignore unknown tree types. */
4598 return VOIDmode;
4599 }
4600
4601 return VOIDmode;
4602 }
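
/* A few purely illustrative classifications:
     struct { float x, y, z; }      -> SFmode (an HFA of three floats)
     struct { double d[4]; }        -> DFmode
     struct { float x; double y; }  -> VOIDmode (mixed element modes)
     _Complex double                -> DFmode
     a struct of TFmode quad floats -> VOIDmode (quad precision excluded)  */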
4603
4604 /* Return the number of words required to hold a quantity of TYPE and MODE
4605 when passed as an argument. */
4606 static int
4607 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4608 {
4609 int words;
4610
4611 if (mode == BLKmode)
4612 words = int_size_in_bytes (type);
4613 else
4614 words = GET_MODE_SIZE (mode);
4615
4616 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4617 }
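
/* For instance, with UNITS_PER_WORD == 8, a DImode argument needs
   (8 + 7) / 8 == 1 slot while a 12-byte BLKmode aggregate needs
   (12 + 7) / 8 == 2 slots.  */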
4618
4619 /* Return the number of registers that should be skipped so the current
4620 argument (described by TYPE and WORDS) will be properly aligned.
4621
4622 Integer and float arguments larger than 8 bytes start at the next
4623 even boundary. Aggregates larger than 8 bytes start at the next
4624 even boundary if the aggregate has 16 byte alignment. Note that
4625 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4626 but are still to be aligned in registers.
4627
4628 ??? The ABI does not specify how to handle aggregates with
4629 alignment from 9 to 15 bytes, or greater than 16. We handle them
4630 all as if they had 16 byte alignment. Such aggregates can occur
4631 only if gcc extensions are used. */
4632 static int
4633 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4634 const_tree type, int words)
4635 {
4636 /* No registers are skipped on VMS. */
4637 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4638 return 0;
4639
4640 if (type
4641 && TREE_CODE (type) != INTEGER_TYPE
4642 && TREE_CODE (type) != REAL_TYPE)
4643 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4644 else
4645 return words > 1;
4646 }
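
/* For example, if cum->words is odd and the next argument is a 16-byte
   aligned aggregate, or a scalar wider than one word, one slot is skipped
   so that the argument starts on an even slot; a plain int in the same
   position gets no padding and the function returns 0.  */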
4647
4648 /* Return rtx for register where argument is passed, or zero if it is passed
4649 on the stack. */
4650 /* ??? 128-bit quad-precision floats are always passed in general
4651 registers. */
4652
4653 static rtx
4654 ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
4655 const_tree type, bool named, bool incoming)
4656 {
4657 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4658
4659 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4660 int words = ia64_function_arg_words (type, mode);
4661 int offset = ia64_function_arg_offset (cum, type, words);
4662 enum machine_mode hfa_mode = VOIDmode;
4663
4664 /* For OPEN VMS, emit the instruction setting up the argument register here,
4665 when we know this will be emitted together with the other argument-setup
4666 insns. This is not the conceptually best place to do this, but this is
4667 the easiest as we have convenient access to cumulative args info. */
4668
4669 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4670 && named == 1)
4671 {
4672 unsigned HOST_WIDE_INT regval = cum->words;
4673 int i;
4674
4675 for (i = 0; i < 8; i++)
4676 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4677
4678 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4679 GEN_INT (regval));
4680 }
4681
4682 /* If all argument slots are used, then it must go on the stack. */
4683 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4684 return 0;
4685
4686 /* On OpenVMS argument is either in Rn or Fn. */
4687 if (TARGET_ABI_OPEN_VMS)
4688 {
4689 if (FLOAT_MODE_P (mode))
4690 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4691 else
4692 return gen_rtx_REG (mode, basereg + cum->words);
4693 }
4694
4695 /* Check for and handle homogeneous FP aggregates. */
4696 if (type)
4697 hfa_mode = hfa_element_mode (type, 0);
4698
4699 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4700 and unprototyped hfas are passed specially. */
4701 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4702 {
4703 rtx loc[16];
4704 int i = 0;
4705 int fp_regs = cum->fp_regs;
4706 int int_regs = cum->words + offset;
4707 int hfa_size = GET_MODE_SIZE (hfa_mode);
4708 int byte_size;
4709 int args_byte_size;
4710
4711 /* If prototyped, pass it in FR regs then GR regs.
4712 If not prototyped, pass it in both FR and GR regs.
4713
4714 If this is an SFmode aggregate, then it is possible to run out of
4715 FR regs while GR regs are still left. In that case, we pass the
4716 remaining part in the GR regs. */
4717
4718 /* Fill the FP regs. We do this always. We stop if we reach the end
4719 of the argument, the last FP register, or the last argument slot. */
4720
4721 byte_size = ((mode == BLKmode)
4722 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4723 args_byte_size = int_regs * UNITS_PER_WORD;
4724 offset = 0;
4725 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4726 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4727 {
4728 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4729 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4730 + fp_regs)),
4731 GEN_INT (offset));
4732 offset += hfa_size;
4733 args_byte_size += hfa_size;
4734 fp_regs++;
4735 }
4736
4737 /* If no prototype, then the whole thing must go in GR regs. */
4738 if (! cum->prototype)
4739 offset = 0;
4740 /* If this is an SFmode aggregate, then we might have some left over
4741 that needs to go in GR regs. */
4742 else if (byte_size != offset)
4743 int_regs += offset / UNITS_PER_WORD;
4744
4745 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4746
4747 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4748 {
4749 enum machine_mode gr_mode = DImode;
4750 unsigned int gr_size;
4751
4752 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4753 then this goes in a GR reg left adjusted/little endian, right
4754 adjusted/big endian. */
4755 /* ??? Currently this is handled wrong, because 4-byte hunks are
4756 always right adjusted/little endian. */
4757 if (offset & 0x4)
4758 gr_mode = SImode;
4759 /* If we have an even 4 byte hunk because the aggregate is a
4760 multiple of 4 bytes in size, then this goes in a GR reg right
4761 adjusted/little endian. */
4762 else if (byte_size - offset == 4)
4763 gr_mode = SImode;
4764
4765 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4766 gen_rtx_REG (gr_mode, (basereg
4767 + int_regs)),
4768 GEN_INT (offset));
4769
4770 gr_size = GET_MODE_SIZE (gr_mode);
4771 offset += gr_size;
4772 if (gr_size == UNITS_PER_WORD
4773 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4774 int_regs++;
4775 else if (gr_size > UNITS_PER_WORD)
4776 int_regs += gr_size / UNITS_PER_WORD;
4777 }
4778 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4779 }
4780
4781 /* Integral values and aggregates go in general registers. If we have run out of
4782 FR registers, then FP values must also go in general registers. This can
4783 happen when we have a SFmode HFA. */
4784 else if (mode == TFmode || mode == TCmode
4785 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4786 {
4787 int byte_size = ((mode == BLKmode)
4788 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4789 if (BYTES_BIG_ENDIAN
4790 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4791 && byte_size < UNITS_PER_WORD
4792 && byte_size > 0)
4793 {
4794 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4795 gen_rtx_REG (DImode,
4796 (basereg + cum->words
4797 + offset)),
4798 const0_rtx);
4799 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4800 }
4801 else
4802 return gen_rtx_REG (mode, basereg + cum->words + offset);
4803
4804 }
4805
4806 /* If there is a prototype, then FP values go in a FR register when
4807 named, and in a GR register when unnamed. */
4808 else if (cum->prototype)
4809 {
4810 if (named)
4811 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4812 /* In big-endian mode, an anonymous SFmode value must be represented
4813 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4814 the value into the high half of the general register. */
4815 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4816 return gen_rtx_PARALLEL (mode,
4817 gen_rtvec (1,
4818 gen_rtx_EXPR_LIST (VOIDmode,
4819 gen_rtx_REG (DImode, basereg + cum->words + offset),
4820 const0_rtx)));
4821 else
4822 return gen_rtx_REG (mode, basereg + cum->words + offset);
4823 }
4824 /* If there is no prototype, then FP values go in both FR and GR
4825 registers. */
4826 else
4827 {
4828 /* See comment above. */
4829 enum machine_mode inner_mode =
4830 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4831
4832 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4833 gen_rtx_REG (mode, (FR_ARG_FIRST
4834 + cum->fp_regs)),
4835 const0_rtx);
4836 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4837 gen_rtx_REG (inner_mode,
4838 (basereg + cum->words
4839 + offset)),
4840 const0_rtx);
4841
4842 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4843 }
4844 }
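
/* As an illustration of the HFA case above (the register names assume
   FR_ARG_FIRST is f8): a named, prototyped struct of three floats arriving
   with cum->fp_regs == 0 comes back roughly as

	(parallel:BLK [(expr_list (reg:SF f8)  (const_int 0))
		       (expr_list (reg:SF f9)  (const_int 4))
		       (expr_list (reg:SF f10) (const_int 8))])

   i.e. one SFmode element per FP argument register, each tagged with its
   byte offset into the aggregate.  */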
4845
4846 /* Implement TARGET_FUNCTION_ARG target hook. */
4847
4848 static rtx
4849 ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
4850 const_tree type, bool named)
4851 {
4852 return ia64_function_arg_1 (cum, mode, type, named, false);
4853 }
4854
4855 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4856
4857 static rtx
4858 ia64_function_incoming_arg (cumulative_args_t cum,
4859 enum machine_mode mode,
4860 const_tree type, bool named)
4861 {
4862 return ia64_function_arg_1 (cum, mode, type, named, true);
4863 }
4864
4865 /* Return the number of bytes, at the beginning of the argument, that must be
4866 put in registers. 0 if the argument is entirely in registers or entirely
4867 in memory. */
4868
4869 static int
4870 ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
4871 tree type, bool named ATTRIBUTE_UNUSED)
4872 {
4873 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4874
4875 int words = ia64_function_arg_words (type, mode);
4876 int offset = ia64_function_arg_offset (cum, type, words);
4877
4878 /* If all argument slots are used, then it must go on the stack. */
4879 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4880 return 0;
4881
4882 /* It doesn't matter whether the argument goes in FR or GR regs. If
4883 it fits within the 8 argument slots, then it goes entirely in
4884 registers. If it extends past the last argument slot, then the rest
4885 goes on the stack. */
4886
4887 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4888 return 0;
4889
4890 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4891 }
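
/* Worked example (assuming MAX_ARGUMENT_SLOTS == 8 and UNITS_PER_WORD == 8):
   a 3-word argument arriving when cum->words == 6 with no alignment skip
   passes neither early return, so (8 - 6 - 0) * 8 == 16 bytes go in the
   last two argument registers and the remaining word goes on the stack.  */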
4892
4893 /* Return ivms_arg_type based on machine_mode. */
4894
4895 static enum ivms_arg_type
4896 ia64_arg_type (enum machine_mode mode)
4897 {
4898 switch (mode)
4899 {
4900 case SFmode:
4901 return FS;
4902 case DFmode:
4903 return FT;
4904 default:
4905 return I64;
4906 }
4907 }
4908
4909 /* Update CUM to point after this argument. This is patterned after
4910 ia64_function_arg. */
4911
4912 static void
4913 ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4914 const_tree type, bool named)
4915 {
4916 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4917 int words = ia64_function_arg_words (type, mode);
4918 int offset = ia64_function_arg_offset (cum, type, words);
4919 enum machine_mode hfa_mode = VOIDmode;
4920
4921 /* If all arg slots are already full, then there is nothing to do. */
4922 if (cum->words >= MAX_ARGUMENT_SLOTS)
4923 {
4924 cum->words += words + offset;
4925 return;
4926 }
4927
4928 cum->atypes[cum->words] = ia64_arg_type (mode);
4929 cum->words += words + offset;
4930
4931 /* On OpenVMS argument is either in Rn or Fn. */
4932 if (TARGET_ABI_OPEN_VMS)
4933 {
4934 cum->int_regs = cum->words;
4935 cum->fp_regs = cum->words;
4936 return;
4937 }
4938
4939 /* Check for and handle homogeneous FP aggregates. */
4940 if (type)
4941 hfa_mode = hfa_element_mode (type, 0);
4942
4943 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4944 and unprototyped hfas are passed specially. */
4945 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4946 {
4947 int fp_regs = cum->fp_regs;
4948 /* This is the original value of cum->words + offset. */
4949 int int_regs = cum->words - words;
4950 int hfa_size = GET_MODE_SIZE (hfa_mode);
4951 int byte_size;
4952 int args_byte_size;
4953
4954 /* If prototyped, pass it in FR regs then GR regs.
4955 If not prototyped, pass it in both FR and GR regs.
4956
4957 If this is an SFmode aggregate, then it is possible to run out of
4958 FR regs while GR regs are still left. In that case, we pass the
4959 remaining part in the GR regs. */
4960
4961 /* Fill the FP regs. We do this always. We stop if we reach the end
4962 of the argument, the last FP register, or the last argument slot. */
4963
4964 byte_size = ((mode == BLKmode)
4965 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4966 args_byte_size = int_regs * UNITS_PER_WORD;
4967 offset = 0;
4968 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4969 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4970 {
4971 offset += hfa_size;
4972 args_byte_size += hfa_size;
4973 fp_regs++;
4974 }
4975
4976 cum->fp_regs = fp_regs;
4977 }
4978
4979 /* Integral values and aggregates go in general registers. So do TFmode FP values.
4980 If we have run out of FR registers, then other FP values must also go in
4981 general registers. This can happen when we have a SFmode HFA. */
4982 else if (mode == TFmode || mode == TCmode
4983 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4984 cum->int_regs = cum->words;
4985
4986 /* If there is a prototype, then FP values go in a FR register when
4987 named, and in a GR register when unnamed. */
4988 else if (cum->prototype)
4989 {
4990 if (! named)
4991 cum->int_regs = cum->words;
4992 else
4993 /* ??? Complex types should not reach here. */
4994 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4995 }
4996 /* If there is no prototype, then FP values go in both FR and GR
4997 registers. */
4998 else
4999 {
5000 /* ??? Complex types should not reach here. */
5001 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
5002 cum->int_regs = cum->words;
5003 }
5004 }
5005
5006 /* Arguments with alignment larger than 8 bytes start at the next even
5007 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
5008 even though their normal alignment is 8 bytes. See ia64_function_arg. */
5009
5010 static unsigned int
5011 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
5012 {
5013 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
5014 return PARM_BOUNDARY * 2;
5015
5016 if (type)
5017 {
5018 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
5019 return PARM_BOUNDARY * 2;
5020 else
5021 return PARM_BOUNDARY;
5022 }
5023
5024 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
5025 return PARM_BOUNDARY * 2;
5026 else
5027 return PARM_BOUNDARY;
5028 }
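
/* For example (assuming PARM_BOUNDARY == 64): a 16-byte aligned aggregate
   gets a 128-bit boundary, as does TFmode under ILP32 HP-UX, while an
   ordinary int or double keeps the default 64-bit boundary.  */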
5029
5030 /* True if it is OK to do sibling call optimization for the specified
5031 call expression EXP. DECL will be the called function, or NULL if
5032 this is an indirect call. */
5033 static bool
5034 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5035 {
5036 /* We can't perform a sibcall if the current function has the syscall_linkage
5037 attribute. */
5038 if (lookup_attribute ("syscall_linkage",
5039 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
5040 return false;
5041
5042 /* We must always return with our current GP. This means we can
5043 only sibcall to functions defined in the current module unless
5044 TARGET_CONST_GP is set to true. */
5045 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
5046 }
5047 \f
5048
5049 /* Implement va_arg. */
5050
5051 static tree
5052 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5053 gimple_seq *post_p)
5054 {
5055 /* Variable sized types are passed by reference. */
5056 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5057 {
5058 tree ptrtype = build_pointer_type (type);
5059 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
5060 return build_va_arg_indirect_ref (addr);
5061 }
5062
5063 /* Aggregate arguments with alignment larger than 8 bytes start at
5064 the next even boundary. Integer and floating point arguments
5065 do so if they are larger than 8 bytes, whether or not they are
5066 also aligned larger than 8 bytes. */
5067 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
5068 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
5069 {
5070 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
5071 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5072 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
5073 gimplify_assign (unshare_expr (valist), t, pre_p);
5074 }
5075
5076 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5077 }
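
/* With UNITS_PER_WORD == 8, the realignment above gimplifies to the
   equivalent of

	valist = (valist + 15) & -16;

   i.e. the argument pointer is rounded up to the next 16-byte boundary
   before std_gimplify_va_arg_expr consumes the argument.  */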
5078 \f
5079 /* Return true if the function return value is returned in memory, or
5080 false if it is returned in a register. */
5081
5082 static bool
5083 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
5084 {
5085 enum machine_mode mode;
5086 enum machine_mode hfa_mode;
5087 HOST_WIDE_INT byte_size;
5088
5089 mode = TYPE_MODE (valtype);
5090 byte_size = GET_MODE_SIZE (mode);
5091 if (mode == BLKmode)
5092 {
5093 byte_size = int_size_in_bytes (valtype);
5094 if (byte_size < 0)
5095 return true;
5096 }
5097
5098 /* HFAs with up to 8 elements are returned in the FP argument registers. */
5099
5100 hfa_mode = hfa_element_mode (valtype, 0);
5101 if (hfa_mode != VOIDmode)
5102 {
5103 int hfa_size = GET_MODE_SIZE (hfa_mode);
5104
5105 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
5106 return true;
5107 else
5108 return false;
5109 }
5110 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
5111 return true;
5112 else
5113 return false;
5114 }
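
/* Illustrative cases (assuming MAX_ARGUMENT_SLOTS == 8 and
   MAX_INT_RETURN_SLOTS == 4): an HFA of eight doubles (64 bytes) is still
   returned in FP registers, an HFA of nine doubles is forced into memory,
   and a 40-byte non-HFA structure is also returned in memory because it
   exceeds 4 * UNITS_PER_WORD bytes.  */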
5115
5116 /* Return rtx for register that holds the function return value. */
5117
5118 static rtx
5119 ia64_function_value (const_tree valtype,
5120 const_tree fn_decl_or_type,
5121 bool outgoing ATTRIBUTE_UNUSED)
5122 {
5123 enum machine_mode mode;
5124 enum machine_mode hfa_mode;
5125 int unsignedp;
5126 const_tree func = fn_decl_or_type;
5127
5128 if (fn_decl_or_type
5129 && !DECL_P (fn_decl_or_type))
5130 func = NULL;
5131
5132 mode = TYPE_MODE (valtype);
5133 hfa_mode = hfa_element_mode (valtype, 0);
5134
5135 if (hfa_mode != VOIDmode)
5136 {
5137 rtx loc[8];
5138 int i;
5139 int hfa_size;
5140 int byte_size;
5141 int offset;
5142
5143 hfa_size = GET_MODE_SIZE (hfa_mode);
5144 byte_size = ((mode == BLKmode)
5145 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
5146 offset = 0;
5147 for (i = 0; offset < byte_size; i++)
5148 {
5149 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5150 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
5151 GEN_INT (offset));
5152 offset += hfa_size;
5153 }
5154 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5155 }
5156 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
5157 return gen_rtx_REG (mode, FR_ARG_FIRST);
5158 else
5159 {
5160 bool need_parallel = false;
5161
5162 /* In big-endian mode, we need to manage the layout of aggregates
5163 in the registers so that we get the bits properly aligned in
5164 the highpart of the registers. */
5165 if (BYTES_BIG_ENDIAN
5166 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
5167 need_parallel = true;
5168
5169 /* Something like struct S { long double x; char a[0] } is not an
5170 HFA structure, and therefore doesn't go in fp registers. But
5171 the middle-end will give it XFmode anyway, and XFmode values
5172 don't normally fit in integer registers. So we need to smuggle
5173 the value inside a parallel. */
5174 else if (mode == XFmode || mode == XCmode || mode == RFmode)
5175 need_parallel = true;
5176
5177 if (need_parallel)
5178 {
5179 rtx loc[8];
5180 int offset;
5181 int bytesize;
5182 int i;
5183
5184 offset = 0;
5185 bytesize = int_size_in_bytes (valtype);
5186 /* An empty PARALLEL is invalid here, but the return value
5187 doesn't matter for empty structs. */
5188 if (bytesize == 0)
5189 return gen_rtx_REG (mode, GR_RET_FIRST);
5190 for (i = 0; offset < bytesize; i++)
5191 {
5192 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5193 gen_rtx_REG (DImode,
5194 GR_RET_FIRST + i),
5195 GEN_INT (offset));
5196 offset += UNITS_PER_WORD;
5197 }
5198 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5199 }
5200
5201 mode = promote_function_mode (valtype, mode, &unsignedp,
5202 func ? TREE_TYPE (func) : NULL_TREE,
5203 true);
5204
5205 return gen_rtx_REG (mode, GR_RET_FIRST);
5206 }
5207 }
5208
5209 /* Worker function for TARGET_LIBCALL_VALUE. */
5210
5211 static rtx
5212 ia64_libcall_value (enum machine_mode mode,
5213 const_rtx fun ATTRIBUTE_UNUSED)
5214 {
5215 return gen_rtx_REG (mode,
5216 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5217 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5218 && (mode) != TFmode)
5219 ? FR_RET_FIRST : GR_RET_FIRST));
5220 }
5221
5222 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5223
5224 static bool
5225 ia64_function_value_regno_p (const unsigned int regno)
5226 {
5227 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5228 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5229 }
5230
5231 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5232 We need to emit DTP-relative relocations. */
5233
5234 static void
5235 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5236 {
5237 gcc_assert (size == 4 || size == 8);
5238 if (size == 4)
5239 fputs ("\tdata4.ua\t@dtprel(", file);
5240 else
5241 fputs ("\tdata8.ua\t@dtprel(", file);
5242 output_addr_const (file, x);
5243 fputs (")", file);
5244 }
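/* For example, with SIZE == 8 and X a SYMBOL_REF for a hypothetical
   thread-local symbol `foo', the code above emits roughly:

	data8.ua	@dtprel(foo)  */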
5245
5246 /* Print a memory address as an operand to reference that memory location. */
5247
5248 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5249 also call this from ia64_print_operand for memory addresses. */
5250
5251 static void
5252 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5253 rtx address ATTRIBUTE_UNUSED)
5254 {
5255 }
5256
5257 /* Print an operand to an assembler instruction.
5258 C Swap and print a comparison operator.
5259 D Print an FP comparison operator.
5260 E Print 32 - constant, for SImode shifts as extract.
5261 e Print 64 - constant, for DImode rotates.
5262 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5263 a floating point register emitted normally.
5264 G A floating point constant.
5265 I Invert a predicate register by adding 1.
5266 J Select the proper predicate register for a condition.
5267 j Select the inverse predicate register for a condition.
5268 O Append .acq for volatile load.
5269 P Postincrement of a MEM.
5270 Q Append .rel for volatile store.
5271 R Print .s .d or nothing for a single, double or no truncation.
5272 S Shift amount for shladd instruction.
5273 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5274 for Intel assembler.
5275 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5276 for Intel assembler.
5277 X A pair of floating point registers.
5278 r Print register name, or constant 0 as r0. HP compatibility for
5279 Linux kernel.
5280 v Print vector constant value as an 8-byte integer value. */
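/* A rough illustration (the template and operand numbers are hypothetical,
   not quoted from ia64.md): in an output template such as
   "(%J0) br.cond%+ %l1", %J0 prints the predicate register chosen for
   comparison operand 0, %+ appends a .sptk/.dptk/.dpnt/.spnt branch hint
   derived from the insn's REG_BR_PROB note, and %, (a punctuation code,
   see below) prints a "(pN) " prefix for predicated insns.  */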
5281
5282 static void
5283 ia64_print_operand (FILE * file, rtx x, int code)
5284 {
5285 const char *str;
5286
5287 switch (code)
5288 {
5289 case 0:
5290 /* Handled below. */
5291 break;
5292
5293 case 'C':
5294 {
5295 enum rtx_code c = swap_condition (GET_CODE (x));
5296 fputs (GET_RTX_NAME (c), file);
5297 return;
5298 }
5299
5300 case 'D':
5301 switch (GET_CODE (x))
5302 {
5303 case NE:
5304 str = "neq";
5305 break;
5306 case UNORDERED:
5307 str = "unord";
5308 break;
5309 case ORDERED:
5310 str = "ord";
5311 break;
5312 case UNLT:
5313 str = "nge";
5314 break;
5315 case UNLE:
5316 str = "ngt";
5317 break;
5318 case UNGT:
5319 str = "nle";
5320 break;
5321 case UNGE:
5322 str = "nlt";
5323 break;
5324 case UNEQ:
5325 case LTGT:
5326 gcc_unreachable ();
5327 default:
5328 str = GET_RTX_NAME (GET_CODE (x));
5329 break;
5330 }
5331 fputs (str, file);
5332 return;
5333
5334 case 'E':
5335 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5336 return;
5337
5338 case 'e':
5339 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5340 return;
5341
5342 case 'F':
5343 if (x == CONST0_RTX (GET_MODE (x)))
5344 str = reg_names [FR_REG (0)];
5345 else if (x == CONST1_RTX (GET_MODE (x)))
5346 str = reg_names [FR_REG (1)];
5347 else
5348 {
5349 gcc_assert (GET_CODE (x) == REG);
5350 str = reg_names [REGNO (x)];
5351 }
5352 fputs (str, file);
5353 return;
5354
5355 case 'G':
5356 {
5357 long val[4];
5358 REAL_VALUE_TYPE rv;
5359 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5360 real_to_target (val, &rv, GET_MODE (x));
5361 if (GET_MODE (x) == SFmode)
5362 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5363 else if (GET_MODE (x) == DFmode)
5364 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5365 & 0xffffffff,
5366 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5367 & 0xffffffff);
5368 else
5369 output_operand_lossage ("invalid %%G mode");
5370 }
5371 return;
5372
5373 case 'I':
5374 fputs (reg_names [REGNO (x) + 1], file);
5375 return;
5376
5377 case 'J':
5378 case 'j':
5379 {
5380 unsigned int regno = REGNO (XEXP (x, 0));
5381 if (GET_CODE (x) == EQ)
5382 regno += 1;
5383 if (code == 'j')
5384 regno ^= 1;
5385 fputs (reg_names [regno], file);
5386 }
5387 return;
5388
5389 case 'O':
5390 if (MEM_VOLATILE_P (x))
5391 fputs(".acq", file);
5392 return;
5393
5394 case 'P':
5395 {
5396 HOST_WIDE_INT value;
5397
5398 switch (GET_CODE (XEXP (x, 0)))
5399 {
5400 default:
5401 return;
5402
5403 case POST_MODIFY:
5404 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5405 if (GET_CODE (x) == CONST_INT)
5406 value = INTVAL (x);
5407 else
5408 {
5409 gcc_assert (GET_CODE (x) == REG);
5410 fprintf (file, ", %s", reg_names[REGNO (x)]);
5411 return;
5412 }
5413 break;
5414
5415 case POST_INC:
5416 value = GET_MODE_SIZE (GET_MODE (x));
5417 break;
5418
5419 case POST_DEC:
5420 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5421 break;
5422 }
5423
5424 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5425 return;
5426 }
5427
5428 case 'Q':
5429 if (MEM_VOLATILE_P (x))
5430 fputs(".rel", file);
5431 return;
5432
5433 case 'R':
5434 if (x == CONST0_RTX (GET_MODE (x)))
5435 fputs(".s", file);
5436 else if (x == CONST1_RTX (GET_MODE (x)))
5437 fputs(".d", file);
5438 else if (x == CONST2_RTX (GET_MODE (x)))
5439 ;
5440 else
5441 output_operand_lossage ("invalid %%R value");
5442 return;
5443
5444 case 'S':
5445 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5446 return;
5447
5448 case 'T':
5449 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5450 {
5451 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5452 return;
5453 }
5454 break;
5455
5456 case 'U':
5457 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5458 {
5459 const char *prefix = "0x";
5460 if (INTVAL (x) & 0x80000000)
5461 {
5462 fprintf (file, "0xffffffff");
5463 prefix = "";
5464 }
5465 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5466 return;
5467 }
5468 break;
5469
5470 case 'X':
5471 {
5472 unsigned int regno = REGNO (x);
5473 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5474 }
5475 return;
5476
5477 case 'r':
5478 /* If this operand is the constant zero, write it as register zero.
5479 Any register, zero, or CONST_INT value is OK here. */
5480 if (GET_CODE (x) == REG)
5481 fputs (reg_names[REGNO (x)], file);
5482 else if (x == CONST0_RTX (GET_MODE (x)))
5483 fputs ("r0", file);
5484 else if (GET_CODE (x) == CONST_INT)
5485 output_addr_const (file, x);
5486 else
5487 output_operand_lossage ("invalid %%r value");
5488 return;
5489
5490 case 'v':
5491 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5492 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5493 break;
5494
5495 case '+':
5496 {
5497 const char *which;
5498
5499 /* For conditional branches, returns or calls, substitute
5500 sptk, dptk, dpnt, or spnt for %s. */
5501 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5502 if (x)
5503 {
5504 int pred_val = XINT (x, 0);
5505
5506 /* Guess the top and bottom 2% statically predicted. */
5507 if (pred_val < REG_BR_PROB_BASE / 50
5508 && br_prob_note_reliable_p (x))
5509 which = ".spnt";
5510 else if (pred_val < REG_BR_PROB_BASE / 2)
5511 which = ".dpnt";
5512 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5513 || !br_prob_note_reliable_p (x))
5514 which = ".dptk";
5515 else
5516 which = ".sptk";
5517 }
5518 else if (CALL_P (current_output_insn))
5519 which = ".sptk";
5520 else
5521 which = ".dptk";
5522
5523 fputs (which, file);
5524 return;
5525 }
5526
5527 case ',':
5528 x = current_insn_predicate;
5529 if (x)
5530 {
5531 unsigned int regno = REGNO (XEXP (x, 0));
5532 if (GET_CODE (x) == EQ)
5533 regno += 1;
5534 fprintf (file, "(%s) ", reg_names [regno]);
5535 }
5536 return;
5537
5538 default:
5539 output_operand_lossage ("ia64_print_operand: unknown code");
5540 return;
5541 }
5542
5543 switch (GET_CODE (x))
5544 {
5545 /* This happens for the spill/restore instructions. */
5546 case POST_INC:
5547 case POST_DEC:
5548 case POST_MODIFY:
5549 x = XEXP (x, 0);
5550 /* ... fall through ... */
5551
5552 case REG:
5553 fputs (reg_names [REGNO (x)], file);
5554 break;
5555
5556 case MEM:
5557 {
5558 rtx addr = XEXP (x, 0);
5559 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5560 addr = XEXP (addr, 0);
5561 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5562 break;
5563 }
5564
5565 default:
5566 output_addr_const (file, x);
5567 break;
5568 }
5569
5570 return;
5571 }
5572
5573 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5574
5575 static bool
5576 ia64_print_operand_punct_valid_p (unsigned char code)
5577 {
5578 return (code == '+' || code == ',');
5579 }
5580 \f
5581 /* Compute a (partial) cost for rtx X. Return true if the complete
5582 cost has been computed, and false if subexpressions should be
5583 scanned. In either case, *TOTAL contains the cost result. */
5584 /* ??? This is incomplete. */
5585
5586 static bool
5587 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5588 int *total, bool speed ATTRIBUTE_UNUSED)
5589 {
5590 switch (code)
5591 {
5592 case CONST_INT:
5593 switch (outer_code)
5594 {
5595 case SET:
5596 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5597 return true;
5598 case PLUS:
5599 if (satisfies_constraint_I (x))
5600 *total = 0;
5601 else if (satisfies_constraint_J (x))
5602 *total = 1;
5603 else
5604 *total = COSTS_N_INSNS (1);
5605 return true;
5606 default:
5607 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5608 *total = 0;
5609 else
5610 *total = COSTS_N_INSNS (1);
5611 return true;
5612 }
5613
5614 case CONST_DOUBLE:
5615 *total = COSTS_N_INSNS (1);
5616 return true;
5617
5618 case CONST:
5619 case SYMBOL_REF:
5620 case LABEL_REF:
5621 *total = COSTS_N_INSNS (3);
5622 return true;
5623
5624 case FMA:
5625 *total = COSTS_N_INSNS (4);
5626 return true;
5627
5628 case MULT:
5629 /* For multiplies wider than HImode, we have to go to the FPU,
5630 which normally involves copies. Plus there's the latency
5631 of the multiply itself, and the latency of the instructions to
5632 transfer integer regs to FP regs. */
5633 if (FLOAT_MODE_P (GET_MODE (x)))
5634 *total = COSTS_N_INSNS (4);
5635 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5636 *total = COSTS_N_INSNS (10);
5637 else
5638 *total = COSTS_N_INSNS (2);
5639 return true;
5640
5641 case PLUS:
5642 case MINUS:
5643 if (FLOAT_MODE_P (GET_MODE (x)))
5644 {
5645 *total = COSTS_N_INSNS (4);
5646 return true;
5647 }
5648 /* FALLTHRU */
5649
5650 case ASHIFT:
5651 case ASHIFTRT:
5652 case LSHIFTRT:
5653 *total = COSTS_N_INSNS (1);
5654 return true;
5655
5656 case DIV:
5657 case UDIV:
5658 case MOD:
5659 case UMOD:
5660 /* We make divide expensive, so that divide-by-constant will be
5661 optimized to a multiply. */
5662 *total = COSTS_N_INSNS (60);
5663 return true;
5664
5665 default:
5666 return false;
5667 }
5668 }
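/* A rough reading of the table above: a small immediate that satisfies the
   port's J constraint is free to load, a symbolic address costs about three
   insns' worth (typically a movl or a gp-relative load sequence), integer
   multiplies are priced high because they go through the FP unit, and
   divisions are made very expensive so that division by a constant is
   rewritten as a multiplication.  */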
5669
5670 /* Calculate the cost of moving data from a register in class FROM to
5671 one in class TO, using MODE. */
5672
5673 static int
5674 ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
5675 reg_class_t to)
5676 {
5677 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5678 if (to == ADDL_REGS)
5679 to = GR_REGS;
5680 if (from == ADDL_REGS)
5681 from = GR_REGS;
5682
5683 /* All costs are symmetric, so reduce cases by putting the
5684 lower number class as the destination. */
5685 if (from < to)
5686 {
5687 reg_class_t tmp = to;
5688 to = from, from = tmp;
5689 }
5690
5691 /* Moving between FR and GR in XFmode must be more expensive than 2,
5692 so that we get secondary memory reloads. Between FR_REGS,
5693 we have to make this at least as expensive as memory_move_cost
5694 to avoid spectacularly poor register class preferencing. */
5695 if (mode == XFmode || mode == RFmode)
5696 {
5697 if (to != GR_REGS || from != GR_REGS)
5698 return memory_move_cost (mode, to, false);
5699 else
5700 return 3;
5701 }
5702
5703 switch (to)
5704 {
5705 case PR_REGS:
5706 /* Moving between PR registers takes two insns. */
5707 if (from == PR_REGS)
5708 return 3;
5709 /* Moving between PR and anything but GR is impossible. */
5710 if (from != GR_REGS)
5711 return memory_move_cost (mode, to, false);
5712 break;
5713
5714 case BR_REGS:
5715 /* Moving between BR and anything but GR is impossible. */
5716 if (from != GR_REGS && from != GR_AND_BR_REGS)
5717 return memory_move_cost (mode, to, false);
5718 break;
5719
5720 case AR_I_REGS:
5721 case AR_M_REGS:
5722 /* Moving between AR and anything but GR is impossible. */
5723 if (from != GR_REGS)
5724 return memory_move_cost (mode, to, false);
5725 break;
5726
5727 case GR_REGS:
5728 case FR_REGS:
5729 case FP_REGS:
5730 case GR_AND_FR_REGS:
5731 case GR_AND_BR_REGS:
5732 case ALL_REGS:
5733 break;
5734
5735 default:
5736 gcc_unreachable ();
5737 }
5738
5739 return 2;
5740 }
5741
5742 /* Calculate the cost of moving data of MODE from a register to or from
5743 memory. */
5744
5745 static int
5746 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5747 reg_class_t rclass,
5748 bool in ATTRIBUTE_UNUSED)
5749 {
5750 if (rclass == GENERAL_REGS
5751 || rclass == FR_REGS
5752 || rclass == FP_REGS
5753 || rclass == GR_AND_FR_REGS)
5754 return 4;
5755 else
5756 return 10;
5757 }
5758
5759 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5760 on RCLASS to use when copying X into that class. */
5761
5762 static reg_class_t
5763 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5764 {
5765 switch (rclass)
5766 {
5767 case FR_REGS:
5768 case FP_REGS:
5769 /* Don't allow volatile mem reloads into floating point registers.
5770 This is defined to force reload to choose the r/m case instead
5771 of the f/f case when reloading (set (reg fX) (mem/v)). */
5772 if (MEM_P (x) && MEM_VOLATILE_P (x))
5773 return NO_REGS;
5774
5775 /* Force all unrecognized constants into the constant pool. */
5776 if (CONSTANT_P (x))
5777 return NO_REGS;
5778 break;
5779
5780 case AR_M_REGS:
5781 case AR_I_REGS:
5782 if (!OBJECT_P (x))
5783 return NO_REGS;
5784 break;
5785
5786 default:
5787 break;
5788 }
5789
5790 return rclass;
5791 }
5792
5793 /* This function returns the register class required for a secondary
5794 register when copying between one of the registers in RCLASS, and X,
5795 using MODE. A return value of NO_REGS means that no secondary register
5796 is required. */
5797
5798 enum reg_class
5799 ia64_secondary_reload_class (enum reg_class rclass,
5800 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5801 {
5802 int regno = -1;
5803
5804 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5805 regno = true_regnum (x);
5806
5807 switch (rclass)
5808 {
5809 case BR_REGS:
5810 case AR_M_REGS:
5811 case AR_I_REGS:
5812 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5813 interaction. We end up with two pseudos with overlapping lifetimes
5814 both of which are equiv to the same constant, and both of which need
5815 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5816 changes depending on the path length, which means the qty_first_reg
5817 check in make_regs_eqv can give different answers at different times.
5818 At some point I'll probably need a reload_indi pattern to handle
5819 this.
5820
5821 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5822 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5823 non-general registers for good measure. */
5824 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5825 return GR_REGS;
5826
5827 /* This is needed if a pseudo used as a call_operand gets spilled to a
5828 stack slot. */
5829 if (GET_CODE (x) == MEM)
5830 return GR_REGS;
5831 break;
5832
5833 case FR_REGS:
5834 case FP_REGS:
5835 /* Need to go through general registers to get to other class regs. */
5836 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5837 return GR_REGS;
5838
5839 /* This can happen when a paradoxical subreg is an operand to the
5840 muldi3 pattern. */
5841 /* ??? This shouldn't be necessary after instruction scheduling is
5842 enabled, because paradoxical subregs are not accepted by
5843 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5844 stop the paradoxical subreg stupidity in the *_operand functions
5845 in recog.c. */
5846 if (GET_CODE (x) == MEM
5847 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5848 || GET_MODE (x) == QImode))
5849 return GR_REGS;
5850
5851 /* This can happen because of the ior/and/etc patterns that accept FP
5852 registers as operands. If the third operand is a constant, then it
5853 needs to be reloaded into a FP register. */
5854 if (GET_CODE (x) == CONST_INT)
5855 return GR_REGS;
5856
5857 /* This can happen because of register elimination in a muldi3 insn.
5858 E.g. `26107 * (unsigned long)&u'. */
5859 if (GET_CODE (x) == PLUS)
5860 return GR_REGS;
5861 break;
5862
5863 case PR_REGS:
5864 /* ??? This happens if we cse/gcse a BImode value across a call,
5865 and the function has a nonlocal goto. This is because global
5866 does not allocate call crossing pseudos to hard registers when
5867 crtl->has_nonlocal_goto is true. This is relatively
5868 common for C++ programs that use exceptions. To reproduce,
5869 return NO_REGS and compile libstdc++. */
5870 if (GET_CODE (x) == MEM)
5871 return GR_REGS;
5872
5873 /* This can happen when we take a BImode subreg of a DImode value,
5874 and that DImode value winds up in some non-GR register. */
5875 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5876 return GR_REGS;
5877 break;
5878
5879 default:
5880 break;
5881 }
5882
5883 return NO_REGS;
5884 }
5885
5886 \f
5887 /* Implement targetm.unspec_may_trap_p hook. */
5888 static int
5889 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5890 {
5891 switch (XINT (x, 1))
5892 {
5893 case UNSPEC_LDA:
5894 case UNSPEC_LDS:
5895 case UNSPEC_LDSA:
5896 case UNSPEC_LDCCLR:
5897 case UNSPEC_CHKACLR:
5898 case UNSPEC_CHKS:
5899 /* These unspecs are just wrappers. */
5900 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5901 }
5902
5903 return default_unspec_may_trap_p (x, flags);
5904 }
5905
5906 \f
5907 /* Parse the -mfixed-range= option string. */
5908
5909 static void
5910 fix_range (const char *const_str)
5911 {
5912 int i, first, last;
5913 char *str, *dash, *comma;
5914
5915 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5916 REG2 are either register names or register numbers. The effect
5917 of this option is to mark the registers in the range from REG1 to
5918 REG2 as ``fixed'' so they won't be used by the compiler. This is
5919 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5920
5921 i = strlen (const_str);
5922 str = (char *) alloca (i + 1);
5923 memcpy (str, const_str, i + 1);
5924
5925 while (1)
5926 {
5927 dash = strchr (str, '-');
5928 if (!dash)
5929 {
5930 warning (0, "value of -mfixed-range must have form REG1-REG2");
5931 return;
5932 }
5933 *dash = '\0';
5934
5935 comma = strchr (dash + 1, ',');
5936 if (comma)
5937 *comma = '\0';
5938
5939 first = decode_reg_name (str);
5940 if (first < 0)
5941 {
5942 warning (0, "unknown register name: %s", str);
5943 return;
5944 }
5945
5946 last = decode_reg_name (dash + 1);
5947 if (last < 0)
5948 {
5949 warning (0, "unknown register name: %s", dash + 1);
5950 return;
5951 }
5952
5953 *dash = '-';
5954
5955 if (first > last)
5956 {
5957 warning (0, "%s-%s is an empty range", str, dash + 1);
5958 return;
5959 }
5960
5961 for (i = first; i <= last; ++i)
5962 fixed_regs[i] = call_used_regs[i] = 1;
5963
5964 if (!comma)
5965 break;
5966
5967 *comma = ',';
5968 str = comma + 1;
5969 }
5970 }
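/* Illustrative uses of the option parsed above: -mfixed-range=f32-f127
   reserves the upper 96 FP registers (e.g. for kernel code that must not
   touch them), and -mfixed-range=f32-f63,f96-f127 shows the comma-separated
   multi-range form.  */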
5971
5972 /* Implement TARGET_OPTION_OVERRIDE. */
5973
5974 static void
5975 ia64_option_override (void)
5976 {
5977 unsigned int i;
5978 cl_deferred_option *opt;
5979 vec<cl_deferred_option> *v
5980 = (vec<cl_deferred_option> *) ia64_deferred_options;
5981
5982 if (v)
5983 FOR_EACH_VEC_ELT (*v, i, opt)
5984 {
5985 switch (opt->opt_index)
5986 {
5987 case OPT_mfixed_range_:
5988 fix_range (opt->arg);
5989 break;
5990
5991 default:
5992 gcc_unreachable ();
5993 }
5994 }
5995
5996 if (TARGET_AUTO_PIC)
5997 target_flags |= MASK_CONST_GP;
5998
5999 /* Numerous experiments show that IRA-based loop pressure
6000 calculation works better for RTL loop invariant motion on targets
6001 with enough (>= 32) registers. It is an expensive optimization,
6002 so it is enabled only when optimizing for peak performance. */
6003 if (optimize >= 3)
6004 flag_ira_loop_pressure = 1;
6005
6006
6007 ia64_section_threshold = (global_options_set.x_g_switch_value
6008 ? g_switch_value
6009 : IA64_DEFAULT_GVALUE);
6010
6011 init_machine_status = ia64_init_machine_status;
6012
6013 if (align_functions <= 0)
6014 align_functions = 64;
6015 if (align_loops <= 0)
6016 align_loops = 32;
6017 if (TARGET_ABI_OPEN_VMS)
6018 flag_no_common = 1;
6019
6020 ia64_override_options_after_change();
6021 }
6022
6023 /* Implement targetm.override_options_after_change. */
6024
6025 static void
6026 ia64_override_options_after_change (void)
6027 {
6028 if (optimize >= 3
6029 && !global_options_set.x_flag_selective_scheduling
6030 && !global_options_set.x_flag_selective_scheduling2)
6031 {
6032 flag_selective_scheduling2 = 1;
6033 flag_sel_sched_pipelining = 1;
6034 }
6035 if (mflag_sched_control_spec == 2)
6036 {
6037 /* Control speculation is on by default for the selective scheduler,
6038 but not for the Haifa scheduler. */
6039 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
6040 }
6041 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
6042 {
6043 /* FIXME: remove this once breaking auto-inc insns apart is implemented
6044 as a transformation. */
6045 flag_auto_inc_dec = 0;
6046 }
6047 }
6048
6049 /* Initialize the record of emitted frame related registers. */
6050
6051 void ia64_init_expanders (void)
6052 {
6053 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
6054 }
6055
6056 static struct machine_function *
6057 ia64_init_machine_status (void)
6058 {
6059 return ggc_cleared_alloc<machine_function> ();
6060 }
6061 \f
6062 static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
6063 static enum attr_type ia64_safe_type (rtx_insn *);
6064
6065 static enum attr_itanium_class
6066 ia64_safe_itanium_class (rtx_insn *insn)
6067 {
6068 if (recog_memoized (insn) >= 0)
6069 return get_attr_itanium_class (insn);
6070 else if (DEBUG_INSN_P (insn))
6071 return ITANIUM_CLASS_IGNORE;
6072 else
6073 return ITANIUM_CLASS_UNKNOWN;
6074 }
6075
6076 static enum attr_type
6077 ia64_safe_type (rtx_insn *insn)
6078 {
6079 if (recog_memoized (insn) >= 0)
6080 return get_attr_type (insn);
6081 else
6082 return TYPE_UNKNOWN;
6083 }
6084 \f
6085 /* The following collection of routines emit instruction group stop bits as
6086 necessary to avoid dependencies. */
6087
6088 /* Need to track some additional registers as far as serialization is
6089 concerned so we can properly handle br.call and br.ret. We could
6090 make these registers visible to gcc, but since these registers are
6091 never explicitly used in gcc generated code, it seems wasteful to
6092 do so (plus it would make the call and return patterns needlessly
6093 complex). */
6094 #define REG_RP (BR_REG (0))
6095 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
6096 /* This is used for volatile asms which may require a stop bit immediately
6097 before and after them. */
6098 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
6099 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
6100 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
6101
6102 /* For each register, we keep track of how it has been written in the
6103 current instruction group.
6104
6105 If a register is written unconditionally (no qualifying predicate),
6106 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6107
6108 If a register is written if its qualifying predicate P is true, we
6109 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6110 may be written again by the complement of P (P^1) and when this happens,
6111 WRITE_COUNT gets set to 2.
6112
6113 The result of this is that whenever an insn attempts to write a register
6114 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6115
6116 If a predicate register is written by a floating-point insn, we set
6117 WRITTEN_BY_FP to true.
6118
6119 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6120 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
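/* A minimal illustration in assembler terms (register names are arbitrary):
   after an unconditional

	mov r14 = 1

   r14 has WRITE_COUNT == 2, so any later access to r14 in the same
   instruction group creates a dependency and a stop bit (";;") must be
   emitted first, e.g.

	mov r14 = 1
	;;
	add r15 = r14, r16  */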
6121
6122 #if GCC_VERSION >= 4000
6123 #define RWS_FIELD_TYPE __extension__ unsigned short
6124 #else
6125 #define RWS_FIELD_TYPE unsigned int
6126 #endif
6127 struct reg_write_state
6128 {
6129 RWS_FIELD_TYPE write_count : 2;
6130 RWS_FIELD_TYPE first_pred : 10;
6131 RWS_FIELD_TYPE written_by_fp : 1;
6132 RWS_FIELD_TYPE written_by_and : 1;
6133 RWS_FIELD_TYPE written_by_or : 1;
6134 };
6135
6136 /* Cumulative info for the current instruction group. */
6137 struct reg_write_state rws_sum[NUM_REGS];
6138 #ifdef ENABLE_CHECKING
6139 /* Bitmap whether a register has been written in the current insn. */
6140 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
6141 / HOST_BITS_PER_WIDEST_FAST_INT];
6142
6143 static inline void
6144 rws_insn_set (int regno)
6145 {
6146 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
6147 SET_HARD_REG_BIT (rws_insn, regno);
6148 }
6149
6150 static inline int
6151 rws_insn_test (int regno)
6152 {
6153 return TEST_HARD_REG_BIT (rws_insn, regno);
6154 }
6155 #else
6156 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
6157 unsigned char rws_insn[2];
6158
6159 static inline void
6160 rws_insn_set (int regno)
6161 {
6162 if (regno == REG_AR_CFM)
6163 rws_insn[0] = 1;
6164 else if (regno == REG_VOLATILE)
6165 rws_insn[1] = 1;
6166 }
6167
6168 static inline int
6169 rws_insn_test (int regno)
6170 {
6171 if (regno == REG_AR_CFM)
6172 return rws_insn[0];
6173 if (regno == REG_VOLATILE)
6174 return rws_insn[1];
6175 return 0;
6176 }
6177 #endif
6178
6179 /* Indicates whether this is the first instruction after a stop bit,
6180 in which case we don't need another stop bit. Without this,
6181 ia64_variable_issue will die when scheduling an alloc. */
6182 static int first_instruction;
6183
6184 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
6185 RTL for one instruction. */
6186 struct reg_flags
6187 {
6188 unsigned int is_write : 1; /* Is register being written? */
6189 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
6190 unsigned int is_branch : 1; /* Is register used as part of a branch? */
6191 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
6192 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6193 unsigned int is_sibcall : 1; /* Is this a sibling (rather than a normal) call? */
6194 };
6195
6196 static void rws_update (int, struct reg_flags, int);
6197 static int rws_access_regno (int, struct reg_flags, int);
6198 static int rws_access_reg (rtx, struct reg_flags, int);
6199 static void update_set_flags (rtx, struct reg_flags *);
6200 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6201 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6202 static void init_insn_group_barriers (void);
6203 static int group_barrier_needed (rtx_insn *);
6204 static int safe_group_barrier_needed (rtx_insn *);
6205 static int in_safe_group_barrier;
6206
6207 /* Update *RWS for REGNO, which is being written by the current instruction,
6208 with predicate PRED, and associated register flags in FLAGS. */
6209
6210 static void
6211 rws_update (int regno, struct reg_flags flags, int pred)
6212 {
6213 if (pred)
6214 rws_sum[regno].write_count++;
6215 else
6216 rws_sum[regno].write_count = 2;
6217 rws_sum[regno].written_by_fp |= flags.is_fp;
6218 /* ??? Not tracking and/or across differing predicates. */
6219 rws_sum[regno].written_by_and = flags.is_and;
6220 rws_sum[regno].written_by_or = flags.is_or;
6221 rws_sum[regno].first_pred = pred;
6222 }
6223
6224 /* Handle an access to register REGNO of type FLAGS using predicate register
6225 PRED. Update rws_sum array. Return 1 if this access creates
6226 a dependency with an earlier instruction in the same group. */
6227
6228 static int
6229 rws_access_regno (int regno, struct reg_flags flags, int pred)
6230 {
6231 int need_barrier = 0;
6232
6233 gcc_assert (regno < NUM_REGS);
6234
6235 if (! PR_REGNO_P (regno))
6236 flags.is_and = flags.is_or = 0;
6237
6238 if (flags.is_write)
6239 {
6240 int write_count;
6241
6242 rws_insn_set (regno);
6243 write_count = rws_sum[regno].write_count;
6244
6245 switch (write_count)
6246 {
6247 case 0:
6248 /* The register has not been written yet. */
6249 if (!in_safe_group_barrier)
6250 rws_update (regno, flags, pred);
6251 break;
6252
6253 case 1:
6254 /* The register has been written via a predicate. Treat
6255 it like an unconditional write and do not try to check
6256 for a complementary predicate register in the earlier write. */
6257 if (flags.is_and && rws_sum[regno].written_by_and)
6258 ;
6259 else if (flags.is_or && rws_sum[regno].written_by_or)
6260 ;
6261 else
6262 need_barrier = 1;
6263 if (!in_safe_group_barrier)
6264 rws_update (regno, flags, pred);
6265 break;
6266
6267 case 2:
6268 /* The register has been unconditionally written already. We
6269 need a barrier. */
6270 if (flags.is_and && rws_sum[regno].written_by_and)
6271 ;
6272 else if (flags.is_or && rws_sum[regno].written_by_or)
6273 ;
6274 else
6275 need_barrier = 1;
6276 if (!in_safe_group_barrier)
6277 {
6278 rws_sum[regno].written_by_and = flags.is_and;
6279 rws_sum[regno].written_by_or = flags.is_or;
6280 }
6281 break;
6282
6283 default:
6284 gcc_unreachable ();
6285 }
6286 }
6287 else
6288 {
6289 if (flags.is_branch)
6290 {
6291 /* Branches have several RAW exceptions that allow us to avoid
6292 barriers. */
6293
6294 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6295 /* RAW dependencies on branch regs are permissible as long
6296 as the writer is a non-branch instruction. Since we
6297 never generate code that uses a branch register written
6298 by a branch instruction, handling this case is
6299 easy. */
6300 return 0;
6301
6302 if (REGNO_REG_CLASS (regno) == PR_REGS
6303 && ! rws_sum[regno].written_by_fp)
6304 /* The predicates of a branch are available within the
6305 same insn group as long as the predicate was written by
6306 something other than a floating-point instruction. */
6307 return 0;
6308 }
6309
6310 if (flags.is_and && rws_sum[regno].written_by_and)
6311 return 0;
6312 if (flags.is_or && rws_sum[regno].written_by_or)
6313 return 0;
6314
6315 switch (rws_sum[regno].write_count)
6316 {
6317 case 0:
6318 /* The register has not been written yet. */
6319 break;
6320
6321 case 1:
6322 /* The register has been written via a predicate, assume we
6323 need a barrier (don't check for complementary regs). */
6324 need_barrier = 1;
6325 break;
6326
6327 case 2:
6328 /* The register has been unconditionally written already. We
6329 need a barrier. */
6330 need_barrier = 1;
6331 break;
6332
6333 default:
6334 gcc_unreachable ();
6335 }
6336 }
6337
6338 return need_barrier;
6339 }
6340
6341 static int
6342 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6343 {
6344 int regno = REGNO (reg);
6345 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6346
6347 if (n == 1)
6348 return rws_access_regno (regno, flags, pred);
6349 else
6350 {
6351 int need_barrier = 0;
6352 while (--n >= 0)
6353 need_barrier |= rws_access_regno (regno + n, flags, pred);
6354 return need_barrier;
6355 }
6356 }
6357
6358 /* Examine X, which is a SET rtx, and update the register-access flags
6359 stored in *PFLAGS. */
6360
6361 static void
6362 update_set_flags (rtx x, struct reg_flags *pflags)
6363 {
6364 rtx src = SET_SRC (x);
6365
6366 switch (GET_CODE (src))
6367 {
6368 case CALL:
6369 return;
6370
6371 case IF_THEN_ELSE:
6372 /* There are four cases here:
6373 (1) The destination is (pc), in which case this is a branch,
6374 nothing here applies.
6375 (2) The destination is ar.lc, in which case this is a
6376 doloop_end_internal,
6377 (3) The destination is an fp register, in which case this is
6378 an fselect instruction.
6379 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6380 this is a check load.
6381 In all cases, nothing we do in this function applies. */
6382 return;
6383
6384 default:
6385 if (COMPARISON_P (src)
6386 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6387 /* Set pflags->is_fp to 1 so that we know we're dealing
6388 with a floating point comparison when processing the
6389 destination of the SET. */
6390 pflags->is_fp = 1;
6391
6392 /* Discover if this is a parallel comparison. We only handle
6393 and.orcm and or.andcm at present, since we must retain a
6394 strict inverse on the predicate pair. */
6395 else if (GET_CODE (src) == AND)
6396 pflags->is_and = 1;
6397 else if (GET_CODE (src) == IOR)
6398 pflags->is_or = 1;
6399
6400 break;
6401 }
6402 }
6403
6404 /* Subroutine of rtx_needs_barrier; this function determines whether the
6405 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6406 are as in rtx_needs_barrier. */
6408
6409 static int
6410 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6411 {
6412 int need_barrier = 0;
6413 rtx dst;
6414 rtx src = SET_SRC (x);
6415
6416 if (GET_CODE (src) == CALL)
6417 /* We don't need to worry about the result registers that
6418 get written by a subroutine call. */
6419 return rtx_needs_barrier (src, flags, pred);
6420 else if (SET_DEST (x) == pc_rtx)
6421 {
6422 /* X is a conditional branch. */
6423 /* ??? This seems redundant, as the caller sets this bit for
6424 all JUMP_INSNs. */
6425 if (!ia64_spec_check_src_p (src))
6426 flags.is_branch = 1;
6427 return rtx_needs_barrier (src, flags, pred);
6428 }
6429
6430 if (ia64_spec_check_src_p (src))
6431 /* Avoid checking one register twice (in condition
6432 and in 'then' section) for ldc pattern. */
6433 {
6434 gcc_assert (REG_P (XEXP (src, 2)));
6435 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6436
6437 /* We process MEM below. */
6438 src = XEXP (src, 1);
6439 }
6440
6441 need_barrier |= rtx_needs_barrier (src, flags, pred);
6442
6443 dst = SET_DEST (x);
6444 if (GET_CODE (dst) == ZERO_EXTRACT)
6445 {
6446 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6447 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6448 }
6449 return need_barrier;
6450 }
6451
6452 /* Handle an access to rtx X of type FLAGS using predicate register
6453 PRED. Return 1 if this access creates a dependency with an earlier
6454 instruction in the same group. */
6455
6456 static int
6457 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6458 {
6459 int i, j;
6460 int is_complemented = 0;
6461 int need_barrier = 0;
6462 const char *format_ptr;
6463 struct reg_flags new_flags;
6464 rtx cond;
6465
6466 if (! x)
6467 return 0;
6468
6469 new_flags = flags;
6470
6471 switch (GET_CODE (x))
6472 {
6473 case SET:
6474 update_set_flags (x, &new_flags);
6475 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6476 if (GET_CODE (SET_SRC (x)) != CALL)
6477 {
6478 new_flags.is_write = 1;
6479 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6480 }
6481 break;
6482
6483 case CALL:
6484 new_flags.is_write = 0;
6485 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6486
6487 /* Avoid multiple register writes, in case this is a pattern with
6488 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6489 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6490 {
6491 new_flags.is_write = 1;
6492 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6493 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6494 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6495 }
6496 break;
6497
6498 case COND_EXEC:
6499 /* X is a predicated instruction. */
6500
6501 cond = COND_EXEC_TEST (x);
6502 gcc_assert (!pred);
6503 need_barrier = rtx_needs_barrier (cond, flags, 0);
6504
6505 if (GET_CODE (cond) == EQ)
6506 is_complemented = 1;
6507 cond = XEXP (cond, 0);
6508 gcc_assert (GET_CODE (cond) == REG
6509 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6510 pred = REGNO (cond);
6511 if (is_complemented)
6512 ++pred;
6513
6514 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6515 return need_barrier;
6516
6517 case CLOBBER:
6518 case USE:
6519 /* Clobber & use are for earlier compiler-phases only. */
6520 break;
6521
6522 case ASM_OPERANDS:
6523 case ASM_INPUT:
6524 /* We always emit stop bits for traditional asms. We emit stop bits
6525 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6526 if (GET_CODE (x) != ASM_OPERANDS
6527 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6528 {
6529 /* Avoid writing the register multiple times if we have multiple
6530 asm outputs. This avoids a failure in rws_access_reg. */
6531 if (! rws_insn_test (REG_VOLATILE))
6532 {
6533 new_flags.is_write = 1;
6534 rws_access_regno (REG_VOLATILE, new_flags, pred);
6535 }
6536 return 1;
6537 }
6538
6539 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6540 We cannot just fall through here since then we would be confused
6541 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
6542 a traditional asm, unlike its normal usage. */
6543
6544 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6545 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6546 need_barrier = 1;
6547 break;
6548
6549 case PARALLEL:
6550 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6551 {
6552 rtx pat = XVECEXP (x, 0, i);
6553 switch (GET_CODE (pat))
6554 {
6555 case SET:
6556 update_set_flags (pat, &new_flags);
6557 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6558 break;
6559
6560 case USE:
6561 case CALL:
6562 case ASM_OPERANDS:
6563 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6564 break;
6565
6566 case CLOBBER:
6567 if (REG_P (XEXP (pat, 0))
6568 && extract_asm_operands (x) != NULL_RTX
6569 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6570 {
6571 new_flags.is_write = 1;
6572 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6573 new_flags, pred);
6574 new_flags = flags;
6575 }
6576 break;
6577
6578 case RETURN:
6579 break;
6580
6581 default:
6582 gcc_unreachable ();
6583 }
6584 }
6585 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6586 {
6587 rtx pat = XVECEXP (x, 0, i);
6588 if (GET_CODE (pat) == SET)
6589 {
6590 if (GET_CODE (SET_SRC (pat)) != CALL)
6591 {
6592 new_flags.is_write = 1;
6593 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6594 pred);
6595 }
6596 }
6597 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6598 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6599 }
6600 break;
6601
6602 case SUBREG:
6603 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6604 break;
6605 case REG:
6606 if (REGNO (x) == AR_UNAT_REGNUM)
6607 {
6608 for (i = 0; i < 64; ++i)
6609 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6610 }
6611 else
6612 need_barrier = rws_access_reg (x, flags, pred);
6613 break;
6614
6615 case MEM:
6616 /* Find the regs used in memory address computation. */
6617 new_flags.is_write = 0;
6618 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6619 break;
6620
6621 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6622 case SYMBOL_REF: case LABEL_REF: case CONST:
6623 break;
6624
6625 /* Operators with side-effects. */
6626 case POST_INC: case POST_DEC:
6627 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6628
6629 new_flags.is_write = 0;
6630 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6631 new_flags.is_write = 1;
6632 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6633 break;
6634
6635 case POST_MODIFY:
6636 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6637
6638 new_flags.is_write = 0;
6639 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6640 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6641 new_flags.is_write = 1;
6642 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6643 break;
6644
6645 /* Handle common unary and binary ops for efficiency. */
6646 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6647 case MOD: case UDIV: case UMOD: case AND: case IOR:
6648 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6649 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6650 case NE: case EQ: case GE: case GT: case LE:
6651 case LT: case GEU: case GTU: case LEU: case LTU:
6652 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6653 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6654 break;
6655
6656 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6657 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6658 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6659 case SQRT: case FFS: case POPCOUNT:
6660 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6661 break;
6662
6663 case VEC_SELECT:
6664 /* VEC_SELECT's second argument is a PARALLEL with integers that
6665 describe the elements selected. On ia64, those integers are
6666 always constants. Avoid walking the PARALLEL so that we don't
6667 get confused with "normal" parallels and then die. */
6668 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6669 break;
6670
6671 case UNSPEC:
6672 switch (XINT (x, 1))
6673 {
6674 case UNSPEC_LTOFF_DTPMOD:
6675 case UNSPEC_LTOFF_DTPREL:
6676 case UNSPEC_DTPREL:
6677 case UNSPEC_LTOFF_TPREL:
6678 case UNSPEC_TPREL:
6679 case UNSPEC_PRED_REL_MUTEX:
6680 case UNSPEC_PIC_CALL:
6681 case UNSPEC_MF:
6682 case UNSPEC_FETCHADD_ACQ:
6683 case UNSPEC_FETCHADD_REL:
6684 case UNSPEC_BSP_VALUE:
6685 case UNSPEC_FLUSHRS:
6686 case UNSPEC_BUNDLE_SELECTOR:
6687 break;
6688
6689 case UNSPEC_GR_SPILL:
6690 case UNSPEC_GR_RESTORE:
6691 {
6692 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6693 HOST_WIDE_INT bit = (offset >> 3) & 63;
6694
6695 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6696 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6697 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6698 new_flags, pred);
6699 break;
6700 }
6701
6702 case UNSPEC_FR_SPILL:
6703 case UNSPEC_FR_RESTORE:
6704 case UNSPEC_GETF_EXP:
6705 case UNSPEC_SETF_EXP:
6706 case UNSPEC_ADDP4:
6707 case UNSPEC_FR_SQRT_RECIP_APPROX:
6708 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6709 case UNSPEC_LDA:
6710 case UNSPEC_LDS:
6711 case UNSPEC_LDS_A:
6712 case UNSPEC_LDSA:
6713 case UNSPEC_CHKACLR:
6714 case UNSPEC_CHKS:
6715 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6716 break;
6717
6718 case UNSPEC_FR_RECIP_APPROX:
6719 case UNSPEC_SHRP:
6720 case UNSPEC_COPYSIGN:
6721 case UNSPEC_FR_RECIP_APPROX_RES:
6722 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6723 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6724 break;
6725
6726 case UNSPEC_CMPXCHG_ACQ:
6727 case UNSPEC_CMPXCHG_REL:
6728 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6729 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6730 break;
6731
6732 default:
6733 gcc_unreachable ();
6734 }
6735 break;
6736
6737 case UNSPEC_VOLATILE:
6738 switch (XINT (x, 1))
6739 {
6740 case UNSPECV_ALLOC:
6741 /* Alloc must always be the first instruction of a group.
6742 We force this by always returning true. */
6743 /* ??? We might get better scheduling if we explicitly check for
6744 input/local/output register dependencies, and modify the
6745 scheduler so that alloc is always reordered to the start of
6746 the current group. We could then eliminate all of the
6747 first_instruction code. */
6748 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6749
6750 new_flags.is_write = 1;
6751 rws_access_regno (REG_AR_CFM, new_flags, pred);
6752 return 1;
6753
6754 case UNSPECV_SET_BSP:
6755 case UNSPECV_PROBE_STACK_RANGE:
6756 need_barrier = 1;
6757 break;
6758
6759 case UNSPECV_BLOCKAGE:
6760 case UNSPECV_INSN_GROUP_BARRIER:
6761 case UNSPECV_BREAK:
6762 case UNSPECV_PSAC_ALL:
6763 case UNSPECV_PSAC_NORMAL:
6764 return 0;
6765
6766 case UNSPECV_PROBE_STACK_ADDRESS:
6767 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6768 break;
6769
6770 default:
6771 gcc_unreachable ();
6772 }
6773 break;
6774
6775 case RETURN:
6776 new_flags.is_write = 0;
6777 need_barrier = rws_access_regno (REG_RP, flags, pred);
6778 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6779
6780 new_flags.is_write = 1;
6781 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6782 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6783 break;
6784
6785 default:
6786 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6787 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6788 switch (format_ptr[i])
6789 {
6790 case '0': /* unused field */
6791 case 'i': /* integer */
6792 case 'n': /* note */
6793 case 'w': /* wide integer */
6794 case 's': /* pointer to string */
6795 case 'S': /* optional pointer to string */
6796 break;
6797
6798 case 'e':
6799 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6800 need_barrier = 1;
6801 break;
6802
6803 case 'E':
6804 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6805 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6806 need_barrier = 1;
6807 break;
6808
6809 default:
6810 gcc_unreachable ();
6811 }
6812 break;
6813 }
6814 return need_barrier;
6815 }
6816
6817 /* Clear out the state for group_barrier_needed at the start of a
6818 sequence of insns. */
6819
6820 static void
6821 init_insn_group_barriers (void)
6822 {
6823 memset (rws_sum, 0, sizeof (rws_sum));
6824 first_instruction = 1;
6825 }
6826
6827 /* Given the current state, determine whether a group barrier (a stop bit) is
6828 necessary before INSN. Return nonzero if so. This modifies the state to
6829 include the effects of INSN as a side-effect. */
6830
6831 static int
6832 group_barrier_needed (rtx_insn *insn)
6833 {
6834 rtx pat;
6835 int need_barrier = 0;
6836 struct reg_flags flags;
6837
6838 memset (&flags, 0, sizeof (flags));
6839 switch (GET_CODE (insn))
6840 {
6841 case NOTE:
6842 case DEBUG_INSN:
6843 break;
6844
6845 case BARRIER:
6846 /* A barrier doesn't imply an instruction group boundary. */
6847 break;
6848
6849 case CODE_LABEL:
6850 memset (rws_insn, 0, sizeof (rws_insn));
6851 return 1;
6852
6853 case CALL_INSN:
6854 flags.is_branch = 1;
6855 flags.is_sibcall = SIBLING_CALL_P (insn);
6856 memset (rws_insn, 0, sizeof (rws_insn));
6857
6858 /* Don't bundle a call following another call. */
6859 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6860 {
6861 need_barrier = 1;
6862 break;
6863 }
6864
6865 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6866 break;
6867
6868 case JUMP_INSN:
6869 if (!ia64_spec_check_p (insn))
6870 flags.is_branch = 1;
6871
6872 /* Don't bundle a jump following a call. */
6873 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6874 {
6875 need_barrier = 1;
6876 break;
6877 }
6878 /* FALLTHRU */
6879
6880 case INSN:
6881 if (GET_CODE (PATTERN (insn)) == USE
6882 || GET_CODE (PATTERN (insn)) == CLOBBER)
6883 /* Don't care about USE and CLOBBER "insns"---those are used to
6884 indicate to the optimizer that it shouldn't get rid of
6885 certain operations. */
6886 break;
6887
6888 pat = PATTERN (insn);
6889
6890 /* Ug. Hack hacks hacked elsewhere. */
6891 switch (recog_memoized (insn))
6892 {
6893 /* We play dependency tricks with the epilogue in order
6894 to get proper schedules. Undo this for dv analysis. */
6895 case CODE_FOR_epilogue_deallocate_stack:
6896 case CODE_FOR_prologue_allocate_stack:
6897 pat = XVECEXP (pat, 0, 0);
6898 break;
6899
6900 /* The pattern we use for br.cloop confuses the code above.
6901 The second element of the vector is representative. */
6902 case CODE_FOR_doloop_end_internal:
6903 pat = XVECEXP (pat, 0, 1);
6904 break;
6905
6906 /* Doesn't generate code. */
6907 case CODE_FOR_pred_rel_mutex:
6908 case CODE_FOR_prologue_use:
6909 return 0;
6910
6911 default:
6912 break;
6913 }
6914
6915 memset (rws_insn, 0, sizeof (rws_insn));
6916 need_barrier = rtx_needs_barrier (pat, flags, 0);
6917
6918 /* Check to see if the previous instruction was a volatile
6919 asm. */
6920 if (! need_barrier)
6921 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6922
6923 break;
6924
6925 default:
6926 gcc_unreachable ();
6927 }
6928
6929 if (first_instruction && important_for_bundling_p (insn))
6930 {
6931 need_barrier = 0;
6932 first_instruction = 0;
6933 }
6934
6935 return need_barrier;
6936 }
6937
6938 /* Like group_barrier_needed, but do not clobber the current state. */
6939
6940 static int
6941 safe_group_barrier_needed (rtx_insn *insn)
6942 {
6943 int saved_first_instruction;
6944 int t;
6945
6946 saved_first_instruction = first_instruction;
6947 in_safe_group_barrier = 1;
6948
6949 t = group_barrier_needed (insn);
6950
6951 first_instruction = saved_first_instruction;
6952 in_safe_group_barrier = 0;
6953
6954 return t;
6955 }
6956
6957 /* Scan the current function and insert stop bits as necessary to
6958 eliminate dependencies. This function assumes that a final
6959 instruction scheduling pass has been run which has already
6960 inserted most of the necessary stop bits. This function only
6961 inserts new ones at basic block boundaries, since these are
6962 invisible to the scheduler. */
6963
6964 static void
6965 emit_insn_group_barriers (FILE *dump)
6966 {
6967 rtx_insn *insn;
6968 rtx_insn *last_label = 0;
6969 int insns_since_last_label = 0;
6970
6971 init_insn_group_barriers ();
6972
6973 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6974 {
6975 if (LABEL_P (insn))
6976 {
6977 if (insns_since_last_label)
6978 last_label = insn;
6979 insns_since_last_label = 0;
6980 }
6981 else if (NOTE_P (insn)
6982 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6983 {
6984 if (insns_since_last_label)
6985 last_label = insn;
6986 insns_since_last_label = 0;
6987 }
6988 else if (NONJUMP_INSN_P (insn)
6989 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6990 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6991 {
6992 init_insn_group_barriers ();
6993 last_label = 0;
6994 }
6995 else if (NONDEBUG_INSN_P (insn))
6996 {
6997 insns_since_last_label = 1;
6998
6999 if (group_barrier_needed (insn))
7000 {
7001 if (last_label)
7002 {
7003 if (dump)
7004 fprintf (dump, "Emitting stop before label %d\n",
7005 INSN_UID (last_label));
7006 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
7007 insn = last_label;
7008
7009 init_insn_group_barriers ();
7010 last_label = 0;
7011 }
7012 }
7013 }
7014 }
7015 }
7016
7017 /* Like emit_insn_group_barriers, but run when no final scheduling pass has
7018 been run; this function then has to emit all necessary group barriers. */
7019
7020 static void
7021 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7022 {
7023 rtx_insn *insn;
7024
7025 init_insn_group_barriers ();
7026
7027 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7028 {
7029 if (BARRIER_P (insn))
7030 {
7031 rtx_insn *last = prev_active_insn (insn);
7032
7033 if (! last)
7034 continue;
7035 if (JUMP_TABLE_DATA_P (last))
7036 last = prev_active_insn (last);
7037 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7038 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7039
7040 init_insn_group_barriers ();
7041 }
7042 else if (NONDEBUG_INSN_P (insn))
7043 {
7044 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7045 init_insn_group_barriers ();
7046 else if (group_barrier_needed (insn))
7047 {
7048 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
7049 init_insn_group_barriers ();
7050 group_barrier_needed (insn);
7051 }
7052 }
7053 }
7054 }
7055
7056 \f
7057
7058 /* Instruction scheduling support. */
7059
7060 #define NR_BUNDLES 10
7061
7062 /* A list of names of all available bundles. */
7063
7064 static const char *bundle_name [NR_BUNDLES] =
7065 {
7066 ".mii",
7067 ".mmi",
7068 ".mfi",
7069 ".mmf",
7070 #if NR_BUNDLES == 10
7071 ".bbb",
7072 ".mbb",
7073 #endif
7074 ".mib",
7075 ".mmb",
7076 ".mfb",
7077 ".mlx"
7078 };
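/* For orientation: each template names its three instruction slots, e.g.
   ".mmi" is memory/memory/integer and ".mlx" carries a long immediate in
   its last two slots; the bundling code below chooses among these templates
   and places stop bits where group barriers are required.  */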
7079
7080 /* Nonzero if we should insert stop bits into the schedule. */
7081
7082 int ia64_final_schedule = 0;
7083
7084 /* Codes of the corresponding queried units: */
7085
7086 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
7087 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
7088
7089 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
7090 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
7091
7092 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
7093
7094 /* The following variable value is an insn group barrier. */
7095
7096 static rtx_insn *dfa_stop_insn;
7097
7098 /* The following variable value is the last issued insn. */
7099
7100 static rtx_insn *last_scheduled_insn;
7101
7102 /* The following variable is a pointer to a DFA state used as a
7103 temporary variable. */
7104
7105 static state_t temp_dfa_state = NULL;
7106
7107 /* The following variable value is DFA state after issuing the last
7108 insn. */
7109
7110 static state_t prev_cycle_state = NULL;
7111
7112 /* The following array element values are TRUE if the corresponding
7113 insn requires stop bits to be added before it. */
7114
7115 static char *stops_p = NULL;
7116
7117 /* The following variable is used to set up the array mentioned above. */
7118
7119 static int stop_before_p = 0;
7120
7121 /* The following variable value is the length of the arrays `clocks' and
7122 `add_cycles'. */
7123
7124 static int clocks_length;
7125
7126 /* The following variable value is number of data speculations in progress. */
7127 static int pending_data_specs = 0;
7128
7129 /* Number of memory references on the current and three future processor cycles. */
7130 static char mem_ops_in_group[4];
7131
7132 /* Number of the current processor cycle (from the scheduler's point of view). */
7133 static int current_cycle;
7134
7135 static rtx ia64_single_set (rtx_insn *);
7136 static void ia64_emit_insn_before (rtx, rtx);
7137
7138 /* Map a bundle number to its pseudo-op. */
7139
7140 const char *
7141 get_bundle_name (int b)
7142 {
7143 return bundle_name[b];
7144 }
7145
7146
7147 /* Return the maximum number of instructions a cpu can issue. */
7148
7149 static int
7150 ia64_issue_rate (void)
7151 {
7152 return 6;
7153 }
7154
7155 /* Helper function - like single_set, but look inside COND_EXEC. */
7156
7157 static rtx
7158 ia64_single_set (rtx_insn *insn)
7159 {
7160 rtx x = PATTERN (insn), ret;
7161 if (GET_CODE (x) == COND_EXEC)
7162 x = COND_EXEC_CODE (x);
7163 if (GET_CODE (x) == SET)
7164 return x;
7165
7166 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
7167 Although they are not classical single sets, the second set is there only
7168 to keep the insn from moving past FP-relative stack accesses. */
7169 switch (recog_memoized (insn))
7170 {
7171 case CODE_FOR_prologue_allocate_stack:
7172 case CODE_FOR_prologue_allocate_stack_pr:
7173 case CODE_FOR_epilogue_deallocate_stack:
7174 case CODE_FOR_epilogue_deallocate_stack_pr:
7175 ret = XVECEXP (x, 0, 0);
7176 break;
7177
7178 default:
7179 ret = single_set_2 (insn, x);
7180 break;
7181 }
7182
7183 return ret;
7184 }
7185
7186 /* Adjust the cost of a scheduling dependency.
7187 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
7188 COST is the current cost, DW is the dependency weakness. */
7189 static int
7190 ia64_adjust_cost_2 (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
7191 int cost, dw_t dw)
7192 {
7193 enum reg_note dep_type = (enum reg_note) dep_type1;
7194 enum attr_itanium_class dep_class;
7195 enum attr_itanium_class insn_class;
7196
7197 insn_class = ia64_safe_itanium_class (insn);
7198 dep_class = ia64_safe_itanium_class (dep_insn);
7199
7200 /* Treat true memory dependencies separately. Ignore apparent true
7201 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
7202 if (dep_type == REG_DEP_TRUE
7203 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7204 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7205 return 0;
7206
7207 if (dw == MIN_DEP_WEAK)
7208 /* Store and load are likely to alias, use higher cost to avoid stall. */
7209 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7210 else if (dw > MIN_DEP_WEAK)
7211 {
7212 /* Store and load are less likely to alias. */
7213 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7214 /* Assume there will be no cache conflict for floating-point data.
7215 For integer data, L1 conflict penalty is huge (17 cycles), so we
7216 never assume it will not cause a conflict. */
7217 return 0;
7218 else
7219 return cost;
7220 }
7221
7222 if (dep_type != REG_DEP_OUTPUT)
7223 return cost;
7224
7225 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7226 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7227 return 0;
7228
7229 return cost;
7230 }
7231
7232 /* Like emit_insn_before, but skip cycle_display notes.
7233 ??? When cycle display notes are implemented, update this. */
7234
7235 static void
7236 ia64_emit_insn_before (rtx insn, rtx before)
7237 {
7238 emit_insn_before (insn, before);
7239 }
7240
7241 /* The following function marks insns that produce addresses for load
7242 and store insns. Such insns will be placed into M slots because this
7243 decreases the latency time for Itanium1 (see function
7244 `ia64_produce_address_p' and the DFA descriptions). */
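/* The mark is kept in the insn's `call' flag (see the assignments to
   insn->call below); the DFA descriptions read it back through
   `ia64_produce_address_p'. */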
7245
7246 static void
7247 ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
7248 {
7249 rtx_insn *insn, *next, *next_tail;
7250
7251 /* Before reload, which_alternative is not set, which means that
7252 ia64_safe_itanium_class will produce wrong results for (at least)
7253 move instructions. */
7254 if (!reload_completed)
7255 return;
7256
7257 next_tail = NEXT_INSN (tail);
7258 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7259 if (INSN_P (insn))
7260 insn->call = 0;
7261 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7262 if (INSN_P (insn)
7263 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7264 {
7265 sd_iterator_def sd_it;
7266 dep_t dep;
7267 bool has_mem_op_consumer_p = false;
7268
7269 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7270 {
7271 enum attr_itanium_class c;
7272
7273 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7274 continue;
7275
7276 next = DEP_CON (dep);
7277 c = ia64_safe_itanium_class (next);
7278 if ((c == ITANIUM_CLASS_ST
7279 || c == ITANIUM_CLASS_STF)
7280 && ia64_st_address_bypass_p (insn, next))
7281 {
7282 has_mem_op_consumer_p = true;
7283 break;
7284 }
7285 else if ((c == ITANIUM_CLASS_LD
7286 || c == ITANIUM_CLASS_FLD
7287 || c == ITANIUM_CLASS_FLDP)
7288 && ia64_ld_address_bypass_p (insn, next))
7289 {
7290 has_mem_op_consumer_p = true;
7291 break;
7292 }
7293 }
7294
7295 insn->call = has_mem_op_consumer_p;
7296 }
7297 }
7298
7299 /* We're beginning a new block. Initialize data structures as necessary. */
7300
7301 static void
7302 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7303 int sched_verbose ATTRIBUTE_UNUSED,
7304 int max_ready ATTRIBUTE_UNUSED)
7305 {
7306 #ifdef ENABLE_CHECKING
7307 rtx_insn *insn;
7308
7309 if (!sel_sched_p () && reload_completed)
7310 for (insn = NEXT_INSN (current_sched_info->prev_head);
7311 insn != current_sched_info->next_tail;
7312 insn = NEXT_INSN (insn))
7313 gcc_assert (!SCHED_GROUP_P (insn));
7314 #endif
7315 last_scheduled_insn = NULL;
7316 init_insn_group_barriers ();
7317
7318 current_cycle = 0;
7319 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7320 }
7321
7322 /* We're beginning a scheduling pass. Check assertion. */
7323
7324 static void
7325 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7326 int sched_verbose ATTRIBUTE_UNUSED,
7327 int max_ready ATTRIBUTE_UNUSED)
7328 {
7329 gcc_assert (pending_data_specs == 0);
7330 }
7331
7332 /* Scheduling pass is now finished. Free/reset static variable. */
7333 static void
7334 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7335 int sched_verbose ATTRIBUTE_UNUSED)
7336 {
7337 gcc_assert (pending_data_specs == 0);
7338 }
7339
7340 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7341 speculation check), FALSE otherwise. */
7342 static bool
7343 is_load_p (rtx_insn *insn)
7344 {
7345 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7346
7347 return
7348 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7349 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7350 }
7351
7352 /* If INSN is a memory reference, memoize it in MEM_OPS_IN_GROUP global array
7353 (taking into account the 3-cycle cache reference postponement for stores: Intel
7354 Itanium 2 Reference Manual for Software Development and Optimization,
7355 6.7.3.1). */
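/* For example, on simulated cycle 5 a load bumps
   mem_ops_in_group[5 % 4] == mem_ops_in_group[1], while a store bumps
   mem_ops_in_group[(5 + 3) % 4] == mem_ops_in_group[0], i.e. the slot that
   will be reused three cycles later, when the store is assumed to actually
   reference the cache. */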
7356 static void
7357 record_memory_reference (rtx_insn *insn)
7358 {
7359 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7360
7361 switch (insn_class) {
7362 case ITANIUM_CLASS_FLD:
7363 case ITANIUM_CLASS_LD:
7364 mem_ops_in_group[current_cycle % 4]++;
7365 break;
7366 case ITANIUM_CLASS_STF:
7367 case ITANIUM_CLASS_ST:
7368 mem_ops_in_group[(current_cycle + 3) % 4]++;
7369 break;
7370 default:;
7371 }
7372 }
7373
7374 /* We are about to begin issuing insns for this clock cycle.
7375 Override the default sort algorithm to better slot instructions. */
7376
7377 static int
7378 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7379 int *pn_ready, int clock_var,
7380 int reorder_type)
7381 {
7382 int n_asms;
7383 int n_ready = *pn_ready;
7384 rtx_insn **e_ready = ready + n_ready;
7385 rtx_insn **insnp;
7386
7387 if (sched_verbose)
7388 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7389
7390 if (reorder_type == 0)
7391 {
7392 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7393 n_asms = 0;
7394 for (insnp = ready; insnp < e_ready; insnp++)
7395 if (insnp < e_ready)
7396 {
7397 rtx_insn *insn = *insnp;
7398 enum attr_type t = ia64_safe_type (insn);
7399 if (t == TYPE_UNKNOWN)
7400 {
7401 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7402 || asm_noperands (PATTERN (insn)) >= 0)
7403 {
7404 rtx_insn *lowest = ready[n_asms];
7405 ready[n_asms] = insn;
7406 *insnp = lowest;
7407 n_asms++;
7408 }
7409 else
7410 {
7411 rtx_insn *highest = ready[n_ready - 1];
7412 ready[n_ready - 1] = insn;
7413 *insnp = highest;
7414 return 1;
7415 }
7416 }
7417 }
7418
7419 if (n_asms < n_ready)
7420 {
7421 /* Some normal insns to process. Skip the asms. */
7422 ready += n_asms;
7423 n_ready -= n_asms;
7424 }
7425 else if (n_ready > 0)
7426 return 1;
7427 }
7428
7429 if (ia64_final_schedule)
7430 {
7431 int deleted = 0;
7432 int nr_need_stop = 0;
7433
7434 for (insnp = ready; insnp < e_ready; insnp++)
7435 if (safe_group_barrier_needed (*insnp))
7436 nr_need_stop++;
7437
7438 if (reorder_type == 1 && n_ready == nr_need_stop)
7439 return 0;
7440 if (reorder_type == 0)
7441 return 1;
7442 insnp = e_ready;
7443 /* Move down everything that needs a stop bit, preserving
7444 relative order. */
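/* The ready array keeps the most desirable insn last (that is what the
   scheduler issues first), so rotating an insn down to ready[0]
   effectively defers it. */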
7445 while (insnp-- > ready + deleted)
7446 while (insnp >= ready + deleted)
7447 {
7448 rtx_insn *insn = *insnp;
7449 if (! safe_group_barrier_needed (insn))
7450 break;
7451 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7452 *ready = insn;
7453 deleted++;
7454 }
7455 n_ready -= deleted;
7456 ready += deleted;
7457 }
7458
7459 current_cycle = clock_var;
7460 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7461 {
7462 int moved = 0;
7463
7464 insnp = e_ready;
7465 /* Move down loads/stores, preserving relative order. */
7466 while (insnp-- > ready + moved)
7467 while (insnp >= ready + moved)
7468 {
7469 rtx_insn *insn = *insnp;
7470 if (! is_load_p (insn))
7471 break;
7472 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7473 *ready = insn;
7474 moved++;
7475 }
7476 n_ready -= moved;
7477 ready += moved;
7478 }
7479
7480 return 1;
7481 }
7482
7483 /* We are about to begin issuing insns for this clock cycle. Override
7484 the default sort algorithm to better slot instructions. */
7485
7486 static int
7487 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7488 int *pn_ready, int clock_var)
7489 {
7490 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7491 pn_ready, clock_var, 0);
7492 }
7493
7494 /* Like ia64_sched_reorder, but called after issuing each insn.
7495 Override the default sort algorithm to better slot instructions. */
7496
7497 static int
7498 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7499 int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
7500 int *pn_ready, int clock_var)
7501 {
7502 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7503 clock_var, 1);
7504 }
7505
7506 /* We are about to issue INSN. Return the number of insns left on the
7507 ready queue that can be issued this cycle. */
7508
7509 static int
7510 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7511 int sched_verbose ATTRIBUTE_UNUSED,
7512 rtx_insn *insn,
7513 int can_issue_more ATTRIBUTE_UNUSED)
7514 {
7515 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7516 /* Modulo scheduling does not extend h_i_d when emitting
7517 new instructions. Don't use h_i_d, if we don't have to. */
7518 {
7519 if (DONE_SPEC (insn) & BEGIN_DATA)
7520 pending_data_specs++;
7521 if (CHECK_SPEC (insn) & BEGIN_DATA)
7522 pending_data_specs--;
7523 }
7524
7525 if (DEBUG_INSN_P (insn))
7526 return 1;
7527
7528 last_scheduled_insn = insn;
7529 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7530 if (reload_completed)
7531 {
7532 int needed = group_barrier_needed (insn);
7533
7534 gcc_assert (!needed);
7535 if (CALL_P (insn))
7536 init_insn_group_barriers ();
7537 stops_p [INSN_UID (insn)] = stop_before_p;
7538 stop_before_p = 0;
7539
7540 record_memory_reference (insn);
7541 }
7542 return 1;
7543 }
7544
7545 /* We are choosing insn from the ready queue. Return zero if INSN
7546 can be chosen. */
7547
7548 static int
7549 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
7550 {
7551 gcc_assert (insn && INSN_P (insn));
7552
7553 /* The size of the ALAT is 32. Since we perform conservative
7554 data speculation, we keep the ALAT half-empty. */
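/* Hence the limit of 16 (== 32 / 2) on pending_data_specs in the test below. */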
7555 if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
7556 return ready_index == 0 ? -1 : 1;
7557
7558 if (ready_index == 0)
7559 return 0;
7560
7561 if ((!reload_completed
7562 || !safe_group_barrier_needed (insn))
7563 && (!mflag_sched_mem_insns_hard_limit
7564 || !is_load_p (insn)
7565 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
7566 return 0;
7567
7568 return 1;
7569 }
7570
7571 /* The following variable value is a pseudo-insn used by the DFA insn
7572 scheduler to change the DFA state when the simulated clock is
7573 increased. */
7574
7575 static rtx_insn *dfa_pre_cycle_insn;
7576
7577 /* Returns 1 when a meaningful insn was scheduled between the last group
7578 barrier and LAST. */
7579 static int
7580 scheduled_good_insn (rtx_insn *last)
7581 {
7582 if (last && recog_memoized (last) >= 0)
7583 return 1;
7584
7585 for ( ;
7586 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7587 && !stops_p[INSN_UID (last)];
7588 last = PREV_INSN (last))
7589 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7590 the ebb we're scheduling. */
7591 if (INSN_P (last) && recog_memoized (last) >= 0)
7592 return 1;
7593
7594 return 0;
7595 }
7596
7597 /* We are about to begin issuing INSN. Return nonzero if we cannot
7598 issue it on the given cycle CLOCK and return zero if we should not sort
7599 the ready queue on the next clock start. */
7600
7601 static int
7602 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
7603 int clock, int *sort_p)
7604 {
7605 gcc_assert (insn && INSN_P (insn));
7606
7607 if (DEBUG_INSN_P (insn))
7608 return 0;
7609
7610 /* When a group barrier is needed for insn, last_scheduled_insn
7611 should be set. */
7612 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7613 || last_scheduled_insn);
7614
7615 if ((reload_completed
7616 && (safe_group_barrier_needed (insn)
7617 || (mflag_sched_stop_bits_after_every_cycle
7618 && last_clock != clock
7619 && last_scheduled_insn
7620 && scheduled_good_insn (last_scheduled_insn))))
7621 || (last_scheduled_insn
7622 && (CALL_P (last_scheduled_insn)
7623 || unknown_for_bundling_p (last_scheduled_insn))))
7624 {
7625 init_insn_group_barriers ();
7626
7627 if (verbose && dump)
7628 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7629 last_clock == clock ? " + cycle advance" : "");
7630
7631 stop_before_p = 1;
7632 current_cycle = clock;
7633 mem_ops_in_group[current_cycle % 4] = 0;
7634
7635 if (last_clock == clock)
7636 {
7637 state_transition (curr_state, dfa_stop_insn);
7638 if (TARGET_EARLY_STOP_BITS)
7639 *sort_p = (last_scheduled_insn == NULL_RTX
7640 || ! CALL_P (last_scheduled_insn));
7641 else
7642 *sort_p = 0;
7643 return 1;
7644 }
7645
7646 if (last_scheduled_insn)
7647 {
7648 if (unknown_for_bundling_p (last_scheduled_insn))
7649 state_reset (curr_state);
7650 else
7651 {
7652 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7653 state_transition (curr_state, dfa_stop_insn);
7654 state_transition (curr_state, dfa_pre_cycle_insn);
7655 state_transition (curr_state, NULL);
7656 }
7657 }
7658 }
7659 return 0;
7660 }
7661
7662 /* Implement targetm.sched.h_i_d_extended hook.
7663 Extend internal data structures. */
7664 static void
7665 ia64_h_i_d_extended (void)
7666 {
7667 if (stops_p != NULL)
7668 {
7669 int new_clocks_length = get_max_uid () * 3 / 2;
7670 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7671 clocks_length = new_clocks_length;
7672 }
7673 }
7674 \f
7675
7676 /* This structure describes the data used by the backend to guide scheduling.
7677 When the current scheduling point is switched, this data should be saved
7678 and restored later, if the scheduler returns to this point. */
7679 struct _ia64_sched_context
7680 {
7681 state_t prev_cycle_state;
7682 rtx_insn *last_scheduled_insn;
7683 struct reg_write_state rws_sum[NUM_REGS];
7684 struct reg_write_state rws_insn[NUM_REGS];
7685 int first_instruction;
7686 int pending_data_specs;
7687 int current_cycle;
7688 char mem_ops_in_group[4];
7689 };
7690 typedef struct _ia64_sched_context *ia64_sched_context_t;
7691
7692 /* Allocates a scheduling context. */
7693 static void *
7694 ia64_alloc_sched_context (void)
7695 {
7696 return xmalloc (sizeof (struct _ia64_sched_context));
7697 }
7698
7699 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7700 the global context otherwise. */
7701 static void
7702 ia64_init_sched_context (void *_sc, bool clean_p)
7703 {
7704 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7705
7706 sc->prev_cycle_state = xmalloc (dfa_state_size);
7707 if (clean_p)
7708 {
7709 state_reset (sc->prev_cycle_state);
7710 sc->last_scheduled_insn = NULL;
7711 memset (sc->rws_sum, 0, sizeof (rws_sum));
7712 memset (sc->rws_insn, 0, sizeof (rws_insn));
7713 sc->first_instruction = 1;
7714 sc->pending_data_specs = 0;
7715 sc->current_cycle = 0;
7716 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7717 }
7718 else
7719 {
7720 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7721 sc->last_scheduled_insn = last_scheduled_insn;
7722 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7723 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7724 sc->first_instruction = first_instruction;
7725 sc->pending_data_specs = pending_data_specs;
7726 sc->current_cycle = current_cycle;
7727 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7728 }
7729 }
7730
7731 /* Sets the global scheduling context to the one pointed to by _SC. */
7732 static void
7733 ia64_set_sched_context (void *_sc)
7734 {
7735 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7736
7737 gcc_assert (sc != NULL);
7738
7739 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7740 last_scheduled_insn = sc->last_scheduled_insn;
7741 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7742 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7743 first_instruction = sc->first_instruction;
7744 pending_data_specs = sc->pending_data_specs;
7745 current_cycle = sc->current_cycle;
7746 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7747 }
7748
7749 /* Clears the data in the _SC scheduling context. */
7750 static void
7751 ia64_clear_sched_context (void *_sc)
7752 {
7753 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7754
7755 free (sc->prev_cycle_state);
7756 sc->prev_cycle_state = NULL;
7757 }
7758
7759 /* Frees the _SC scheduling context. */
7760 static void
7761 ia64_free_sched_context (void *_sc)
7762 {
7763 gcc_assert (_sc != NULL);
7764
7765 free (_sc);
7766 }
7767
7768 typedef rtx (* gen_func_t) (rtx, rtx);
7769
7770 /* Return a function that will generate a load of mode MODE_NO
7771 with speculation types TS. */
7772 static gen_func_t
7773 get_spec_load_gen_function (ds_t ts, int mode_no)
7774 {
7775 static gen_func_t gen_ld_[] = {
7776 gen_movbi,
7777 gen_movqi_internal,
7778 gen_movhi_internal,
7779 gen_movsi_internal,
7780 gen_movdi_internal,
7781 gen_movsf_internal,
7782 gen_movdf_internal,
7783 gen_movxf_internal,
7784 gen_movti_internal,
7785 gen_zero_extendqidi2,
7786 gen_zero_extendhidi2,
7787 gen_zero_extendsidi2,
7788 };
7789
7790 static gen_func_t gen_ld_a[] = {
7791 gen_movbi_advanced,
7792 gen_movqi_advanced,
7793 gen_movhi_advanced,
7794 gen_movsi_advanced,
7795 gen_movdi_advanced,
7796 gen_movsf_advanced,
7797 gen_movdf_advanced,
7798 gen_movxf_advanced,
7799 gen_movti_advanced,
7800 gen_zero_extendqidi2_advanced,
7801 gen_zero_extendhidi2_advanced,
7802 gen_zero_extendsidi2_advanced,
7803 };
7804 static gen_func_t gen_ld_s[] = {
7805 gen_movbi_speculative,
7806 gen_movqi_speculative,
7807 gen_movhi_speculative,
7808 gen_movsi_speculative,
7809 gen_movdi_speculative,
7810 gen_movsf_speculative,
7811 gen_movdf_speculative,
7812 gen_movxf_speculative,
7813 gen_movti_speculative,
7814 gen_zero_extendqidi2_speculative,
7815 gen_zero_extendhidi2_speculative,
7816 gen_zero_extendsidi2_speculative,
7817 };
7818 static gen_func_t gen_ld_sa[] = {
7819 gen_movbi_speculative_advanced,
7820 gen_movqi_speculative_advanced,
7821 gen_movhi_speculative_advanced,
7822 gen_movsi_speculative_advanced,
7823 gen_movdi_speculative_advanced,
7824 gen_movsf_speculative_advanced,
7825 gen_movdf_speculative_advanced,
7826 gen_movxf_speculative_advanced,
7827 gen_movti_speculative_advanced,
7828 gen_zero_extendqidi2_speculative_advanced,
7829 gen_zero_extendhidi2_speculative_advanced,
7830 gen_zero_extendsidi2_speculative_advanced,
7831 };
7832 static gen_func_t gen_ld_s_a[] = {
7833 gen_movbi_speculative_a,
7834 gen_movqi_speculative_a,
7835 gen_movhi_speculative_a,
7836 gen_movsi_speculative_a,
7837 gen_movdi_speculative_a,
7838 gen_movsf_speculative_a,
7839 gen_movdf_speculative_a,
7840 gen_movxf_speculative_a,
7841 gen_movti_speculative_a,
7842 gen_zero_extendqidi2_speculative_a,
7843 gen_zero_extendhidi2_speculative_a,
7844 gen_zero_extendsidi2_speculative_a,
7845 };
7846
7847 gen_func_t *gen_ld;
7848
7849 if (ts & BEGIN_DATA)
7850 {
7851 if (ts & BEGIN_CONTROL)
7852 gen_ld = gen_ld_sa;
7853 else
7854 gen_ld = gen_ld_a;
7855 }
7856 else if (ts & BEGIN_CONTROL)
7857 {
7858 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7859 || ia64_needs_block_p (ts))
7860 gen_ld = gen_ld_s;
7861 else
7862 gen_ld = gen_ld_s_a;
7863 }
7864 else if (ts == 0)
7865 gen_ld = gen_ld_;
7866 else
7867 gcc_unreachable ();
7868
7869 return gen_ld[mode_no];
7870 }
7871
7872 /* Constants that help map 'enum machine_mode' to int. */
7873 enum SPEC_MODES
7874 {
7875 SPEC_MODE_INVALID = -1,
7876 SPEC_MODE_FIRST = 0,
7877 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7878 SPEC_MODE_FOR_EXTEND_LAST = 3,
7879 SPEC_MODE_LAST = 8
7880 };
7881
7882 enum
7883 {
7884 /* Offset to reach ZERO_EXTEND patterns. */
7885 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7886 };
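/* With the values above, SPEC_GEN_EXTEND_OFFSET == 8 - 1 + 1 == 8. For
   example, a QImode load (mode index 1) appearing under a ZERO_EXTEND is
   remapped by get_mode_no_for_insn to index 1 + 8 == 9, which selects the
   gen_zero_extendqidi2* entries of the generator tables above. */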
7887
7888 /* Return index of the MODE. */
7889 static int
7890 ia64_mode_to_int (enum machine_mode mode)
7891 {
7892 switch (mode)
7893 {
7894 case BImode: return 0; /* SPEC_MODE_FIRST */
7895 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7896 case HImode: return 2;
7897 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7898 case DImode: return 4;
7899 case SFmode: return 5;
7900 case DFmode: return 6;
7901 case XFmode: return 7;
7902 case TImode:
7903 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7904 mentioned in itanium[12].md. Predicate fp_register_operand also
7905 needs to be defined. Bottom line: better disable for now. */
7906 return SPEC_MODE_INVALID;
7907 default: return SPEC_MODE_INVALID;
7908 }
7909 }
7910
7911 /* Provide information about speculation capabilities. */
7912 static void
7913 ia64_set_sched_flags (spec_info_t spec_info)
7914 {
7915 unsigned int *flags = &(current_sched_info->flags);
7916
7917 if (*flags & SCHED_RGN
7918 || *flags & SCHED_EBB
7919 || *flags & SEL_SCHED)
7920 {
7921 int mask = 0;
7922
7923 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7924 || (mflag_sched_ar_data_spec && reload_completed))
7925 {
7926 mask |= BEGIN_DATA;
7927
7928 if (!sel_sched_p ()
7929 && ((mflag_sched_br_in_data_spec && !reload_completed)
7930 || (mflag_sched_ar_in_data_spec && reload_completed)))
7931 mask |= BE_IN_DATA;
7932 }
7933
7934 if (mflag_sched_control_spec
7935 && (!sel_sched_p ()
7936 || reload_completed))
7937 {
7938 mask |= BEGIN_CONTROL;
7939
7940 if (!sel_sched_p () && mflag_sched_in_control_spec)
7941 mask |= BE_IN_CONTROL;
7942 }
7943
7944 spec_info->mask = mask;
7945
7946 if (mask)
7947 {
7948 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7949
7950 if (mask & BE_IN_SPEC)
7951 *flags |= NEW_BBS;
7952
7953 spec_info->flags = 0;
7954
7955 if ((mask & CONTROL_SPEC)
7956 && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7957 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7958
7959 if (sched_verbose >= 1)
7960 spec_info->dump = sched_dump;
7961 else
7962 spec_info->dump = 0;
7963
7964 if (mflag_sched_count_spec_in_critical_path)
7965 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7966 }
7967 }
7968 else
7969 spec_info->mask = 0;
7970 }
7971
7972 /* If INSN is an appropriate load, return the index of its mode as used by
7973 the speculation generator tables (see SPEC_MODES). Return -1 otherwise. */
7974 static int
7975 get_mode_no_for_insn (rtx_insn *insn)
7976 {
7977 rtx reg, mem, mode_rtx;
7978 int mode_no;
7979 bool extend_p;
7980
7981 extract_insn_cached (insn);
7982
7983 /* We use WHICH_ALTERNATIVE only after reload. This will
7984 guarantee that reload won't touch a speculative insn. */
7985
7986 if (recog_data.n_operands != 2)
7987 return -1;
7988
7989 reg = recog_data.operand[0];
7990 mem = recog_data.operand[1];
7991
7992 /* We should use MEM's mode since REG's mode in presence of
7993 ZERO_EXTEND will always be DImode. */
7994 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7995 /* Process non-speculative ld. */
7996 {
7997 if (!reload_completed)
7998 {
7999 /* Do not speculate into regs like ar.lc. */
8000 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
8001 return -1;
8002
8003 if (!MEM_P (mem))
8004 return -1;
8005
8006 {
8007 rtx mem_reg = XEXP (mem, 0);
8008
8009 if (!REG_P (mem_reg))
8010 return -1;
8011 }
8012
8013 mode_rtx = mem;
8014 }
8015 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
8016 {
8017 gcc_assert (REG_P (reg) && MEM_P (mem));
8018 mode_rtx = mem;
8019 }
8020 else
8021 return -1;
8022 }
8023 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
8024 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
8025 || get_attr_check_load (insn) == CHECK_LOAD_YES)
8026 /* Process speculative ld or ld.c. */
8027 {
8028 gcc_assert (REG_P (reg) && MEM_P (mem));
8029 mode_rtx = mem;
8030 }
8031 else
8032 {
8033 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
8034
8035 if (attr_class == ITANIUM_CLASS_CHK_A
8036 || attr_class == ITANIUM_CLASS_CHK_S_I
8037 || attr_class == ITANIUM_CLASS_CHK_S_F)
8038 /* Process chk. */
8039 mode_rtx = reg;
8040 else
8041 return -1;
8042 }
8043
8044 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
8045
8046 if (mode_no == SPEC_MODE_INVALID)
8047 return -1;
8048
8049 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
8050
8051 if (extend_p)
8052 {
8053 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
8054 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
8055 return -1;
8056
8057 mode_no += SPEC_GEN_EXTEND_OFFSET;
8058 }
8059
8060 return mode_no;
8061 }
8062
8063 /* If X is an unspec part of a speculative load, return its code.
8064 Return -1 otherwise. */
8065 static int
8066 get_spec_unspec_code (const_rtx x)
8067 {
8068 if (GET_CODE (x) != UNSPEC)
8069 return -1;
8070
8071 {
8072 int code;
8073
8074 code = XINT (x, 1);
8075
8076 switch (code)
8077 {
8078 case UNSPEC_LDA:
8079 case UNSPEC_LDS:
8080 case UNSPEC_LDS_A:
8081 case UNSPEC_LDSA:
8082 return code;
8083
8084 default:
8085 return -1;
8086 }
8087 }
8088 }
8089
8090 /* Implement skip_rtx_p hook. */
8091 static bool
8092 ia64_skip_rtx_p (const_rtx x)
8093 {
8094 return get_spec_unspec_code (x) != -1;
8095 }
8096
8097 /* If INSN is a speculative load, return its UNSPEC code.
8098 Return -1 otherwise. */
8099 static int
8100 get_insn_spec_code (const_rtx insn)
8101 {
8102 rtx pat, reg, mem;
8103
8104 pat = PATTERN (insn);
8105
8106 if (GET_CODE (pat) == COND_EXEC)
8107 pat = COND_EXEC_CODE (pat);
8108
8109 if (GET_CODE (pat) != SET)
8110 return -1;
8111
8112 reg = SET_DEST (pat);
8113 if (!REG_P (reg))
8114 return -1;
8115
8116 mem = SET_SRC (pat);
8117 if (GET_CODE (mem) == ZERO_EXTEND)
8118 mem = XEXP (mem, 0);
8119
8120 return get_spec_unspec_code (mem);
8121 }
8122
8123 /* If INSN is a speculative load, return a ds with the speculation types.
8124 Otherwise [if INSN is a normal instruction] return 0. */
8125 static ds_t
8126 ia64_get_insn_spec_ds (rtx_insn *insn)
8127 {
8128 int code = get_insn_spec_code (insn);
8129
8130 switch (code)
8131 {
8132 case UNSPEC_LDA:
8133 return BEGIN_DATA;
8134
8135 case UNSPEC_LDS:
8136 case UNSPEC_LDS_A:
8137 return BEGIN_CONTROL;
8138
8139 case UNSPEC_LDSA:
8140 return BEGIN_DATA | BEGIN_CONTROL;
8141
8142 default:
8143 return 0;
8144 }
8145 }
8146
8147 /* If INSN is a speculative load return a ds with the speculation types that
8148 will be checked.
8149 Otherwise [if INSN is a normal instruction] return 0. */
8150 static ds_t
8151 ia64_get_insn_checked_ds (rtx_insn *insn)
8152 {
8153 int code = get_insn_spec_code (insn);
8154
8155 switch (code)
8156 {
8157 case UNSPEC_LDA:
8158 return BEGIN_DATA | BEGIN_CONTROL;
8159
8160 case UNSPEC_LDS:
8161 return BEGIN_CONTROL;
8162
8163 case UNSPEC_LDS_A:
8164 case UNSPEC_LDSA:
8165 return BEGIN_DATA | BEGIN_CONTROL;
8166
8167 default:
8168 return 0;
8169 }
8170 }
8171
8172 /* Return a speculative load pattern for INSN with speculation types TS
8173 and mode index MODE_NO. The operands of INSN are expected to be in
8174 recog_data already (get_mode_no_for_insn, which calls
8175 extract_insn_cached, has run before we get here). */
8176 static rtx
8177 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
8178 {
8179 rtx pat, new_pat;
8180 gen_func_t gen_load;
8181
8182 gen_load = get_spec_load_gen_function (ts, mode_no);
8183
8184 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8185 copy_rtx (recog_data.operand[1]));
8186
8187 pat = PATTERN (insn);
8188 if (GET_CODE (pat) == COND_EXEC)
8189 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8190 new_pat);
8191
8192 return new_pat;
8193 }
8194
8195 static bool
8196 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8197 ds_t ds ATTRIBUTE_UNUSED)
8198 {
8199 return false;
8200 }
8201
8202 /* Implement targetm.sched.speculate_insn hook.
8203 Check if the INSN can be TS speculative.
8204 If 'no' - return -1.
8205 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
8206 If current pattern of the INSN already provides TS speculation,
8207 return 0. */
8208 static int
8209 ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
8210 {
8211 int mode_no;
8212 int res;
8213
8214 gcc_assert (!(ts & ~SPECULATIVE));
8215
8216 if (ia64_spec_check_p (insn))
8217 return -1;
8218
8219 if ((ts & BE_IN_SPEC)
8220 && !insn_can_be_in_speculative_p (insn, ts))
8221 return -1;
8222
8223 mode_no = get_mode_no_for_insn (insn);
8224
8225 if (mode_no != SPEC_MODE_INVALID)
8226 {
8227 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8228 res = 0;
8229 else
8230 {
8231 res = 1;
8232 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8233 }
8234 }
8235 else
8236 res = -1;
8237
8238 return res;
8239 }
8240
8241 /* Return a function that will generate a check for speculation TS with mode
8242 MODE_NO.
8243 If simple check is needed, pass true for SIMPLE_CHECK_P.
8244 If clearing check is needed, pass true for CLEARING_CHECK_P. */
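/* For example, for a data-speculative load (TS & BEGIN_DATA) with both
   SIMPLE_CHECK_P and CLEARING_CHECK_P true this returns a gen_mov*_clr
   generator, i.e. an ld.c.clr check; with SIMPLE_CHECK_P false it returns
   a gen_advanced_load_check_clr_* generator, i.e. a chk.a.clr. */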
8245 static gen_func_t
8246 get_spec_check_gen_function (ds_t ts, int mode_no,
8247 bool simple_check_p, bool clearing_check_p)
8248 {
8249 static gen_func_t gen_ld_c_clr[] = {
8250 gen_movbi_clr,
8251 gen_movqi_clr,
8252 gen_movhi_clr,
8253 gen_movsi_clr,
8254 gen_movdi_clr,
8255 gen_movsf_clr,
8256 gen_movdf_clr,
8257 gen_movxf_clr,
8258 gen_movti_clr,
8259 gen_zero_extendqidi2_clr,
8260 gen_zero_extendhidi2_clr,
8261 gen_zero_extendsidi2_clr,
8262 };
8263 static gen_func_t gen_ld_c_nc[] = {
8264 gen_movbi_nc,
8265 gen_movqi_nc,
8266 gen_movhi_nc,
8267 gen_movsi_nc,
8268 gen_movdi_nc,
8269 gen_movsf_nc,
8270 gen_movdf_nc,
8271 gen_movxf_nc,
8272 gen_movti_nc,
8273 gen_zero_extendqidi2_nc,
8274 gen_zero_extendhidi2_nc,
8275 gen_zero_extendsidi2_nc,
8276 };
8277 static gen_func_t gen_chk_a_clr[] = {
8278 gen_advanced_load_check_clr_bi,
8279 gen_advanced_load_check_clr_qi,
8280 gen_advanced_load_check_clr_hi,
8281 gen_advanced_load_check_clr_si,
8282 gen_advanced_load_check_clr_di,
8283 gen_advanced_load_check_clr_sf,
8284 gen_advanced_load_check_clr_df,
8285 gen_advanced_load_check_clr_xf,
8286 gen_advanced_load_check_clr_ti,
8287 gen_advanced_load_check_clr_di,
8288 gen_advanced_load_check_clr_di,
8289 gen_advanced_load_check_clr_di,
8290 };
8291 static gen_func_t gen_chk_a_nc[] = {
8292 gen_advanced_load_check_nc_bi,
8293 gen_advanced_load_check_nc_qi,
8294 gen_advanced_load_check_nc_hi,
8295 gen_advanced_load_check_nc_si,
8296 gen_advanced_load_check_nc_di,
8297 gen_advanced_load_check_nc_sf,
8298 gen_advanced_load_check_nc_df,
8299 gen_advanced_load_check_nc_xf,
8300 gen_advanced_load_check_nc_ti,
8301 gen_advanced_load_check_nc_di,
8302 gen_advanced_load_check_nc_di,
8303 gen_advanced_load_check_nc_di,
8304 };
8305 static gen_func_t gen_chk_s[] = {
8306 gen_speculation_check_bi,
8307 gen_speculation_check_qi,
8308 gen_speculation_check_hi,
8309 gen_speculation_check_si,
8310 gen_speculation_check_di,
8311 gen_speculation_check_sf,
8312 gen_speculation_check_df,
8313 gen_speculation_check_xf,
8314 gen_speculation_check_ti,
8315 gen_speculation_check_di,
8316 gen_speculation_check_di,
8317 gen_speculation_check_di,
8318 };
8319
8320 gen_func_t *gen_check;
8321
8322 if (ts & BEGIN_DATA)
8323 {
8324 /* We don't need recovery because even if this is ld.sa,
8325 the ALAT entry will be allocated only if the NAT bit is zero.
8326 So it is enough to use ld.c here. */
8327
8328 if (simple_check_p)
8329 {
8330 gcc_assert (mflag_sched_spec_ldc);
8331
8332 if (clearing_check_p)
8333 gen_check = gen_ld_c_clr;
8334 else
8335 gen_check = gen_ld_c_nc;
8336 }
8337 else
8338 {
8339 if (clearing_check_p)
8340 gen_check = gen_chk_a_clr;
8341 else
8342 gen_check = gen_chk_a_nc;
8343 }
8344 }
8345 else if (ts & BEGIN_CONTROL)
8346 {
8347 if (simple_check_p)
8348 /* We might want to use ld.sa -> ld.c instead of
8349 ld.s -> chk.s. */
8350 {
8351 gcc_assert (!ia64_needs_block_p (ts));
8352
8353 if (clearing_check_p)
8354 gen_check = gen_ld_c_clr;
8355 else
8356 gen_check = gen_ld_c_nc;
8357 }
8358 else
8359 {
8360 gen_check = gen_chk_s;
8361 }
8362 }
8363 else
8364 gcc_unreachable ();
8365
8366 gcc_assert (mode_no >= 0);
8367 return gen_check[mode_no];
8368 }
8369
8370 /* Return nonzero if a load with speculation types TS needs a branchy recovery check. */
8371 static bool
8372 ia64_needs_block_p (ds_t ts)
8373 {
8374 if (ts & BEGIN_DATA)
8375 return !mflag_sched_spec_ldc;
8376
8377 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8378
8379 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8380 }
8381
8382 /* Generate (or regenerate) a recovery check for INSN. */
8383 static rtx
8384 ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
8385 {
8386 rtx op1, pat, check_pat;
8387 gen_func_t gen_check;
8388 int mode_no;
8389
8390 mode_no = get_mode_no_for_insn (insn);
8391 gcc_assert (mode_no >= 0);
8392
8393 if (label)
8394 op1 = label;
8395 else
8396 {
8397 gcc_assert (!ia64_needs_block_p (ds));
8398 op1 = copy_rtx (recog_data.operand[1]);
8399 }
8400
8401 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8402 true);
8403
8404 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8405
8406 pat = PATTERN (insn);
8407 if (GET_CODE (pat) == COND_EXEC)
8408 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8409 check_pat);
8410
8411 return check_pat;
8412 }
8413
8414 /* Return nonzero if X is a branchy recovery check. */
8415 static int
8416 ia64_spec_check_p (rtx x)
8417 {
8418 x = PATTERN (x);
8419 if (GET_CODE (x) == COND_EXEC)
8420 x = COND_EXEC_CODE (x);
8421 if (GET_CODE (x) == SET)
8422 return ia64_spec_check_src_p (SET_SRC (x));
8423 return 0;
8424 }
8425
8426 /* Return nonzero if SRC belongs to a recovery check. */
8427 static int
8428 ia64_spec_check_src_p (rtx src)
8429 {
8430 if (GET_CODE (src) == IF_THEN_ELSE)
8431 {
8432 rtx t;
8433
8434 t = XEXP (src, 0);
8435 if (GET_CODE (t) == NE)
8436 {
8437 t = XEXP (t, 0);
8438
8439 if (GET_CODE (t) == UNSPEC)
8440 {
8441 int code;
8442
8443 code = XINT (t, 1);
8444
8445 if (code == UNSPEC_LDCCLR
8446 || code == UNSPEC_LDCNC
8447 || code == UNSPEC_CHKACLR
8448 || code == UNSPEC_CHKANC
8449 || code == UNSPEC_CHKS)
8450 {
8451 gcc_assert (code != 0);
8452 return code;
8453 }
8454 }
8455 }
8456 }
8457 return 0;
8458 }
8459 \f
8460
8461 /* The following page contains abstract data `bundle states' which are
8462 used for bundling insns (inserting nops and template generation). */
8463
8464 /* The following describes state of insn bundling. */
8465
8466 struct bundle_state
8467 {
8468 /* Unique bundle state number to identify them in the debugging
8469 output */
8470 int unique_num;
8471 rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state */
8472 /* number of nops before and after the insn */
8473 short before_nops_num, after_nops_num;
8474 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8475 insn) */
8476 int cost; /* cost of the state in cycles */
8477 int accumulated_insns_num; /* number of all previous insns including
8478 nops; an L insn counts as 2 insns */
8479 int branch_deviation; /* deviation of previous branches from 3rd slots */
8480 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8481 struct bundle_state *next; /* next state with the same insn_num */
8482 struct bundle_state *originator; /* originator (previous insn state) */
8483 /* All bundle states are in the following chain. */
8484 struct bundle_state *allocated_states_chain;
8485 /* The DFA State after issuing the insn and the nops. */
8486 state_t dfa_state;
8487 };
8488
8489 /* The following array maps an insn number to the corresponding bundle state. */
8490
8491 static struct bundle_state **index_to_bundle_states;
8492
8493 /* The unique number of next bundle state. */
8494
8495 static int bundle_states_num;
8496
8497 /* All allocated bundle states are in the following chain. */
8498
8499 static struct bundle_state *allocated_bundle_states_chain;
8500
8501 /* All allocated but not used bundle states are in the following
8502 chain. */
8503
8504 static struct bundle_state *free_bundle_state_chain;
8505
8506
8507 /* The following function returns a free bundle state. */
8508
8509 static struct bundle_state *
8510 get_free_bundle_state (void)
8511 {
8512 struct bundle_state *result;
8513
8514 if (free_bundle_state_chain != NULL)
8515 {
8516 result = free_bundle_state_chain;
8517 free_bundle_state_chain = result->next;
8518 }
8519 else
8520 {
8521 result = XNEW (struct bundle_state);
8522 result->dfa_state = xmalloc (dfa_state_size);
8523 result->allocated_states_chain = allocated_bundle_states_chain;
8524 allocated_bundle_states_chain = result;
8525 }
8526 result->unique_num = bundle_states_num++;
8527 return result;
8528
8529 }
8530
8531 /* The following function frees the given bundle state. */
8532
8533 static void
8534 free_bundle_state (struct bundle_state *state)
8535 {
8536 state->next = free_bundle_state_chain;
8537 free_bundle_state_chain = state;
8538 }
8539
8540 /* Start work with abstract data `bundle states'. */
8541
8542 static void
8543 initiate_bundle_states (void)
8544 {
8545 bundle_states_num = 0;
8546 free_bundle_state_chain = NULL;
8547 allocated_bundle_states_chain = NULL;
8548 }
8549
8550 /* Finish work with abstract data `bundle states'. */
8551
8552 static void
8553 finish_bundle_states (void)
8554 {
8555 struct bundle_state *curr_state, *next_state;
8556
8557 for (curr_state = allocated_bundle_states_chain;
8558 curr_state != NULL;
8559 curr_state = next_state)
8560 {
8561 next_state = curr_state->allocated_states_chain;
8562 free (curr_state->dfa_state);
8563 free (curr_state);
8564 }
8565 }
8566
8567 /* Hashtable helpers. */
8568
8569 struct bundle_state_hasher : typed_noop_remove <bundle_state>
8570 {
8571 typedef bundle_state value_type;
8572 typedef bundle_state compare_type;
8573 static inline hashval_t hash (const value_type *);
8574 static inline bool equal (const value_type *, const compare_type *);
8575 };
8576
8577 /* The function returns hash of BUNDLE_STATE. */
8578
8579 inline hashval_t
8580 bundle_state_hasher::hash (const value_type *state)
8581 {
8582 unsigned result, i;
8583
8584 for (result = i = 0; i < dfa_state_size; i++)
8585 result += (((unsigned char *) state->dfa_state) [i]
8586 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8587 return result + state->insn_num;
8588 }
8589
8590 /* The function returns nonzero if the bundle state keys are equal. */
8591
8592 inline bool
8593 bundle_state_hasher::equal (const value_type *state1,
8594 const compare_type *state2)
8595 {
8596 return (state1->insn_num == state2->insn_num
8597 && memcmp (state1->dfa_state, state2->dfa_state,
8598 dfa_state_size) == 0);
8599 }
8600
8601 /* Hash table of the bundle states. The key is dfa_state and insn_num
8602 of the bundle states. */
8603
8604 static hash_table<bundle_state_hasher> *bundle_state_table;
8605
8606 /* The function inserts the BUNDLE_STATE into the hash table. The
8607 function returns nonzero if the bundle has been inserted into the
8608 table. The table contains the best bundle state with given key. */
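/* "Best" means lexicographically smallest on the tuple (cost,
   accumulated_insns_num, branch_deviation, middle_bundle_stops), which is
   exactly the comparison spelled out in the condition below. */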
8609
8610 static int
8611 insert_bundle_state (struct bundle_state *bundle_state)
8612 {
8613 struct bundle_state **entry_ptr;
8614
8615 entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
8616 if (*entry_ptr == NULL)
8617 {
8618 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8619 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8620 *entry_ptr = bundle_state;
8621 return TRUE;
8622 }
8623 else if (bundle_state->cost < (*entry_ptr)->cost
8624 || (bundle_state->cost == (*entry_ptr)->cost
8625 && ((*entry_ptr)->accumulated_insns_num
8626 > bundle_state->accumulated_insns_num
8627 || ((*entry_ptr)->accumulated_insns_num
8628 == bundle_state->accumulated_insns_num
8629 && ((*entry_ptr)->branch_deviation
8630 > bundle_state->branch_deviation
8631 || ((*entry_ptr)->branch_deviation
8632 == bundle_state->branch_deviation
8633 && (*entry_ptr)->middle_bundle_stops
8634 > bundle_state->middle_bundle_stops))))))
8635
8636 {
8637 struct bundle_state temp;
8638
8639 temp = **entry_ptr;
8640 **entry_ptr = *bundle_state;
8641 (*entry_ptr)->next = temp.next;
8642 *bundle_state = temp;
8643 }
8644 return FALSE;
8645 }
8646
8647 /* Start work with the hash table. */
8648
8649 static void
8650 initiate_bundle_state_table (void)
8651 {
8652 bundle_state_table = new hash_table<bundle_state_hasher> (50);
8653 }
8654
8655 /* Finish work with the hash table. */
8656
8657 static void
8658 finish_bundle_state_table (void)
8659 {
8660 delete bundle_state_table;
8661 bundle_state_table = NULL;
8662 }
8663
8664 \f
8665
8666 /* The following variable is an insn `nop' used to check bundle states
8667 with different numbers of inserted nops. */
8668
8669 static rtx_insn *ia64_nop;
8670
8671 /* The following function tries to issue NOPS_NUM nops for the current
8672 state without advancing processor cycle. If it failed, the
8673 function returns FALSE and frees the current state. */
8674
8675 static int
8676 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8677 {
8678 int i;
8679
8680 for (i = 0; i < nops_num; i++)
8681 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8682 {
8683 free_bundle_state (curr_state);
8684 return FALSE;
8685 }
8686 return TRUE;
8687 }
8688
8689 /* The following function tries to issue INSN for the current
8690 state without advancing processor cycle. If it failed, the
8691 function returns FALSE and frees the current state. */
8692
8693 static int
8694 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8695 {
8696 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8697 {
8698 free_bundle_state (curr_state);
8699 return FALSE;
8700 }
8701 return TRUE;
8702 }
8703
8704 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8705 starting with ORIGINATOR without advancing the processor cycle. If
8706 TRY_BUNDLE_END_P is TRUE, the function also/only (if
8707 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8708 If it was successful, the function creates a new bundle state and
8709 inserts it into the hash table and into `index_to_bundle_states'. */
8710
8711 static void
8712 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8713 rtx_insn *insn, int try_bundle_end_p,
8714 int only_bundle_end_p)
8715 {
8716 struct bundle_state *curr_state;
8717
8718 curr_state = get_free_bundle_state ();
8719 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8720 curr_state->insn = insn;
8721 curr_state->insn_num = originator->insn_num + 1;
8722 curr_state->cost = originator->cost;
8723 curr_state->originator = originator;
8724 curr_state->before_nops_num = before_nops_num;
8725 curr_state->after_nops_num = 0;
8726 curr_state->accumulated_insns_num
8727 = originator->accumulated_insns_num + before_nops_num;
8728 curr_state->branch_deviation = originator->branch_deviation;
8729 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8730 gcc_assert (insn);
8731 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8732 {
8733 gcc_assert (GET_MODE (insn) != TImode);
8734 if (!try_issue_nops (curr_state, before_nops_num))
8735 return;
8736 if (!try_issue_insn (curr_state, insn))
8737 return;
8738 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8739 if (curr_state->accumulated_insns_num % 3 != 0)
8740 curr_state->middle_bundle_stops++;
8741 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8742 && curr_state->accumulated_insns_num % 3 != 0)
8743 {
8744 free_bundle_state (curr_state);
8745 return;
8746 }
8747 }
8748 else if (GET_MODE (insn) != TImode)
8749 {
8750 if (!try_issue_nops (curr_state, before_nops_num))
8751 return;
8752 if (!try_issue_insn (curr_state, insn))
8753 return;
8754 curr_state->accumulated_insns_num++;
8755 gcc_assert (!unknown_for_bundling_p (insn));
8756
8757 if (ia64_safe_type (insn) == TYPE_L)
8758 curr_state->accumulated_insns_num++;
8759 }
8760 else
8761 {
8762 /* If this is an insn that must be first in a group, then don't allow
8763 nops to be emitted before it. Currently, alloc is the only such
8764 supported instruction. */
8765 /* ??? The bundling automatons should handle this for us, but they do
8766 not yet have support for the first_insn attribute. */
8767 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8768 {
8769 free_bundle_state (curr_state);
8770 return;
8771 }
8772
8773 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8774 state_transition (curr_state->dfa_state, NULL);
8775 curr_state->cost++;
8776 if (!try_issue_nops (curr_state, before_nops_num))
8777 return;
8778 if (!try_issue_insn (curr_state, insn))
8779 return;
8780 curr_state->accumulated_insns_num++;
8781 if (unknown_for_bundling_p (insn))
8782 {
8783 /* Finish bundle containing asm insn. */
8784 curr_state->after_nops_num
8785 = 3 - curr_state->accumulated_insns_num % 3;
8786 curr_state->accumulated_insns_num
8787 += 3 - curr_state->accumulated_insns_num % 3;
8788 }
8789 else if (ia64_safe_type (insn) == TYPE_L)
8790 curr_state->accumulated_insns_num++;
8791 }
8792 if (ia64_safe_type (insn) == TYPE_B)
8793 curr_state->branch_deviation
8794 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8795 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8796 {
8797 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8798 {
8799 state_t dfa_state;
8800 struct bundle_state *curr_state1;
8801 struct bundle_state *allocated_states_chain;
8802
8803 curr_state1 = get_free_bundle_state ();
8804 dfa_state = curr_state1->dfa_state;
8805 allocated_states_chain = curr_state1->allocated_states_chain;
8806 *curr_state1 = *curr_state;
8807 curr_state1->dfa_state = dfa_state;
8808 curr_state1->allocated_states_chain = allocated_states_chain;
8809 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8810 dfa_state_size);
8811 curr_state = curr_state1;
8812 }
8813 if (!try_issue_nops (curr_state,
8814 3 - curr_state->accumulated_insns_num % 3))
8815 return;
8816 curr_state->after_nops_num
8817 = 3 - curr_state->accumulated_insns_num % 3;
8818 curr_state->accumulated_insns_num
8819 += 3 - curr_state->accumulated_insns_num % 3;
8820 }
8821 if (!insert_bundle_state (curr_state))
8822 free_bundle_state (curr_state);
8823 return;
8824 }
8825
8826 /* The following function returns the position in the two-bundle window
8827 for the given STATE. */
8828
8829 static int
8830 get_max_pos (state_t state)
8831 {
8832 if (cpu_unit_reservation_p (state, pos_6))
8833 return 6;
8834 else if (cpu_unit_reservation_p (state, pos_5))
8835 return 5;
8836 else if (cpu_unit_reservation_p (state, pos_4))
8837 return 4;
8838 else if (cpu_unit_reservation_p (state, pos_3))
8839 return 3;
8840 else if (cpu_unit_reservation_p (state, pos_2))
8841 return 2;
8842 else if (cpu_unit_reservation_p (state, pos_1))
8843 return 1;
8844 else
8845 return 0;
8846 }
8847
8848 /* The function returns the code of a possible template for the given
8849 position and state. The function should be called only with the two
8850 position values 3 or 6. We avoid generating F NOPs by putting
8851 templates containing F insns at the end of the template search,
8852 because an undocumented anomaly in McKinley-derived cores can
8853 cause stalls if an F-unit insn (including a NOP) is issued within a
8854 six-cycle window after reading certain application registers (such
8855 as ar.bsp). Furthermore, power considerations also argue against
8856 the use of F-unit instructions unless they're really needed. */
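/* With NR_BUNDLES == 10 the returned code indexes bundle_name[] (see
   get_bundle_name): 0 is .mii, 1 is .mmi, 2 is .mfi, 3 is .mmf, 4 is .bbb,
   5 is .mbb, 6 is .mib, 7 is .mmb, 8 is .mfb and 9 is .mlx. */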
8857
8858 static int
8859 get_template (state_t state, int pos)
8860 {
8861 switch (pos)
8862 {
8863 case 3:
8864 if (cpu_unit_reservation_p (state, _0mmi_))
8865 return 1;
8866 else if (cpu_unit_reservation_p (state, _0mii_))
8867 return 0;
8868 else if (cpu_unit_reservation_p (state, _0mmb_))
8869 return 7;
8870 else if (cpu_unit_reservation_p (state, _0mib_))
8871 return 6;
8872 else if (cpu_unit_reservation_p (state, _0mbb_))
8873 return 5;
8874 else if (cpu_unit_reservation_p (state, _0bbb_))
8875 return 4;
8876 else if (cpu_unit_reservation_p (state, _0mmf_))
8877 return 3;
8878 else if (cpu_unit_reservation_p (state, _0mfi_))
8879 return 2;
8880 else if (cpu_unit_reservation_p (state, _0mfb_))
8881 return 8;
8882 else if (cpu_unit_reservation_p (state, _0mlx_))
8883 return 9;
8884 else
8885 gcc_unreachable ();
8886 case 6:
8887 if (cpu_unit_reservation_p (state, _1mmi_))
8888 return 1;
8889 else if (cpu_unit_reservation_p (state, _1mii_))
8890 return 0;
8891 else if (cpu_unit_reservation_p (state, _1mmb_))
8892 return 7;
8893 else if (cpu_unit_reservation_p (state, _1mib_))
8894 return 6;
8895 else if (cpu_unit_reservation_p (state, _1mbb_))
8896 return 5;
8897 else if (cpu_unit_reservation_p (state, _1bbb_))
8898 return 4;
8899 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8900 return 3;
8901 else if (cpu_unit_reservation_p (state, _1mfi_))
8902 return 2;
8903 else if (cpu_unit_reservation_p (state, _1mfb_))
8904 return 8;
8905 else if (cpu_unit_reservation_p (state, _1mlx_))
8906 return 9;
8907 else
8908 gcc_unreachable ();
8909 default:
8910 gcc_unreachable ();
8911 }
8912 }
8913
8914 /* True when INSN is important for bundling. */
8915
8916 static bool
8917 important_for_bundling_p (rtx_insn *insn)
8918 {
8919 return (INSN_P (insn)
8920 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8921 && GET_CODE (PATTERN (insn)) != USE
8922 && GET_CODE (PATTERN (insn)) != CLOBBER);
8923 }
8924
8925 /* The following function returns the first insn important for insn
8926 bundling starting at INSN and before TAIL. */
8927
8928 static rtx_insn *
8929 get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
8930 {
8931 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8932 if (important_for_bundling_p (insn))
8933 return insn;
8934 return NULL;
8935 }
8936
8937 /* True when INSN is unknown, but important, for bundling. */
8938
8939 static bool
8940 unknown_for_bundling_p (rtx_insn *insn)
8941 {
8942 return (INSN_P (insn)
8943 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
8944 && GET_CODE (PATTERN (insn)) != USE
8945 && GET_CODE (PATTERN (insn)) != CLOBBER);
8946 }
8947
8948 /* Add a bundle selector TEMPLATE0 before INSN. */
8949
8950 static void
8951 ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
8952 {
8953 rtx b = gen_bundle_selector (GEN_INT (template0));
8954
8955 ia64_emit_insn_before (b, insn);
8956 #if NR_BUNDLES == 10
8957 if ((template0 == 4 || template0 == 5)
8958 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8959 {
8960 int i;
8961 rtx note = NULL_RTX;
8962
8963 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8964 first or second slot. If it is and has a REG_EH_REGION note, copy it
8965 to the following nops, as br.call sets rp to the address of the following
8966 bundle and therefore an EH region end must be on a bundle
8967 boundary. */
8968 insn = PREV_INSN (insn);
8969 for (i = 0; i < 3; i++)
8970 {
8971 do
8972 insn = next_active_insn (insn);
8973 while (NONJUMP_INSN_P (insn)
8974 && get_attr_empty (insn) == EMPTY_YES);
8975 if (CALL_P (insn))
8976 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8977 else if (note)
8978 {
8979 int code;
8980
8981 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8982 || code == CODE_FOR_nop_b);
8983 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8984 note = NULL_RTX;
8985 else
8986 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8987 }
8988 }
8989 }
8990 #endif
8991 }
8992
8993 /* The following function does insn bundling. Bundling means
8994 inserting templates and nop insns to fit insn groups into permitted
8995 templates. Instruction scheduling uses NDFA (non-deterministic
8996 finite automata) encoding information about the templates and the
8997 inserted nops. The nondeterminism of the automata permits following
8998 all possible insn sequences very quickly.
8999
9000 Unfortunately it is not possible to get information about inserting
9001 nop insns and used templates from the automata states. The
9002 automata only say that we can issue an insn, possibly inserting
9003 some nops before it and using some template. Therefore insn
9004 bundling in this function is implemented by using DFA
9005 (deterministic finite automata). We follow all possible insn
9006 sequences by inserting 0-2 nops (that is what the NDFA describes for
9007 insn scheduling) before/after each insn being bundled. We know the
9008 start of simulated processor cycle from insn scheduling (insn
9009 starting a new cycle has TImode).
9010
9011 Simple implementation of insn bundling would create enormous
9012 number of possible insn sequences satisfying information about new
9013 cycle ticks taken from the insn scheduling. To make the algorithm
9014 practical we use dynamic programming. Each decision (about
9015 inserting nops and implicitly about previous decisions) is described
9016 by structure bundle_state (see above). If we generate the same
9017 bundle state (key is automaton state after issuing the insns and
9018 nops for it), we reuse the already generated one. As a consequence we
9019 reject some decisions that cannot improve the solution and
9020 reduce the memory needed by the algorithm.
9021
9022 When we reach the end of EBB (extended basic block), we choose the
9023 best sequence and then, moving back in EBB, insert templates for
9024 the best alternative. The templates are taken from querying
9025 automaton state for each insn in chosen bundle states.
9026
9027 So the algorithm makes two (forward and backward) passes through
9028 EBB. */
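/* A sketch of the forward pass: for every important insn we extend each
   bundle state recorded for the previous insn by trying to place 0, 1 or
   2 nops in front of it (2 only for F, B, L and S insns; see the
   issue_nops_and_insn calls below), optionally also padding the current
   bundle with trailing nops when it must end (see bundle_end_p below).
   States that coincide in DFA state and insn number are merged by
   insert_bundle_state, which keeps only the best one. */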
9029
9030 static void
9031 bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
9032 {
9033 struct bundle_state *curr_state, *next_state, *best_state;
9034 rtx_insn *insn, *next_insn;
9035 int insn_num;
9036 int i, bundle_end_p, only_bundle_end_p, asm_p;
9037 int pos = 0, max_pos, template0, template1;
9038 rtx_insn *b;
9039 enum attr_type type;
9040
9041 insn_num = 0;
9042 /* Count insns in the EBB. */
9043 for (insn = NEXT_INSN (prev_head_insn);
9044 insn && insn != tail;
9045 insn = NEXT_INSN (insn))
9046 if (INSN_P (insn))
9047 insn_num++;
9048 if (insn_num == 0)
9049 return;
9050 bundling_p = 1;
9051 dfa_clean_insn_cache ();
9052 initiate_bundle_state_table ();
9053 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
9054 /* First (forward) pass -- generation of bundle states. */
9055 curr_state = get_free_bundle_state ();
9056 curr_state->insn = NULL;
9057 curr_state->before_nops_num = 0;
9058 curr_state->after_nops_num = 0;
9059 curr_state->insn_num = 0;
9060 curr_state->cost = 0;
9061 curr_state->accumulated_insns_num = 0;
9062 curr_state->branch_deviation = 0;
9063 curr_state->middle_bundle_stops = 0;
9064 curr_state->next = NULL;
9065 curr_state->originator = NULL;
9066 state_reset (curr_state->dfa_state);
9067 index_to_bundle_states [0] = curr_state;
9068 insn_num = 0;
9069 /* Shift the cycle mark if it is put on an insn that could be ignored. */
9070 for (insn = NEXT_INSN (prev_head_insn);
9071 insn != tail;
9072 insn = NEXT_INSN (insn))
9073 if (INSN_P (insn)
9074 && !important_for_bundling_p (insn)
9075 && GET_MODE (insn) == TImode)
9076 {
9077 PUT_MODE (insn, VOIDmode);
9078 for (next_insn = NEXT_INSN (insn);
9079 next_insn != tail;
9080 next_insn = NEXT_INSN (next_insn))
9081 if (important_for_bundling_p (next_insn)
9082 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
9083 {
9084 PUT_MODE (next_insn, TImode);
9085 break;
9086 }
9087 }
9088 /* Forward pass: generation of bundle states. */
9089 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
9090 insn != NULL_RTX;
9091 insn = next_insn)
9092 {
9093 gcc_assert (important_for_bundling_p (insn));
9094 type = ia64_safe_type (insn);
9095 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
9096 insn_num++;
9097 index_to_bundle_states [insn_num] = NULL;
9098 for (curr_state = index_to_bundle_states [insn_num - 1];
9099 curr_state != NULL;
9100 curr_state = next_state)
9101 {
9102 pos = curr_state->accumulated_insns_num % 3;
9103 next_state = curr_state->next;
9104 /* We must fill up the current bundle in order to start a
9105 subsequent asm insn in a new bundle. An asm insn is always
9106 placed in a separate bundle. */
9107 only_bundle_end_p
9108 = (next_insn != NULL_RTX
9109 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
9110 && unknown_for_bundling_p (next_insn));
9111 /* We may fill up the current bundle if it is the cycle end
9112 without a group barrier. */
9113 bundle_end_p
9114 = (only_bundle_end_p || next_insn == NULL_RTX
9115 || (GET_MODE (next_insn) == TImode
9116 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
9117 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
9118 || type == TYPE_S)
9119 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
9120 only_bundle_end_p);
9121 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
9122 only_bundle_end_p);
9123 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
9124 only_bundle_end_p);
9125 }
9126 gcc_assert (index_to_bundle_states [insn_num]);
9127 for (curr_state = index_to_bundle_states [insn_num];
9128 curr_state != NULL;
9129 curr_state = curr_state->next)
9130 if (verbose >= 2 && dump)
9131 {
9132 /* This structure is taken from generated code of the
9133 pipeline hazard recognizer (see file insn-attrtab.c).
9134 Please don't forget to change the structure if a new
9135 automaton is added to the .md file. */
9136 struct DFA_chip
9137 {
9138 unsigned short one_automaton_state;
9139 unsigned short oneb_automaton_state;
9140 unsigned short two_automaton_state;
9141 unsigned short twob_automaton_state;
9142 };
9143
9144 fprintf
9145 (dump,
9146 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
9147 curr_state->unique_num,
9148 (curr_state->originator == NULL
9149 ? -1 : curr_state->originator->unique_num),
9150 curr_state->cost,
9151 curr_state->before_nops_num, curr_state->after_nops_num,
9152 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9153 curr_state->middle_bundle_stops,
9154 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9155 INSN_UID (insn));
9156 }
9157 }
9158
9159 /* We should find a solution because the 2nd insn scheduling has
9160 found one. */
9161 gcc_assert (index_to_bundle_states [insn_num]);
9162 /* Find a state corresponding to the best insn sequence. */
9163 best_state = NULL;
9164 for (curr_state = index_to_bundle_states [insn_num];
9165 curr_state != NULL;
9166 curr_state = curr_state->next)
9167 /* We are only looking at the states whose last bundle is completely
9168 filled. Among those, we first prefer insn sequences with minimal cost,
9169 then with minimal inserted nops, and finally with branch insns
9170 placed in the 3rd slots. */
9171 if (curr_state->accumulated_insns_num % 3 == 0
9172 && (best_state == NULL || best_state->cost > curr_state->cost
9173 || (best_state->cost == curr_state->cost
9174 && (curr_state->accumulated_insns_num
9175 < best_state->accumulated_insns_num
9176 || (curr_state->accumulated_insns_num
9177 == best_state->accumulated_insns_num
9178 && (curr_state->branch_deviation
9179 < best_state->branch_deviation
9180 || (curr_state->branch_deviation
9181 == best_state->branch_deviation
9182 && curr_state->middle_bundle_stops
9183 < best_state->middle_bundle_stops)))))))
9184 best_state = curr_state;
9185 /* Second (backward) pass: adding nops and templates. */
9186 gcc_assert (best_state);
9187 insn_num = best_state->before_nops_num;
9188 template0 = template1 = -1;
9189 for (curr_state = best_state;
9190 curr_state->originator != NULL;
9191 curr_state = curr_state->originator)
9192 {
9193 insn = curr_state->insn;
9194 asm_p = unknown_for_bundling_p (insn);
9195 insn_num++;
9196 if (verbose >= 2 && dump)
9197 {
9198 struct DFA_chip
9199 {
9200 unsigned short one_automaton_state;
9201 unsigned short oneb_automaton_state;
9202 unsigned short two_automaton_state;
9203 unsigned short twob_automaton_state;
9204 };
9205
9206 fprintf
9207 (dump,
9208 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9209 curr_state->unique_num,
9210 (curr_state->originator == NULL
9211 ? -1 : curr_state->originator->unique_num),
9212 curr_state->cost,
9213 curr_state->before_nops_num, curr_state->after_nops_num,
9214 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9215 curr_state->middle_bundle_stops,
9216 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9217 INSN_UID (insn));
9218 }
9219 /* Find the position in the current bundle window. The window can
9220 contain at most two bundles. A two-bundle window means that
9221 the processor will make two bundle rotations. */
9222 max_pos = get_max_pos (curr_state->dfa_state);
9223 if (max_pos == 6
9224 /* The following (negative template number) means that the
9225 processor did one bundle rotation. */
9226 || (max_pos == 3 && template0 < 0))
9227 {
9228 /* We are at the end of the window -- find template(s) for
9229 its bundle(s). */
9230 pos = max_pos;
9231 if (max_pos == 3)
9232 template0 = get_template (curr_state->dfa_state, 3);
9233 else
9234 {
9235 template1 = get_template (curr_state->dfa_state, 3);
9236 template0 = get_template (curr_state->dfa_state, 6);
9237 }
9238 }
9239 if (max_pos > 3 && template1 < 0)
9240 /* This may happen when we have a stop inside a bundle. */
9241 {
9242 gcc_assert (pos <= 3);
9243 template1 = get_template (curr_state->dfa_state, 3);
9244 pos += 3;
9245 }
9246 if (!asm_p)
9247 /* Emit nops after the current insn. */
9248 for (i = 0; i < curr_state->after_nops_num; i++)
9249 {
9250 rtx nop_pat = gen_nop ();
9251 rtx_insn *nop = emit_insn_after (nop_pat, insn);
9252 pos--;
9253 gcc_assert (pos >= 0);
9254 if (pos % 3 == 0)
9255 {
9256 /* We are at the start of a bundle: emit the template
9257 (it should be defined). */
9258 gcc_assert (template0 >= 0);
9259 ia64_add_bundle_selector_before (template0, nop);
9260 /* If we have a two-bundle window, we make one bundle
9261 rotation. Otherwise template0 will be undefined
9262 (a negative value). */
9263 template0 = template1;
9264 template1 = -1;
9265 }
9266 }
9267 /* Move the position backward in the window. A group barrier has
9268 no slot. An asm insn takes a whole bundle. */
9269 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9270 && !unknown_for_bundling_p (insn))
9271 pos--;
9272 /* Long insn takes 2 slots. */
9273 if (ia64_safe_type (insn) == TYPE_L)
9274 pos--;
9275 gcc_assert (pos >= 0);
9276 if (pos % 3 == 0
9277 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9278 && !unknown_for_bundling_p (insn))
9279 {
9280 /* The current insn is at the bundle start: emit the
9281 template. */
9282 gcc_assert (template0 >= 0);
9283 ia64_add_bundle_selector_before (template0, insn);
9284 b = PREV_INSN (insn);
9285 insn = b;
9286 /* See comment above in analogous place for emitting nops
9287 after the insn. */
9288 template0 = template1;
9289 template1 = -1;
9290 }
9291 /* Emit nops before the current insn. */
9292 for (i = 0; i < curr_state->before_nops_num; i++)
9293 {
9294 rtx nop_pat = gen_nop ();
9295 ia64_emit_insn_before (nop_pat, insn);
9296 rtx_insn *nop = PREV_INSN (insn);
9297 insn = nop;
9298 pos--;
9299 gcc_assert (pos >= 0);
9300 if (pos % 3 == 0)
9301 {
9302 /* See comment above in analogous place for emitting nops
9303 after the insn. */
9304 gcc_assert (template0 >= 0);
9305 ia64_add_bundle_selector_before (template0, insn);
9306 b = PREV_INSN (insn);
9307 insn = b;
9308 template0 = template1;
9309 template1 = -1;
9310 }
9311 }
9312 }
9313
9314 #ifdef ENABLE_CHECKING
9315 {
9316 /* Assert the correct calculation of middle_bundle_stops. */
9317 int num = best_state->middle_bundle_stops;
9318 bool start_bundle = true, end_bundle = false;
9319
9320 for (insn = NEXT_INSN (prev_head_insn);
9321 insn && insn != tail;
9322 insn = NEXT_INSN (insn))
9323 {
9324 if (!INSN_P (insn))
9325 continue;
9326 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9327 start_bundle = true;
9328 else
9329 {
9330 rtx_insn *next_insn;
9331
9332 for (next_insn = NEXT_INSN (insn);
9333 next_insn && next_insn != tail;
9334 next_insn = NEXT_INSN (next_insn))
9335 if (INSN_P (next_insn)
9336 && (ia64_safe_itanium_class (next_insn)
9337 != ITANIUM_CLASS_IGNORE
9338 || recog_memoized (next_insn)
9339 == CODE_FOR_bundle_selector)
9340 && GET_CODE (PATTERN (next_insn)) != USE
9341 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9342 break;
9343
9344 end_bundle = next_insn == NULL_RTX
9345 || next_insn == tail
9346 || (INSN_P (next_insn)
9347 && recog_memoized (next_insn)
9348 == CODE_FOR_bundle_selector);
9349 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9350 && !start_bundle && !end_bundle
9351 && next_insn
9352 && !unknown_for_bundling_p (next_insn))
9353 num--;
9354
9355 start_bundle = false;
9356 }
9357 }
9358
9359 gcc_assert (num == 0);
9360 }
9361 #endif
9362
9363 free (index_to_bundle_states);
9364 finish_bundle_state_table ();
9365 bundling_p = 0;
9366 dfa_clean_insn_cache ();
9367 }
9368
9369 /* The following function is called at the end of scheduling BB or
9370 EBB. After reload, it inserts stop bits and does insn bundling. */
9371
9372 static void
9373 ia64_sched_finish (FILE *dump, int sched_verbose)
9374 {
9375 if (sched_verbose)
9376 fprintf (dump, "// Finishing schedule.\n");
9377 if (!reload_completed)
9378 return;
9379 if (reload_completed)
9380 {
9381 final_emit_insn_group_barriers (dump);
9382 bundling (dump, sched_verbose, current_sched_info->prev_head,
9383 current_sched_info->next_tail);
9384 if (sched_verbose && dump)
9385 fprintf (dump, "// finishing %d-%d\n",
9386 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9387 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9388
9389 return;
9390 }
9391 }
9392
9393 /* The following function inserts stop bits in scheduled BB or EBB. */
9394
9395 static void
9396 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9397 {
9398 rtx_insn *insn;
9399 int need_barrier_p = 0;
9400 int seen_good_insn = 0;
9401
9402 init_insn_group_barriers ();
9403
9404 for (insn = NEXT_INSN (current_sched_info->prev_head);
9405 insn != current_sched_info->next_tail;
9406 insn = NEXT_INSN (insn))
9407 {
9408 if (BARRIER_P (insn))
9409 {
9410 rtx_insn *last = prev_active_insn (insn);
9411
9412 if (! last)
9413 continue;
9414 if (JUMP_TABLE_DATA_P (last))
9415 last = prev_active_insn (last);
9416 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9417 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9418
9419 init_insn_group_barriers ();
9420 seen_good_insn = 0;
9421 need_barrier_p = 0;
9422 }
9423 else if (NONDEBUG_INSN_P (insn))
9424 {
9425 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9426 {
9427 init_insn_group_barriers ();
9428 seen_good_insn = 0;
9429 need_barrier_p = 0;
9430 }
9431 else if (need_barrier_p || group_barrier_needed (insn)
9432 || (mflag_sched_stop_bits_after_every_cycle
9433 && GET_MODE (insn) == TImode
9434 && seen_good_insn))
9435 {
9436 if (TARGET_EARLY_STOP_BITS)
9437 {
9438 rtx_insn *last;
9439
9440 for (last = insn;
9441 last != current_sched_info->prev_head;
9442 last = PREV_INSN (last))
9443 if (INSN_P (last) && GET_MODE (last) == TImode
9444 && stops_p [INSN_UID (last)])
9445 break;
9446 if (last == current_sched_info->prev_head)
9447 last = insn;
9448 last = prev_active_insn (last);
9449 if (last
9450 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9451 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9452 last);
9453 init_insn_group_barriers ();
9454 for (last = NEXT_INSN (last);
9455 last != insn;
9456 last = NEXT_INSN (last))
9457 if (INSN_P (last))
9458 {
9459 group_barrier_needed (last);
9460 if (recog_memoized (last) >= 0
9461 && important_for_bundling_p (last))
9462 seen_good_insn = 1;
9463 }
9464 }
9465 else
9466 {
9467 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9468 insn);
9469 init_insn_group_barriers ();
9470 seen_good_insn = 0;
9471 }
9472 group_barrier_needed (insn);
9473 if (recog_memoized (insn) >= 0
9474 && important_for_bundling_p (insn))
9475 seen_good_insn = 1;
9476 }
9477 else if (recog_memoized (insn) >= 0
9478 && important_for_bundling_p (insn))
9479 seen_good_insn = 1;
9480 need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
9481 }
9482 }
9483 }
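/* Roughly what the inserted stop bits look like in the final assembly
   (an assumed illustration, not output from a real dump): the
   insn_group_barrier insns above are printed as ";;" between instruction
   groups, e.g.

       ld8 r14 = [r32]
       ;;                        // stop: the add below reads r14
       add r15 = r14, r33

   The bundling pass above then wraps such groups in bundles with
   templates like .mii or .mmi.  */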
9484
9485 \f
9486
9487 /* The following function returns the lookahead depth used by the
9488 first-cycle multipass DFA insn scheduler. */
9489
9490 static int
9491 ia64_first_cycle_multipass_dfa_lookahead (void)
9492 {
9493 return (reload_completed ? 6 : 4);
9494 }
9495
9496 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9497
9498 static void
9499 ia64_init_dfa_pre_cycle_insn (void)
9500 {
9501 if (temp_dfa_state == NULL)
9502 {
9503 dfa_state_size = state_size ();
9504 temp_dfa_state = xmalloc (dfa_state_size);
9505 prev_cycle_state = xmalloc (dfa_state_size);
9506 }
9507 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9508 SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9509 recog_memoized (dfa_pre_cycle_insn);
9510 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9511 SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9512 recog_memoized (dfa_stop_insn);
9513 }
9514
9515 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9516 used by the DFA insn scheduler. */
9517
9518 static rtx
9519 ia64_dfa_pre_cycle_insn (void)
9520 {
9521 return dfa_pre_cycle_insn;
9522 }
9523
9524 /* The following function returns TRUE if PRODUCER (of type ilog or
9525 ld) produces an address for CONSUMER (of type st or stf). */
9526
9527 int
9528 ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9529 {
9530 rtx dest, reg, mem;
9531
9532 gcc_assert (producer && consumer);
9533 dest = ia64_single_set (producer);
9534 gcc_assert (dest);
9535 reg = SET_DEST (dest);
9536 gcc_assert (reg);
9537 if (GET_CODE (reg) == SUBREG)
9538 reg = SUBREG_REG (reg);
9539 gcc_assert (GET_CODE (reg) == REG);
9540
9541 dest = ia64_single_set (consumer);
9542 gcc_assert (dest);
9543 mem = SET_DEST (dest);
9544 gcc_assert (mem && GET_CODE (mem) == MEM);
9545 return reg_mentioned_p (reg, mem);
9546 }
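/* For example (an illustrative pair of insns, not taken from real output),
   the bypass above applies when an ALU insn computes the address register
   that a subsequent store dereferences:

       add r14 = r32, r33        // PRODUCER: computes the address
       st8 [r14] = r35           // CONSUMER: uses r14 as the store address
*/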
9547
9548 /* The following function returns TRUE if PRODUCER (of type ilog or
9549 ld) produces an address for CONSUMER (of type ld or fld). */
9550
9551 int
9552 ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9553 {
9554 rtx dest, src, reg, mem;
9555
9556 gcc_assert (producer && consumer);
9557 dest = ia64_single_set (producer);
9558 gcc_assert (dest);
9559 reg = SET_DEST (dest);
9560 gcc_assert (reg);
9561 if (GET_CODE (reg) == SUBREG)
9562 reg = SUBREG_REG (reg);
9563 gcc_assert (GET_CODE (reg) == REG);
9564
9565 src = ia64_single_set (consumer);
9566 gcc_assert (src);
9567 mem = SET_SRC (src);
9568 gcc_assert (mem);
9569
9570 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9571 mem = XVECEXP (mem, 0, 0);
9572 else if (GET_CODE (mem) == IF_THEN_ELSE)
9573 /* ??? Is this bypass necessary for ld.c? */
9574 {
9575 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9576 mem = XEXP (mem, 1);
9577 }
9578
9579 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9580 mem = XEXP (mem, 0);
9581
9582 if (GET_CODE (mem) == UNSPEC)
9583 {
9584 int c = XINT (mem, 1);
9585
9586 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9587 || c == UNSPEC_LDSA);
9588 mem = XVECEXP (mem, 0, 0);
9589 }
9590
9591 /* Note that LO_SUM is used for GOT loads. */
9592 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9593
9594 return reg_mentioned_p (reg, mem);
9595 }
9596
9597 /* The following function returns TRUE if INSN produces an address
9598 for a load/store insn. We will place such insns into an M slot
9599 because that decreases their latency. */
9600
9601 int
9602 ia64_produce_address_p (rtx insn)
9603 {
9604 return insn->call;
9605 }
9606
9607 \f
9608 /* Emit pseudo-ops for the assembler to describe predicate relations.
9609 At present this assumes that we only consider predicate pairs to
9610 be mutex, and that the assembler can deduce proper values from
9611 straight-line code. */
9612
9613 static void
9614 emit_predicate_relation_info (void)
9615 {
9616 basic_block bb;
9617
9618 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9619 {
9620 int r;
9621 rtx_insn *head = BB_HEAD (bb);
9622
9623 /* We only need such notes at code labels. */
9624 if (! LABEL_P (head))
9625 continue;
9626 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9627 head = NEXT_INSN (head);
9628
9629 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9630 grabbing the entire block of predicate registers. */
9631 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9632 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9633 {
9634 rtx p = gen_rtx_REG (BImode, r);
9635 rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
9636 if (head == BB_END (bb))
9637 BB_END (bb) = n;
9638 head = n;
9639 }
9640 }
9641
9642 /* Look for conditional calls that do not return, and protect predicate
9643 relations around them. Otherwise the assembler will assume the call
9644 returns, and complain about uses of call-clobbered predicates after
9645 the call. */
9646 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9647 {
9648 rtx_insn *insn = BB_HEAD (bb);
9649
9650 while (1)
9651 {
9652 if (CALL_P (insn)
9653 && GET_CODE (PATTERN (insn)) == COND_EXEC
9654 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9655 {
9656 rtx_insn *b =
9657 emit_insn_before (gen_safe_across_calls_all (), insn);
9658 rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9659 if (BB_HEAD (bb) == insn)
9660 BB_HEAD (bb) = b;
9661 if (BB_END (bb) == insn)
9662 BB_END (bb) = a;
9663 }
9664
9665 if (insn == BB_END (bb))
9666 break;
9667 insn = NEXT_INSN (insn);
9668 }
9669 }
9670 }
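/* The pseudo-ops emitted above look roughly like the following (an assumed
   rendering; the exact spelling comes from the pred_rel_mutex and
   safe_across_calls_* patterns in ia64.md):

       .pred.rel.mutex p6, p7
       .pred.safe_across_calls p1-p5, p16-p63

   which tell the assembler that a predicate pair is mutually exclusive and
   which predicates it may assume are preserved across calls.  */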
9671
9672 /* Perform machine dependent operations on the rtl chain INSNS. */
9673
9674 static void
9675 ia64_reorg (void)
9676 {
9677 /* We are freeing block_for_insn in the toplev to keep compatibility
9678 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9679 compute_bb_for_insn ();
9680
9681 /* If optimizing, we'll have split before scheduling. */
9682 if (optimize == 0)
9683 split_all_insns ();
9684
9685 if (optimize && flag_schedule_insns_after_reload
9686 && dbg_cnt (ia64_sched2))
9687 {
9688 basic_block bb;
9689 timevar_push (TV_SCHED2);
9690 ia64_final_schedule = 1;
9691
9692 /* We can't let modulo-sched prevent us from scheduling any bbs,
9693 since we need the final schedule to produce bundle information. */
9694 FOR_EACH_BB_FN (bb, cfun)
9695 bb->flags &= ~BB_DISABLE_SCHEDULE;
9696
9697 initiate_bundle_states ();
9698 ia64_nop = make_insn_raw (gen_nop ());
9699 SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
9700 recog_memoized (ia64_nop);
9701 clocks_length = get_max_uid () + 1;
9702 stops_p = XCNEWVEC (char, clocks_length);
9703
9704 if (ia64_tune == PROCESSOR_ITANIUM2)
9705 {
9706 pos_1 = get_cpu_unit_code ("2_1");
9707 pos_2 = get_cpu_unit_code ("2_2");
9708 pos_3 = get_cpu_unit_code ("2_3");
9709 pos_4 = get_cpu_unit_code ("2_4");
9710 pos_5 = get_cpu_unit_code ("2_5");
9711 pos_6 = get_cpu_unit_code ("2_6");
9712 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9713 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9714 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9715 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9716 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9717 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9718 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9719 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9720 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9721 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9722 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9723 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9724 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9725 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9726 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9727 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9728 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9729 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9730 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9731 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9732 }
9733 else
9734 {
9735 pos_1 = get_cpu_unit_code ("1_1");
9736 pos_2 = get_cpu_unit_code ("1_2");
9737 pos_3 = get_cpu_unit_code ("1_3");
9738 pos_4 = get_cpu_unit_code ("1_4");
9739 pos_5 = get_cpu_unit_code ("1_5");
9740 pos_6 = get_cpu_unit_code ("1_6");
9741 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9742 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9743 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9744 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9745 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9746 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9747 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9748 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9749 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9750 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9751 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9752 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9753 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9754 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9755 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9756 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9757 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9758 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9759 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9760 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9761 }
9762
9763 if (flag_selective_scheduling2
9764 && !maybe_skip_selective_scheduling ())
9765 run_selective_scheduling ();
9766 else
9767 schedule_ebbs ();
9768
9769 /* Redo the alignment computation, as it might have gone wrong. */
9770 compute_alignments ();
9771
9772 /* We cannot reuse this one because it has been corrupted by the
9773 evil glat. */
9774 finish_bundle_states ();
9775 free (stops_p);
9776 stops_p = NULL;
9777 emit_insn_group_barriers (dump_file);
9778
9779 ia64_final_schedule = 0;
9780 timevar_pop (TV_SCHED2);
9781 }
9782 else
9783 emit_all_insn_group_barriers (dump_file);
9784
9785 df_analyze ();
9786
9787 /* A call must not be the last instruction in a function, so that the
9788 return address is still within the function and unwinding works
9789 properly. Note that IA-64 differs from dwarf2 on this point. */
9790 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9791 {
9792 rtx_insn *insn;
9793 int saw_stop = 0;
9794
9795 insn = get_last_insn ();
9796 if (! INSN_P (insn))
9797 insn = prev_active_insn (insn);
9798 if (insn)
9799 {
9800 /* Skip over insns that expand to nothing. */
9801 while (NONJUMP_INSN_P (insn)
9802 && get_attr_empty (insn) == EMPTY_YES)
9803 {
9804 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9805 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9806 saw_stop = 1;
9807 insn = prev_active_insn (insn);
9808 }
9809 if (CALL_P (insn))
9810 {
9811 if (! saw_stop)
9812 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9813 emit_insn (gen_break_f ());
9814 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9815 }
9816 }
9817 }
9818
9819 emit_predicate_relation_info ();
9820
9821 if (flag_var_tracking)
9822 {
9823 timevar_push (TV_VAR_TRACKING);
9824 variable_tracking_main ();
9825 timevar_pop (TV_VAR_TRACKING);
9826 }
9827 df_finish_pass (false);
9828 }
9829 \f
9830 /* Return true if REGNO is used by the epilogue. */
9831
9832 int
9833 ia64_epilogue_uses (int regno)
9834 {
9835 switch (regno)
9836 {
9837 case R_GR (1):
9838 /* With a call to a function in another module, we will write a new
9839 value to "gp". After returning from such a call, we need to make
9840 sure the function restores the original gp-value, even if the
9841 function itself does not use the gp anymore. */
9842 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9843
9844 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9845 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9846 /* For functions defined with the syscall_linkage attribute, all
9847 input registers are marked as live at all function exits. This
9848 prevents the register allocator from using the input registers,
9849 which in turn makes it possible to restart a system call after
9850 an interrupt without having to save/restore the input registers.
9851 This also prevents kernel data from leaking to application code. */
9852 return lookup_attribute ("syscall_linkage",
9853 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9854
9855 case R_BR (0):
9856 /* Conditional return patterns can't represent the use of `b0' as
9857 the return address, so we force the value live this way. */
9858 return 1;
9859
9860 case AR_PFS_REGNUM:
9861 /* Likewise for ar.pfs, which is used by br.ret. */
9862 return 1;
9863
9864 default:
9865 return 0;
9866 }
9867 }
9868
9869 /* Return true if REGNO is used by the frame unwinder. */
9870
9871 int
9872 ia64_eh_uses (int regno)
9873 {
9874 unsigned int r;
9875
9876 if (! reload_completed)
9877 return 0;
9878
9879 if (regno == 0)
9880 return 0;
9881
9882 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9883 if (regno == current_frame_info.r[r]
9884 || regno == emitted_frame_related_regs[r])
9885 return 1;
9886
9887 return 0;
9888 }
9889 \f
9890 /* Return true if this goes in small data/bss. */
9891
9892 /* ??? We could also support our own long data here, generating movl/add/ld8
9893 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9894 code faster because there is one less load. This also includes incomplete
9895 types which can't go in sdata/sbss. */
9896
9897 static bool
9898 ia64_in_small_data_p (const_tree exp)
9899 {
9900 if (TARGET_NO_SDATA)
9901 return false;
9902
9903 /* We want to merge strings, so we never consider them small data. */
9904 if (TREE_CODE (exp) == STRING_CST)
9905 return false;
9906
9907 /* Functions are never small data. */
9908 if (TREE_CODE (exp) == FUNCTION_DECL)
9909 return false;
9910
9911 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9912 {
9913 const char *section = DECL_SECTION_NAME (exp);
9914
9915 if (strcmp (section, ".sdata") == 0
9916 || strncmp (section, ".sdata.", 7) == 0
9917 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9918 || strcmp (section, ".sbss") == 0
9919 || strncmp (section, ".sbss.", 6) == 0
9920 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9921 return true;
9922 }
9923 else
9924 {
9925 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9926
9927 /* If this is an incomplete type with size 0, then we can't put it
9928 in sdata because it might be too big when completed. */
9929 if (size > 0 && size <= ia64_section_threshold)
9930 return true;
9931 }
9932
9933 return false;
9934 }
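/* The point of the small data test above: objects placed in .sdata/.sbss
   can be addressed with a single gp-relative add, e.g. (an assumed code
   sketch, with illustrative register numbers)

       addl r14 = @gprel(small_var), gp    // one insn to form the address
       ld4  r15 = [r14]

   whereas other data needs an extra load of the address from the linkage
   table, roughly an addl/ld8 of @ltoff(var) followed by the data load.  */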
9935 \f
9936 /* Output assembly directives for prologue regions. */
9937
9938 /* True if the current basic block is the last block of the function. */
9939
9940 static bool last_block;
9941
9942 /* True if we need a copy_state command at the start of the next block. */
9943
9944 static bool need_copy_state;
9945
9946 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9947 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9948 #endif
9949
9950 /* The function emits unwind directives for the start of an epilogue. */
9951
9952 static void
9953 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9954 bool unwind, bool frame ATTRIBUTE_UNUSED)
9955 {
9956 /* If this isn't the last block of the function, then we need to label the
9957 current state, and copy it back in at the start of the next block. */
9958
9959 if (!last_block)
9960 {
9961 if (unwind)
9962 fprintf (asm_out_file, "\t.label_state %d\n",
9963 ++cfun->machine->state_num);
9964 need_copy_state = true;
9965 }
9966
9967 if (unwind)
9968 fprintf (asm_out_file, "\t.restore sp\n");
9969 }
9970
9971 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9972
9973 static void
9974 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9975 bool unwind, bool frame)
9976 {
9977 rtx dest = SET_DEST (pat);
9978 rtx src = SET_SRC (pat);
9979
9980 if (dest == stack_pointer_rtx)
9981 {
9982 if (GET_CODE (src) == PLUS)
9983 {
9984 rtx op0 = XEXP (src, 0);
9985 rtx op1 = XEXP (src, 1);
9986
9987 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9988
9989 if (INTVAL (op1) < 0)
9990 {
9991 gcc_assert (!frame_pointer_needed);
9992 if (unwind)
9993 fprintf (asm_out_file,
9994 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9995 -INTVAL (op1));
9996 }
9997 else
9998 process_epilogue (asm_out_file, insn, unwind, frame);
9999 }
10000 else
10001 {
10002 gcc_assert (src == hard_frame_pointer_rtx);
10003 process_epilogue (asm_out_file, insn, unwind, frame);
10004 }
10005 }
10006 else if (dest == hard_frame_pointer_rtx)
10007 {
10008 gcc_assert (src == stack_pointer_rtx);
10009 gcc_assert (frame_pointer_needed);
10010
10011 if (unwind)
10012 fprintf (asm_out_file, "\t.vframe r%d\n",
10013 ia64_dbx_register_number (REGNO (dest)));
10014 }
10015 else
10016 gcc_unreachable ();
10017 }
10018
10019 /* This function processes a SET pattern for REG_CFA_REGISTER. */
10020
10021 static void
10022 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
10023 {
10024 rtx dest = SET_DEST (pat);
10025 rtx src = SET_SRC (pat);
10026 int dest_regno = REGNO (dest);
10027 int src_regno;
10028
10029 if (src == pc_rtx)
10030 {
10031 /* Saving return address pointer. */
10032 if (unwind)
10033 fprintf (asm_out_file, "\t.save rp, r%d\n",
10034 ia64_dbx_register_number (dest_regno));
10035 return;
10036 }
10037
10038 src_regno = REGNO (src);
10039
10040 switch (src_regno)
10041 {
10042 case PR_REG (0):
10043 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
10044 if (unwind)
10045 fprintf (asm_out_file, "\t.save pr, r%d\n",
10046 ia64_dbx_register_number (dest_regno));
10047 break;
10048
10049 case AR_UNAT_REGNUM:
10050 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
10051 if (unwind)
10052 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
10053 ia64_dbx_register_number (dest_regno));
10054 break;
10055
10056 case AR_LC_REGNUM:
10057 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
10058 if (unwind)
10059 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
10060 ia64_dbx_register_number (dest_regno));
10061 break;
10062
10063 default:
10064 /* Everything else should indicate being stored to memory. */
10065 gcc_unreachable ();
10066 }
10067 }
10068
10069 /* This function processes a SET pattern for REG_CFA_OFFSET. */
10070
10071 static void
10072 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
10073 {
10074 rtx dest = SET_DEST (pat);
10075 rtx src = SET_SRC (pat);
10076 int src_regno = REGNO (src);
10077 const char *saveop;
10078 HOST_WIDE_INT off;
10079 rtx base;
10080
10081 gcc_assert (MEM_P (dest));
10082 if (GET_CODE (XEXP (dest, 0)) == REG)
10083 {
10084 base = XEXP (dest, 0);
10085 off = 0;
10086 }
10087 else
10088 {
10089 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
10090 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
10091 base = XEXP (XEXP (dest, 0), 0);
10092 off = INTVAL (XEXP (XEXP (dest, 0), 1));
10093 }
10094
10095 if (base == hard_frame_pointer_rtx)
10096 {
10097 saveop = ".savepsp";
10098 off = - off;
10099 }
10100 else
10101 {
10102 gcc_assert (base == stack_pointer_rtx);
10103 saveop = ".savesp";
10104 }
10105
10106 src_regno = REGNO (src);
10107 switch (src_regno)
10108 {
10109 case BR_REG (0):
10110 gcc_assert (!current_frame_info.r[reg_save_b0]);
10111 if (unwind)
10112 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
10113 saveop, off);
10114 break;
10115
10116 case PR_REG (0):
10117 gcc_assert (!current_frame_info.r[reg_save_pr]);
10118 if (unwind)
10119 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
10120 saveop, off);
10121 break;
10122
10123 case AR_LC_REGNUM:
10124 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
10125 if (unwind)
10126 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
10127 saveop, off);
10128 break;
10129
10130 case AR_PFS_REGNUM:
10131 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
10132 if (unwind)
10133 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
10134 saveop, off);
10135 break;
10136
10137 case AR_UNAT_REGNUM:
10138 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
10139 if (unwind)
10140 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
10141 saveop, off);
10142 break;
10143
10144 case GR_REG (4):
10145 case GR_REG (5):
10146 case GR_REG (6):
10147 case GR_REG (7):
10148 if (unwind)
10149 fprintf (asm_out_file, "\t.save.g 0x%x\n",
10150 1 << (src_regno - GR_REG (4)));
10151 break;
10152
10153 case BR_REG (1):
10154 case BR_REG (2):
10155 case BR_REG (3):
10156 case BR_REG (4):
10157 case BR_REG (5):
10158 if (unwind)
10159 fprintf (asm_out_file, "\t.save.b 0x%x\n",
10160 1 << (src_regno - BR_REG (1)));
10161 break;
10162
10163 case FR_REG (2):
10164 case FR_REG (3):
10165 case FR_REG (4):
10166 case FR_REG (5):
10167 if (unwind)
10168 fprintf (asm_out_file, "\t.save.f 0x%x\n",
10169 1 << (src_regno - FR_REG (2)));
10170 break;
10171
10172 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
10173 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10174 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10175 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10176 if (unwind)
10177 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10178 1 << (src_regno - FR_REG (12)));
10179 break;
10180
10181 default:
10182 /* ??? For some reason we mark other general registers, even those
10183 we can't represent in the unwind info. Ignore them. */
10184 break;
10185 }
10186 }
10187
10188 /* This function looks at a single insn and emits any directives
10189 required to unwind this insn. */
10190
10191 static void
10192 ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
10193 {
10194 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10195 bool frame = dwarf2out_do_frame ();
10196 rtx note, pat;
10197 bool handled_one;
10198
10199 if (!unwind && !frame)
10200 return;
10201
10202 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10203 {
10204 last_block = NOTE_BASIC_BLOCK (insn)->next_bb
10205 == EXIT_BLOCK_PTR_FOR_FN (cfun);
10206
10207 /* Restore unwind state from immediately before the epilogue. */
10208 if (need_copy_state)
10209 {
10210 if (unwind)
10211 {
10212 fprintf (asm_out_file, "\t.body\n");
10213 fprintf (asm_out_file, "\t.copy_state %d\n",
10214 cfun->machine->state_num);
10215 }
10216 need_copy_state = false;
10217 }
10218 }
10219
10220 if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10221 return;
10222
10223 /* Look for the ALLOC insn. */
10224 if (INSN_CODE (insn) == CODE_FOR_alloc)
10225 {
10226 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10227 int dest_regno = REGNO (dest);
10228
10229 /* If this is the final destination for ar.pfs, then this must
10230 be the alloc in the prologue. */
10231 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10232 {
10233 if (unwind)
10234 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10235 ia64_dbx_register_number (dest_regno));
10236 }
10237 else
10238 {
10239 /* This must be an alloc before a sibcall. We must drop the
10240 old frame info. The easiest way to drop the old frame
10241 info is to ensure we had a ".restore sp" directive
10242 followed by a new prologue. If the procedure doesn't
10243 have a memory-stack frame, we'll issue a dummy ".restore
10244 sp" now. */
10245 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10246 /* If we haven't done process_epilogue () yet, do it now. */
10247 process_epilogue (asm_out_file, insn, unwind, frame);
10248 if (unwind)
10249 fprintf (asm_out_file, "\t.prologue\n");
10250 }
10251 return;
10252 }
10253
10254 handled_one = false;
10255 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10256 switch (REG_NOTE_KIND (note))
10257 {
10258 case REG_CFA_ADJUST_CFA:
10259 pat = XEXP (note, 0);
10260 if (pat == NULL)
10261 pat = PATTERN (insn);
10262 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10263 handled_one = true;
10264 break;
10265
10266 case REG_CFA_OFFSET:
10267 pat = XEXP (note, 0);
10268 if (pat == NULL)
10269 pat = PATTERN (insn);
10270 process_cfa_offset (asm_out_file, pat, unwind);
10271 handled_one = true;
10272 break;
10273
10274 case REG_CFA_REGISTER:
10275 pat = XEXP (note, 0);
10276 if (pat == NULL)
10277 pat = PATTERN (insn);
10278 process_cfa_register (asm_out_file, pat, unwind);
10279 handled_one = true;
10280 break;
10281
10282 case REG_FRAME_RELATED_EXPR:
10283 case REG_CFA_DEF_CFA:
10284 case REG_CFA_EXPRESSION:
10285 case REG_CFA_RESTORE:
10286 case REG_CFA_SET_VDRAP:
10287 /* Not used in the ia64 port. */
10288 gcc_unreachable ();
10289
10290 default:
10291 /* Not a frame-related note. */
10292 break;
10293 }
10294
10295 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10296 explicit action to take. No guessing required. */
10297 gcc_assert (handled_one);
10298 }
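/* Taken together, the directives emitted above annotate a typical frame
   roughly as follows (register numbers and the frame size are only
   illustrative):

       .prologue
       .save ar.pfs, r35
       alloc r35 = ar.pfs, 0, 4, 2, 0
       .fframe 32
       adds r12 = -32, r12
       .save rp, r34
       mov r34 = b0
       .body
       ...
       .restore sp
       adds r12 = 32, r12
       br.ret.sptk.many b0

   When the epilogue is not in the last block, process_epilogue also emits
   .label_state here and ia64_asm_unwind_emit emits a matching .copy_state
   at the start of the next block, as seen above.  */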
10299
10300 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10301
10302 static void
10303 ia64_asm_emit_except_personality (rtx personality)
10304 {
10305 fputs ("\t.personality\t", asm_out_file);
10306 output_addr_const (asm_out_file, personality);
10307 fputc ('\n', asm_out_file);
10308 }
10309
10310 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10311
10312 static void
10313 ia64_asm_init_sections (void)
10314 {
10315 exception_section = get_unnamed_section (0, output_section_asm_op,
10316 "\t.handlerdata");
10317 }
10318
10319 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10320
10321 static enum unwind_info_type
10322 ia64_debug_unwind_info (void)
10323 {
10324 return UI_TARGET;
10325 }
10326 \f
10327 enum ia64_builtins
10328 {
10329 IA64_BUILTIN_BSP,
10330 IA64_BUILTIN_COPYSIGNQ,
10331 IA64_BUILTIN_FABSQ,
10332 IA64_BUILTIN_FLUSHRS,
10333 IA64_BUILTIN_INFQ,
10334 IA64_BUILTIN_HUGE_VALQ,
10335 IA64_BUILTIN_max
10336 };
10337
10338 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10339
10340 void
10341 ia64_init_builtins (void)
10342 {
10343 tree fpreg_type;
10344 tree float80_type;
10345 tree decl;
10346
10347 /* The __fpreg type. */
10348 fpreg_type = make_node (REAL_TYPE);
10349 TYPE_PRECISION (fpreg_type) = 82;
10350 layout_type (fpreg_type);
10351 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10352
10353 /* The __float80 type. */
10354 float80_type = make_node (REAL_TYPE);
10355 TYPE_PRECISION (float80_type) = 80;
10356 layout_type (float80_type);
10357 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10358
10359 /* The __float128 type. */
10360 if (!TARGET_HPUX)
10361 {
10362 tree ftype;
10363 tree float128_type = make_node (REAL_TYPE);
10364
10365 TYPE_PRECISION (float128_type) = 128;
10366 layout_type (float128_type);
10367 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10368
10369 /* TFmode support builtins. */
10370 ftype = build_function_type_list (float128_type, NULL_TREE);
10371 decl = add_builtin_function ("__builtin_infq", ftype,
10372 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10373 NULL, NULL_TREE);
10374 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10375
10376 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10377 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10378 NULL, NULL_TREE);
10379 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10380
10381 ftype = build_function_type_list (float128_type,
10382 float128_type,
10383 NULL_TREE);
10384 decl = add_builtin_function ("__builtin_fabsq", ftype,
10385 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10386 "__fabstf2", NULL_TREE);
10387 TREE_READONLY (decl) = 1;
10388 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10389
10390 ftype = build_function_type_list (float128_type,
10391 float128_type,
10392 float128_type,
10393 NULL_TREE);
10394 decl = add_builtin_function ("__builtin_copysignq", ftype,
10395 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10396 "__copysigntf3", NULL_TREE);
10397 TREE_READONLY (decl) = 1;
10398 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10399 }
10400 else
10401 /* Under HPUX, this is a synonym for "long double". */
10402 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10403 "__float128");
10404
10405 /* Fwrite on VMS is non-standard. */
10406 #if TARGET_ABI_OPEN_VMS
10407 vms_patch_builtins ();
10408 #endif
10409
10410 #define def_builtin(name, type, code) \
10411 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10412 NULL, NULL_TREE)
10413
10414 decl = def_builtin ("__builtin_ia64_bsp",
10415 build_function_type_list (ptr_type_node, NULL_TREE),
10416 IA64_BUILTIN_BSP);
10417 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10418
10419 decl = def_builtin ("__builtin_ia64_flushrs",
10420 build_function_type_list (void_type_node, NULL_TREE),
10421 IA64_BUILTIN_FLUSHRS);
10422 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10423
10424 #undef def_builtin
10425
10426 if (TARGET_HPUX)
10427 {
10428 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10429 set_user_assembler_name (decl, "_Isfinite");
10430 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10431 set_user_assembler_name (decl, "_Isfinitef");
10432 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10433 set_user_assembler_name (decl, "_Isfinitef128");
10434 }
10435 }
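/* From user code, the builtins registered above can be used along these
   lines (a minimal sketch; the __float128 builtins exist only when not
   targeting HP-UX, as arranged above):

       void example (__float128 x, __float128 y)
       {
         void *bsp = __builtin_ia64_bsp ();  // current RSE backing store pointer
         __builtin_ia64_flushrs ();          // flush the register stack to memory
         __float128 q   = __builtin_copysignq (__builtin_fabsq (x), y);
         __float128 inf = __builtin_infq ();
         (void) bsp; (void) q; (void) inf;
       }
*/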
10436
10437 rtx
10438 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10439 enum machine_mode mode ATTRIBUTE_UNUSED,
10440 int ignore ATTRIBUTE_UNUSED)
10441 {
10442 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10443 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10444
10445 switch (fcode)
10446 {
10447 case IA64_BUILTIN_BSP:
10448 if (! target || ! register_operand (target, DImode))
10449 target = gen_reg_rtx (DImode);
10450 emit_insn (gen_bsp_value (target));
10451 #ifdef POINTERS_EXTEND_UNSIGNED
10452 target = convert_memory_address (ptr_mode, target);
10453 #endif
10454 return target;
10455
10456 case IA64_BUILTIN_FLUSHRS:
10457 emit_insn (gen_flushrs ());
10458 return const0_rtx;
10459
10460 case IA64_BUILTIN_INFQ:
10461 case IA64_BUILTIN_HUGE_VALQ:
10462 {
10463 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10464 REAL_VALUE_TYPE inf;
10465 rtx tmp;
10466
10467 real_inf (&inf);
10468 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10469
10470 tmp = validize_mem (force_const_mem (target_mode, tmp));
10471
10472 if (target == 0)
10473 target = gen_reg_rtx (target_mode);
10474
10475 emit_move_insn (target, tmp);
10476 return target;
10477 }
10478
10479 case IA64_BUILTIN_FABSQ:
10480 case IA64_BUILTIN_COPYSIGNQ:
10481 return expand_call (exp, target, ignore);
10482
10483 default:
10484 gcc_unreachable ();
10485 }
10486
10487 return NULL_RTX;
10488 }
10489
10490 /* Return the ia64 builtin for CODE. */
10491
10492 static tree
10493 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10494 {
10495 if (code >= IA64_BUILTIN_max)
10496 return error_mark_node;
10497
10498 return ia64_builtins[code];
10499 }
10500
10501 /* For HP-UX IA64, aggregate parameters are passed stored in the
10502 most significant bits of the stack slot. */
10503
10504 enum direction
10505 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10506 {
10507 /* Exception to normal case for structures/unions/etc. */
10508
10509 if (type && AGGREGATE_TYPE_P (type)
10510 && int_size_in_bytes (type) < UNITS_PER_WORD)
10511 return upward;
10512
10513 /* Fall back to the default. */
10514 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10515 }
10516
10517 /* Emit text to declare externally defined variables and functions, because
10518 the Intel assembler does not support undefined externals. */
10519
10520 void
10521 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10522 {
10523 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10524 set in order to avoid putting out names that are never really
10525 used. */
10526 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10527 {
10528 /* maybe_assemble_visibility will return 1 if the assembler
10529 visibility directive is output. */
10530 int need_visibility = ((*targetm.binds_local_p) (decl)
10531 && maybe_assemble_visibility (decl));
10532
10533 /* GNU as does not need anything here, but the HP linker does
10534 need something for external functions. */
10535 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10536 && TREE_CODE (decl) == FUNCTION_DECL)
10537 (*targetm.asm_out.globalize_decl_name) (file, decl);
10538 else if (need_visibility && !TARGET_GNU_AS)
10539 (*targetm.asm_out.globalize_label) (file, name);
10540 }
10541 }
10542
10543 /* Set the SImode div/mod functions; init_integral_libfuncs only initializes
10544 modes of word_mode and larger. Rename the TFmode libfuncs using the
10545 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10546 backward compatibility. */
10547
10548 static void
10549 ia64_init_libfuncs (void)
10550 {
10551 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10552 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10553 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10554 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10555
10556 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10557 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10558 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10559 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10560 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10561
10562 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10563 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10564 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10565 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10566 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10567 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10568
10569 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10570 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10571 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10572 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10573 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10574
10575 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10576 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10577 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10578 /* HP-UX 11.23 libc does not have a function for unsigned
10579 SImode-to-TFmode conversion. */
10580 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10581 }
10582
10583 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10584
10585 static void
10586 ia64_hpux_init_libfuncs (void)
10587 {
10588 ia64_init_libfuncs ();
10589
10590 /* The HP SI millicode division and mod functions expect DI arguments.
10591 By turning them off completely we avoid using both libgcc and the
10592 non-standard millicode routines and use the HP DI millicode routines
10593 instead. */
10594
10595 set_optab_libfunc (sdiv_optab, SImode, 0);
10596 set_optab_libfunc (udiv_optab, SImode, 0);
10597 set_optab_libfunc (smod_optab, SImode, 0);
10598 set_optab_libfunc (umod_optab, SImode, 0);
10599
10600 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10601 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10602 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10603 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10604
10605 /* HP-UX libc has TF min/max/abs routines in it. */
10606 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10607 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10608 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10609
10610 /* ia64_expand_compare uses this. */
10611 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10612
10613 /* These should never be used. */
10614 set_optab_libfunc (eq_optab, TFmode, 0);
10615 set_optab_libfunc (ne_optab, TFmode, 0);
10616 set_optab_libfunc (gt_optab, TFmode, 0);
10617 set_optab_libfunc (ge_optab, TFmode, 0);
10618 set_optab_libfunc (lt_optab, TFmode, 0);
10619 set_optab_libfunc (le_optab, TFmode, 0);
10620 }
10621
10622 /* Rename the division and modulus functions in VMS. */
10623
10624 static void
10625 ia64_vms_init_libfuncs (void)
10626 {
10627 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10628 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10629 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10630 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10631 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10632 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10633 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10634 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10635 abort_libfunc = init_one_libfunc ("decc$abort");
10636 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10637 #ifdef MEM_LIBFUNCS_INIT
10638 MEM_LIBFUNCS_INIT;
10639 #endif
10640 }
10641
10642 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10643 the HPUX conventions. */
10644
10645 static void
10646 ia64_sysv4_init_libfuncs (void)
10647 {
10648 ia64_init_libfuncs ();
10649
10650 /* These functions are not part of the HPUX TFmode interface. We
10651 use them instead of _U_Qfcmp, which doesn't work the way we
10652 expect. */
10653 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10654 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10655 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10656 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10657 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10658 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10659
10660 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10661 glibc doesn't have them. */
10662 }
10663
10664 /* Use soft-fp. */
10665
10666 static void
10667 ia64_soft_fp_init_libfuncs (void)
10668 {
10669 }
10670
10671 static bool
10672 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10673 {
10674 return (mode == SImode || mode == DImode);
10675 }
10676 \f
10677 /* For HPUX, it is illegal to have relocations in shared segments. */
10678
10679 static int
10680 ia64_hpux_reloc_rw_mask (void)
10681 {
10682 return 3;
10683 }
10684
10685 /* For others, relax this so that relocations to local data go in
10686 read-only segments, but we still cannot allow global relocations
10687 in read-only segments. */
10688
10689 static int
10690 ia64_reloc_rw_mask (void)
10691 {
10692 return flag_pic ? 3 : 2;
10693 }
10694
10695 /* Return the section to use for X. The only special thing we do here
10696 is to honor small data. */
10697
10698 static section *
10699 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10700 unsigned HOST_WIDE_INT align)
10701 {
10702 if (GET_MODE_SIZE (mode) > 0
10703 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10704 && !TARGET_NO_SDATA)
10705 return sdata_section;
10706 else
10707 return default_elf_select_rtx_section (mode, x, align);
10708 }
10709
10710 static unsigned int
10711 ia64_section_type_flags (tree decl, const char *name, int reloc)
10712 {
10713 unsigned int flags = 0;
10714
10715 if (strcmp (name, ".sdata") == 0
10716 || strncmp (name, ".sdata.", 7) == 0
10717 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10718 || strncmp (name, ".sdata2.", 8) == 0
10719 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10720 || strcmp (name, ".sbss") == 0
10721 || strncmp (name, ".sbss.", 6) == 0
10722 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10723 flags = SECTION_SMALL;
10724
10725 flags |= default_section_type_flags (decl, name, reloc);
10726 return flags;
10727 }
10728
10729 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10730 structure type and that the address of that type should be passed
10731 in out0, rather than in r8. */
10732
10733 static bool
10734 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10735 {
10736 tree ret_type = TREE_TYPE (fntype);
10737
10738 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10739 as the structure return address parameter, if the return value
10740 type has a non-trivial copy constructor or destructor. It is not
10741 clear if this same convention should be used for other
10742 programming languages. Until G++ 3.4, we incorrectly used r8 for
10743 these return values. */
10744 return (abi_version_at_least (2)
10745 && ret_type
10746 && TYPE_MODE (ret_type) == BLKmode
10747 && TREE_ADDRESSABLE (ret_type)
10748 && strcmp (lang_hooks.name, "GNU C++") == 0);
10749 }
10750
10751 /* Output the assembler code for a thunk function. THUNK_DECL is the
10752 declaration for the thunk function itself, FUNCTION is the decl for
10753 the target function. DELTA is an immediate constant offset to be
10754 added to THIS. If VCALL_OFFSET is nonzero, the word at
10755 *(*this + vcall_offset) should be added to THIS. */
10756
10757 static void
10758 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10759 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10760 tree function)
10761 {
10762 rtx this_rtx, funexp;
10763 rtx_insn *insn;
10764 unsigned int this_parmno;
10765 unsigned int this_regno;
10766 rtx delta_rtx;
10767
10768 reload_completed = 1;
10769 epilogue_completed = 1;
10770
10771 /* Set things up as ia64_expand_prologue might. */
10772 last_scratch_gr_reg = 15;
10773
10774 memset (&current_frame_info, 0, sizeof (current_frame_info));
10775 current_frame_info.spill_cfa_off = -16;
10776 current_frame_info.n_input_regs = 1;
10777 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10778
10779 /* Mark the end of the (empty) prologue. */
10780 emit_note (NOTE_INSN_PROLOGUE_END);
10781
10782 /* Figure out whether "this" will be the first parameter (the
10783 typical case) or the second parameter (as happens when the
10784 virtual function returns certain class objects). */
10785 this_parmno
10786 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10787 ? 1 : 0);
10788 this_regno = IN_REG (this_parmno);
10789 if (!TARGET_REG_NAMES)
10790 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10791
10792 this_rtx = gen_rtx_REG (Pmode, this_regno);
10793
10794 /* Apply the constant offset, if required. */
10795 delta_rtx = GEN_INT (delta);
10796 if (TARGET_ILP32)
10797 {
10798 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10799 REG_POINTER (tmp) = 1;
10800 if (delta && satisfies_constraint_I (delta_rtx))
10801 {
10802 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10803 delta = 0;
10804 }
10805 else
10806 emit_insn (gen_ptr_extend (this_rtx, tmp));
10807 }
10808 if (delta)
10809 {
10810 if (!satisfies_constraint_I (delta_rtx))
10811 {
10812 rtx tmp = gen_rtx_REG (Pmode, 2);
10813 emit_move_insn (tmp, delta_rtx);
10814 delta_rtx = tmp;
10815 }
10816 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10817 }
10818
10819 /* Apply the offset from the vtable, if required. */
10820 if (vcall_offset)
10821 {
10822 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10823 rtx tmp = gen_rtx_REG (Pmode, 2);
10824
10825 if (TARGET_ILP32)
10826 {
10827 rtx t = gen_rtx_REG (ptr_mode, 2);
10828 REG_POINTER (t) = 1;
10829 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10830 if (satisfies_constraint_I (vcall_offset_rtx))
10831 {
10832 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10833 vcall_offset = 0;
10834 }
10835 else
10836 emit_insn (gen_ptr_extend (tmp, t));
10837 }
10838 else
10839 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10840
10841 if (vcall_offset)
10842 {
10843 if (!satisfies_constraint_J (vcall_offset_rtx))
10844 {
10845 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10846 emit_move_insn (tmp2, vcall_offset_rtx);
10847 vcall_offset_rtx = tmp2;
10848 }
10849 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10850 }
10851
10852 if (TARGET_ILP32)
10853 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10854 else
10855 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10856
10857 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10858 }
10859
10860 /* Generate a tail call to the target function. */
10861 if (! TREE_USED (function))
10862 {
10863 assemble_external (function);
10864 TREE_USED (function) = 1;
10865 }
10866 funexp = XEXP (DECL_RTL (function), 0);
10867 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10868 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10869 insn = get_last_insn ();
10870 SIBLING_CALL_P (insn) = 1;
10871
10872 /* Code generation for calls relies on splitting. */
10873 reload_completed = 1;
10874 epilogue_completed = 1;
10875 try_split (PATTERN (insn), insn, 0);
10876
10877 emit_barrier ();
10878
10879 /* Run just enough of rest_of_compilation to get the insns emitted.
10880 There's not really enough bulk here to make other passes such as
10881 instruction scheduling worthwhile. Note that use_thunk calls
10882 assemble_start_function and assemble_end_function. */
10883
10884 emit_all_insn_group_barriers (NULL);
10885 insn = get_insns ();
10886 shorten_branches (insn);
10887 final_start_function (insn, file, 1);
10888 final (insn, file, 1);
10889 final_end_function ();
10890
10891 reload_completed = 0;
10892 epilogue_completed = 0;
10893 }
10894
10895 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10896
10897 static rtx
10898 ia64_struct_value_rtx (tree fntype,
10899 int incoming ATTRIBUTE_UNUSED)
10900 {
10901 if (TARGET_ABI_OPEN_VMS
10902 || (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10903 return NULL_RTX;
10904 return gen_rtx_REG (Pmode, GR_REG (8));
10905 }
10906
10907 static bool
10908 ia64_scalar_mode_supported_p (enum machine_mode mode)
10909 {
10910 switch (mode)
10911 {
10912 case QImode:
10913 case HImode:
10914 case SImode:
10915 case DImode:
10916 case TImode:
10917 return true;
10918
10919 case SFmode:
10920 case DFmode:
10921 case XFmode:
10922 case RFmode:
10923 return true;
10924
10925 case TFmode:
10926 return true;
10927
10928 default:
10929 return false;
10930 }
10931 }
10932
10933 static bool
10934 ia64_vector_mode_supported_p (enum machine_mode mode)
10935 {
10936 switch (mode)
10937 {
10938 case V8QImode:
10939 case V4HImode:
10940 case V2SImode:
10941 return true;
10942
10943 case V2SFmode:
10944 return true;
10945
10946 default:
10947 return false;
10948 }
10949 }
10950
10951 /* Implement TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P. */
10952
10953 static bool
10954 ia64_libgcc_floating_mode_supported_p (enum machine_mode mode)
10955 {
10956 switch (mode)
10957 {
10958 case SFmode:
10959 case DFmode:
10960 return true;
10961
10962 case XFmode:
10963 #ifdef IA64_NO_LIBGCC_XFMODE
10964 return false;
10965 #else
10966 return true;
10967 #endif
10968
10969 case TFmode:
10970 #ifdef IA64_NO_LIBGCC_TFMODE
10971 return false;
10972 #else
10973 return true;
10974 #endif
10975
10976 default:
10977 return false;
10978 }
10979 }
10980
10981 /* Implement the FUNCTION_PROFILER macro. */
10982
10983 void
10984 ia64_output_function_profiler (FILE *file, int labelno)
10985 {
10986 bool indirect_call;
10987
10988 /* If the function needs a static chain and the static chain
10989 register is r15, we use an indirect call so as to bypass
10990 the PLT stub in case the executable is dynamically linked,
10991 because the stub clobbers r15 as per 5.3.6 of the psABI.
10992 We don't need to do that in non-canonical PIC mode. */
10993
10994 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10995 {
10996 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10997 indirect_call = true;
10998 }
10999 else
11000 indirect_call = false;
11001
11002 if (TARGET_GNU_AS)
11003 fputs ("\t.prologue 4, r40\n", file);
11004 else
11005 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
11006 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
11007
11008 if (NO_PROFILE_COUNTERS)
11009 fputs ("\tmov out3 = r0\n", file);
11010 else
11011 {
11012 char buf[20];
11013 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11014
11015 if (TARGET_AUTO_PIC)
11016 fputs ("\tmovl out3 = @gprel(", file);
11017 else
11018 fputs ("\taddl out3 = @ltoff(", file);
11019 assemble_name (file, buf);
11020 if (TARGET_AUTO_PIC)
11021 fputs (")\n", file);
11022 else
11023 fputs ("), r1\n", file);
11024 }
11025
11026 if (indirect_call)
11027 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
11028 fputs ("\t;;\n", file);
11029
11030 fputs ("\t.save rp, r42\n", file);
11031 fputs ("\tmov out2 = b0\n", file);
11032 if (indirect_call)
11033 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
11034 fputs ("\t.body\n", file);
11035 fputs ("\tmov out1 = r1\n", file);
11036 if (indirect_call)
11037 {
11038 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
11039 fputs ("\tmov b6 = r16\n", file);
11040 fputs ("\tld8 r1 = [r14]\n", file);
11041 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
11042 }
11043 else
11044 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
11045 }
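/* Summary of the calling convention set up above (descriptive only):
   out0 holds the caller's ar.pfs (from the alloc), out1 the gp (r1),
   out2 the return branch register b0, and out3 the address of the
   per-call-site counter word, or r0 when NO_PROFILE_COUNTERS.  In the
   indirect case the @fptr(_mcount) descriptor is loaded through r14,
   so the PLT stub, which clobbers r15, is never involved.  */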
11046
11047 static GTY(()) rtx mcount_func_rtx;
11048 static rtx
11049 gen_mcount_func_rtx (void)
11050 {
11051 if (!mcount_func_rtx)
11052 mcount_func_rtx = init_one_libfunc ("_mcount");
11053 return mcount_func_rtx;
11054 }
11055
11056 void
11057 ia64_profile_hook (int labelno)
11058 {
11059 rtx label, ip;
11060
11061 if (NO_PROFILE_COUNTERS)
11062 label = const0_rtx;
11063 else
11064 {
11065 char buf[30];
11066 const char *label_name;
11067 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11068 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
11069 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
11070 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
11071 }
11072 ip = gen_reg_rtx (Pmode);
11073 emit_insn (gen_ip_value (ip));
11074 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
11075 VOIDmode, 3,
11076 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
11077 ip, Pmode,
11078 label, Pmode);
11079 }
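/* Note: the library call above hands _mcount three arguments: the
   value of b0 (the return address into our caller), the current
   instruction pointer obtained via gen_ip_value, and the address of
   the counter label (or 0 when NO_PROFILE_COUNTERS).  */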
11080
11081 /* Return the mangling of TYPE if it is an extended fundamental type. */
11082
11083 static const char *
11084 ia64_mangle_type (const_tree type)
11085 {
11086 type = TYPE_MAIN_VARIANT (type);
11087
11088 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
11089 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
11090 return NULL;
11091
11092 /* On HP-UX, "long double" is TFmode, so __float128 picks up the
11093 default "e" mangling there; elsewhere mangle TFmode as "g". */
11094 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
11095 return "g";
11096 /* On HP-UX, "e" is not available as a mangling of __float80 so use
11097 an extended mangling. Elsewhere, "e" is available since long
11098 double is 80 bits. */
11099 if (TYPE_MODE (type) == XFmode)
11100 return TARGET_HPUX ? "u9__float80" : "e";
11101 if (TYPE_MODE (type) == RFmode)
11102 return "u7__fpreg";
11103 return NULL;
11104 }
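/* For reference, the manglings produced by the code above:
   __float80 (XFmode) is "e", except "u9__float80" on HP-UX where "e"
   already names long double; __float128 (TFmode) is "g", except on
   HP-UX where it is long double and keeps the default "e"; and
   __fpreg (RFmode) is always "u7__fpreg".  */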
11105
11106 /* Return the diagnostic message string if conversion from FROMTYPE to
11107 TOTYPE is not allowed, NULL otherwise. */
11108 static const char *
11109 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
11110 {
11111 /* Reject nontrivial conversion to or from __fpreg. */
11112 if (TYPE_MODE (fromtype) == RFmode
11113 && TYPE_MODE (totype) != RFmode
11114 && TYPE_MODE (totype) != VOIDmode)
11115 return N_("invalid conversion from %<__fpreg%>");
11116 if (TYPE_MODE (totype) == RFmode
11117 && TYPE_MODE (fromtype) != RFmode)
11118 return N_("invalid conversion to %<__fpreg%>");
11119 return NULL;
11120 }
11121
11122 /* Return the diagnostic message string if the unary operation OP is
11123 not permitted on TYPE, NULL otherwise. */
11124 static const char *
11125 ia64_invalid_unary_op (int op, const_tree type)
11126 {
11127 /* Reject operations on __fpreg other than unary + or &. */
11128 if (TYPE_MODE (type) == RFmode
11129 && op != CONVERT_EXPR
11130 && op != ADDR_EXPR)
11131 return N_("invalid operation on %<__fpreg%>");
11132 return NULL;
11133 }
11134
11135 /* Return the diagnostic message string if the binary operation OP is
11136 not permitted on TYPE1 and TYPE2, NULL otherwise. */
11137 static const char *
11138 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
11139 {
11140 /* Reject operations on __fpreg. */
11141 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
11142 return N_("invalid operation on %<__fpreg%>");
11143 return NULL;
11144 }
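/* Illustrative user-level effect of the three hooks above (a sketch of
   source code that would be rejected or accepted, not part of GCC):

     __fpreg r, s;
     double d = r;        // rejected: invalid conversion from __fpreg
     r = s + s;           // rejected: invalid operation on __fpreg
     __fpreg *p = &r;     // accepted: unary & is allowed
     r = +s;              // accepted: unary + is allowed  */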
11145
11146 /* HP-UX version_id attribute.
11147 For an object foo, if the version_id is set to 1234, put out an alias
11148 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
11149 other than an alias statement because it is an illegal symbol name. */
11150
11151 static tree
11152 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
11153 tree name ATTRIBUTE_UNUSED,
11154 tree args,
11155 int flags ATTRIBUTE_UNUSED,
11156 bool *no_add_attrs)
11157 {
11158 tree arg = TREE_VALUE (args);
11159
11160 if (TREE_CODE (arg) != STRING_CST)
11161 {
11162 error ("version attribute is not a string");
11163 *no_add_attrs = true;
11164 return NULL_TREE;
11165 }
11166 return NULL_TREE;
11167 }
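/* Illustrative use of the attribute (a hypothetical declaration):

     extern int foo (void) __attribute__ ((version_id ("1234")));

   which directs the HP-UX back end to emit the alias statement
   described in the comment above.  */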
11168
11169 /* Target hook for c_mode_for_suffix. */
11170
11171 static enum machine_mode
11172 ia64_c_mode_for_suffix (char suffix)
11173 {
11174 if (suffix == 'q')
11175 return TFmode;
11176 if (suffix == 'w')
11177 return XFmode;
11178
11179 return VOIDmode;
11180 }
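/* Consequently a literal such as 1.0q is a TFmode (__float128)
   constant and 1.0w an XFmode (__float80) constant; any other suffix
   falls back to the default handling (VOIDmode here).  */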
11181
11182 static GTY(()) rtx ia64_dconst_0_5_rtx;
11183
11184 rtx
11185 ia64_dconst_0_5 (void)
11186 {
11187 if (! ia64_dconst_0_5_rtx)
11188 {
11189 REAL_VALUE_TYPE rv;
11190 real_from_string (&rv, "0.5");
11191 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11192 }
11193 return ia64_dconst_0_5_rtx;
11194 }
11195
11196 static GTY(()) rtx ia64_dconst_0_375_rtx;
11197
11198 rtx
11199 ia64_dconst_0_375 (void)
11200 {
11201 if (! ia64_dconst_0_375_rtx)
11202 {
11203 REAL_VALUE_TYPE rv;
11204 real_from_string (&rv, "0.375");
11205 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11206 }
11207 return ia64_dconst_0_375_rtx;
11208 }
11209
11210 static enum machine_mode
11211 ia64_get_reg_raw_mode (int regno)
11212 {
11213 if (FR_REGNO_P (regno))
11214 return XFmode;
11215 return default_get_reg_raw_mode (regno);
11216 }
11217
11218 /* Implement TARGET_MEMBER_TYPE_FORCES_BLK. ??? Might not be needed
11219 anymore. */
11220
11221 bool
11222 ia64_member_type_forces_blk (const_tree, enum machine_mode mode)
11223 {
11224 return TARGET_HPUX && mode == TFmode;
11225 }
11226
11227 /* Always default to .text section until HP-UX linker is fixed. */
11228
11229 ATTRIBUTE_UNUSED static section *
11230 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11231 enum node_frequency freq ATTRIBUTE_UNUSED,
11232 bool startup ATTRIBUTE_UNUSED,
11233 bool exit ATTRIBUTE_UNUSED)
11234 {
11235 return NULL;
11236 }
11237 \f
11238 /* Construct (set target (vec_select op0 (parallel perm))) and
11239 return true if that's a valid instruction in the active ISA. */
11240
11241 static bool
11242 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11243 {
11244 rtx rperm[MAX_VECT_LEN], x;
11245 unsigned i;
11246
11247 for (i = 0; i < nelt; ++i)
11248 rperm[i] = GEN_INT (perm[i]);
11249
11250 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11251 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11252 x = gen_rtx_SET (VOIDmode, target, x);
11253
11254 rtx_insn *insn = emit_insn (x);
11255 if (recog_memoized (insn) < 0)
11256 {
11257 remove_insn (insn);
11258 return false;
11259 }
11260 return true;
11261 }
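/* For example (illustrative RTL only), swapping the two halves of a
   V4HI register would be emitted as

     (set (reg:V4HI target)
	  (vec_select:V4HI (reg:V4HI op0)
			   (parallel [(const_int 2) (const_int 3)
				      (const_int 0) (const_int 1)])))

   and kept only if recog_memoized accepts it for the active ISA.  */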
11262
11263 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11264
11265 static bool
11266 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11267 const unsigned char *perm, unsigned nelt)
11268 {
11269 enum machine_mode v2mode;
11270 rtx x;
11271
11272 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
11273 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11274 return expand_vselect (target, x, perm, nelt);
11275 }
11276
11277 /* Try to expand a no-op permutation. */
11278
11279 static bool
11280 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11281 {
11282 unsigned i, nelt = d->nelt;
11283
11284 for (i = 0; i < nelt; ++i)
11285 if (d->perm[i] != i)
11286 return false;
11287
11288 if (!d->testing_p)
11289 emit_move_insn (d->target, d->op0);
11290
11291 return true;
11292 }
11293
11294 /* Try to expand D via a shrp instruction. */
11295
11296 static bool
11297 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11298 {
11299 unsigned i, nelt = d->nelt, shift, mask;
11300 rtx tmp, hi, lo;
11301
11302 /* ??? Don't force V2SFmode into the integer registers. */
11303 if (d->vmode == V2SFmode)
11304 return false;
11305
11306 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11307
11308 shift = d->perm[0];
11309 if (BYTES_BIG_ENDIAN && shift > nelt)
11310 return false;
11311
11312 for (i = 1; i < nelt; ++i)
11313 if (d->perm[i] != ((shift + i) & mask))
11314 return false;
11315
11316 if (d->testing_p)
11317 return true;
11318
11319 hi = shift < nelt ? d->op1 : d->op0;
11320 lo = shift < nelt ? d->op0 : d->op1;
11321
11322 shift %= nelt;
11323
11324 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11325
11326 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11327 gcc_assert (IN_RANGE (shift, 1, 63));
11328
11329 /* Recall that big-endian elements are numbered starting at the top of
11330 the register. Ideally we'd have a shift-left-pair. But since we
11331 don't, convert to a shift the other direction. */
11332 if (BYTES_BIG_ENDIAN)
11333 shift = 64 - shift;
11334
11335 tmp = gen_reg_rtx (DImode);
11336 hi = gen_lowpart (DImode, hi);
11337 lo = gen_lowpart (DImode, lo);
11338 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11339
11340 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11341 return true;
11342 }
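/* Worked example (little-endian, illustrative): for V8QImode with
   perm = { 3, 4, 5, 6, 7, 8, 9, 10 }, shift is 3, hi is op1 and lo is
   op0, and the routine emits roughly  shrp tmp = hi, lo, 24  -- a
   24-bit right shift of the 128-bit hi:lo pair -- whose low 64 bits
   hold elements 3..10 of the concatenated operands.  */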
11343
11344 /* Try to instantiate D in a single instruction. */
11345
11346 static bool
11347 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11348 {
11349 unsigned i, nelt = d->nelt;
11350 unsigned char perm2[MAX_VECT_LEN];
11351
11352 /* Try single-operand selections. */
11353 if (d->one_operand_p)
11354 {
11355 if (expand_vec_perm_identity (d))
11356 return true;
11357 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11358 return true;
11359 }
11360
11361 /* Try two operand selections. */
11362 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11363 return true;
11364
11365 /* Recognize interleave style patterns with reversed operands. */
11366 if (!d->one_operand_p)
11367 {
11368 for (i = 0; i < nelt; ++i)
11369 {
11370 unsigned e = d->perm[i];
11371 if (e >= nelt)
11372 e -= nelt;
11373 else
11374 e += nelt;
11375 perm2[i] = e;
11376 }
11377
11378 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11379 return true;
11380 }
11381
11382 if (expand_vec_perm_shrp (d))
11383 return true;
11384
11385 /* ??? Look for deposit-like permutations where most of the result
11386 comes from one vector unchanged and the rest comes from a
11387 sequential hunk of the other vector. */
11388
11389 return false;
11390 }
11391
11392 /* Pattern match broadcast permutations. */
11393
11394 static bool
11395 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11396 {
11397 unsigned i, elt, nelt = d->nelt;
11398 unsigned char perm2[2];
11399 rtx temp;
11400 bool ok;
11401
11402 if (!d->one_operand_p)
11403 return false;
11404
11405 elt = d->perm[0];
11406 for (i = 1; i < nelt; ++i)
11407 if (d->perm[i] != elt)
11408 return false;
11409
11410 switch (d->vmode)
11411 {
11412 case V2SImode:
11413 case V2SFmode:
11414 /* Implementable by interleave. */
11415 perm2[0] = elt;
11416 perm2[1] = elt + 2;
11417 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11418 gcc_assert (ok);
11419 break;
11420
11421 case V8QImode:
11422 /* Implementable by extract + broadcast. */
11423 if (BYTES_BIG_ENDIAN)
11424 elt = 7 - elt;
11425 elt *= BITS_PER_UNIT;
11426 temp = gen_reg_rtx (DImode);
11427 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11428 GEN_INT (8), GEN_INT (elt)));
11429 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11430 break;
11431
11432 case V4HImode:
11433 /* Should have been matched directly by vec_select. */
11434 default:
11435 gcc_unreachable ();
11436 }
11437
11438 return true;
11439 }
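/* For instance (little-endian sketch): broadcasting element 3 of a
   V8QImode operand extracts the byte at bit offset 24 with extzv and
   replicates it into every lane via the mux1 broadcast pattern
   (gen_mux1_brcst_qi); V2SI/V2SF broadcasts instead reuse the
   interleave path above.  */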
11440
11441 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11442 two vector permutation into a single vector permutation by using
11443 an interleave operation to merge the vectors. */
11444
11445 static bool
11446 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11447 {
11448 struct expand_vec_perm_d dremap, dfinal;
11449 unsigned char remap[2 * MAX_VECT_LEN];
11450 unsigned contents, i, nelt, nelt2;
11451 unsigned h0, h1, h2, h3;
11452 rtx_insn *seq;
11453 bool ok;
11454
11455 if (d->one_operand_p)
11456 return false;
11457
11458 nelt = d->nelt;
11459 nelt2 = nelt / 2;
11460
11461 /* Examine where the elements come from. */
11462 contents = 0;
11463 for (i = 0; i < nelt; ++i)
11464 contents |= 1u << d->perm[i];
11465
11466 memset (remap, 0xff, sizeof (remap));
11467 dremap = *d;
11468
11469 h0 = (1u << nelt2) - 1;
11470 h1 = h0 << nelt2;
11471 h2 = h0 << nelt;
11472 h3 = h0 << (nelt + nelt2);
11473
11474 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11475 {
11476 for (i = 0; i < nelt; ++i)
11477 {
11478 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11479 remap[which] = i;
11480 dremap.perm[i] = which;
11481 }
11482 }
11483 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11484 {
11485 for (i = 0; i < nelt; ++i)
11486 {
11487 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11488 remap[which] = i;
11489 dremap.perm[i] = which;
11490 }
11491 }
11492 else if ((contents & 0x5555) == contents) /* mix even elements */
11493 {
11494 for (i = 0; i < nelt; ++i)
11495 {
11496 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11497 remap[which] = i;
11498 dremap.perm[i] = which;
11499 }
11500 }
11501 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11502 {
11503 for (i = 0; i < nelt; ++i)
11504 {
11505 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11506 remap[which] = i;
11507 dremap.perm[i] = which;
11508 }
11509 }
11510 else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
11511 {
11512 unsigned shift = ctz_hwi (contents);
11513 for (i = 0; i < nelt; ++i)
11514 {
11515 unsigned which = (i + shift) & (2 * nelt - 1);
11516 remap[which] = i;
11517 dremap.perm[i] = which;
11518 }
11519 }
11520 else
11521 return false;
11522
11523 /* Use the remapping array set up above to move the elements from their
11524 swizzled locations into their final destinations. */
11525 dfinal = *d;
11526 for (i = 0; i < nelt; ++i)
11527 {
11528 unsigned e = remap[d->perm[i]];
11529 gcc_assert (e < nelt);
11530 dfinal.perm[i] = e;
11531 }
11532 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11533 dfinal.op1 = dfinal.op0;
11534 dfinal.one_operand_p = true;
11535 dremap.target = dfinal.op0;
11536
11537 /* Test if the final remap can be done with a single insn. For V4HImode
11538 this *will* succeed. For V8QImode or V2SImode it may not. */
11539 start_sequence ();
11540 ok = expand_vec_perm_1 (&dfinal);
11541 seq = get_insns ();
11542 end_sequence ();
11543 if (!ok)
11544 return false;
11545 if (d->testing_p)
11546 return true;
11547
11548 ok = expand_vec_perm_1 (&dremap);
11549 gcc_assert (ok);
11550
11551 emit_insn (seq);
11552 return true;
11553 }
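/* Worked example (V4HImode, illustrative): for perm = { 0, 1, 4, 5 }
   the contents mask matches h0|h2, so dremap becomes the interleave
   { 0, 4, 1, 5 }, producing temp = { a0, b0, a1, b1 }, and dfinal is
   the one-operand permutation { 0, 2, 1, 3 } of temp, which yields
   { a0, a1, b0, b1 } as required.  */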
11554
11555 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11556 constant permutation via two mux2 and a merge. */
11557
11558 static bool
11559 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11560 {
11561 unsigned char perm2[4];
11562 rtx rmask[4];
11563 unsigned i;
11564 rtx t0, t1, mask, x;
11565 bool ok;
11566
11567 if (d->vmode != V4HImode || d->one_operand_p)
11568 return false;
11569 if (d->testing_p)
11570 return true;
11571
11572 for (i = 0; i < 4; ++i)
11573 {
11574 perm2[i] = d->perm[i] & 3;
11575 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
11576 }
11577 mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
11578 mask = force_reg (V4HImode, mask);
11579
11580 t0 = gen_reg_rtx (V4HImode);
11581 t1 = gen_reg_rtx (V4HImode);
11582
11583 ok = expand_vselect (t0, d->op0, perm2, 4);
11584 gcc_assert (ok);
11585 ok = expand_vselect (t1, d->op1, perm2, 4);
11586 gcc_assert (ok);
11587
11588 x = gen_rtx_AND (V4HImode, mask, t0);
11589 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
11590
11591 x = gen_rtx_NOT (V4HImode, mask);
11592 x = gen_rtx_AND (V4HImode, x, t1);
11593 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
11594
11595 x = gen_rtx_IOR (V4HImode, t0, t1);
11596 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
11597
11598 return true;
11599 }
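/* Worked example (illustrative): for perm = { 1, 4, 3, 6 } we get
   perm2 = { 1, 0, 3, 2 } and mask = { -1, 0, -1, 0 }; the two selects
   give t0 = { a1, a0, a3, a2 } and t1 = { b1, b0, b3, b2 }, and the
   merge (mask & t0) | (~mask & t1) produces { a1, b0, a3, b2 }, i.e.
   elements 1, 4, 3, 6 of the concatenated operands.  */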
11600
11601 /* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
11602 With all of the interface bits taken care of, perform the expansion
11603 in D and return true on success. */
11604
11605 static bool
11606 ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
11607 {
11608 if (expand_vec_perm_1 (d))
11609 return true;
11610 if (expand_vec_perm_broadcast (d))
11611 return true;
11612 if (expand_vec_perm_interleave_2 (d))
11613 return true;
11614 if (expand_vec_perm_v4hi_5 (d))
11615 return true;
11616 return false;
11617 }
11618
11619 bool
11620 ia64_expand_vec_perm_const (rtx operands[4])
11621 {
11622 struct expand_vec_perm_d d;
11623 unsigned char perm[MAX_VECT_LEN];
11624 int i, nelt, which;
11625 rtx sel;
11626
11627 d.target = operands[0];
11628 d.op0 = operands[1];
11629 d.op1 = operands[2];
11630 sel = operands[3];
11631
11632 d.vmode = GET_MODE (d.target);
11633 gcc_assert (VECTOR_MODE_P (d.vmode));
11634 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11635 d.testing_p = false;
11636
11637 gcc_assert (GET_CODE (sel) == CONST_VECTOR);
11638 gcc_assert (XVECLEN (sel, 0) == nelt);
11639 gcc_checking_assert (sizeof (d.perm) == sizeof (perm));
11640
11641 for (i = which = 0; i < nelt; ++i)
11642 {
11643 rtx e = XVECEXP (sel, 0, i);
11644 int ei = INTVAL (e) & (2 * nelt - 1);
11645
11646 which |= (ei < nelt ? 1 : 2);
11647 d.perm[i] = ei;
11648 perm[i] = ei;
11649 }
11650
11651 switch (which)
11652 {
11653 default:
11654 gcc_unreachable ();
11655
11656 case 3:
11657 if (!rtx_equal_p (d.op0, d.op1))
11658 {
11659 d.one_operand_p = false;
11660 break;
11661 }
11662
11663 /* The elements of PERM do not suggest that only the first operand
11664 is used, but both operands are identical. Allow easier matching
11665 of the permutation by folding the permutation into the single
11666 input vector. */
11667 for (i = 0; i < nelt; ++i)
11668 if (d.perm[i] >= nelt)
11669 d.perm[i] -= nelt;
11670 /* FALLTHRU */
11671
11672 case 1:
11673 d.op1 = d.op0;
11674 d.one_operand_p = true;
11675 break;
11676
11677 case 2:
11678 for (i = 0; i < nelt; ++i)
11679 d.perm[i] -= nelt;
11680 d.op0 = d.op1;
11681 d.one_operand_p = true;
11682 break;
11683 }
11684
11685 if (ia64_expand_vec_perm_const_1 (&d))
11686 return true;
11687
11688 /* If the mask says both arguments are needed, but they are the same,
11689 the above tried to expand with one_operand_p true. If that didn't
11690 work, retry with one_operand_p false, as that's what we used in _ok. */
11691 if (which == 3 && d.one_operand_p)
11692 {
11693 memcpy (d.perm, perm, sizeof (perm));
11694 d.one_operand_p = false;
11695 return ia64_expand_vec_perm_const_1 (&d);
11696 }
11697
11698 return false;
11699 }
11700
11701 /* Implement targetm.vectorize.vec_perm_const_ok. */
11702
11703 static bool
11704 ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
11705 const unsigned char *sel)
11706 {
11707 struct expand_vec_perm_d d;
11708 unsigned int i, nelt, which;
11709 bool ret;
11710
11711 d.vmode = vmode;
11712 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11713 d.testing_p = true;
11714
11715 /* Extract the values from the vector CST into the permutation
11716 array in D. */
11717 memcpy (d.perm, sel, nelt);
11718 for (i = which = 0; i < nelt; ++i)
11719 {
11720 unsigned char e = d.perm[i];
11721 gcc_assert (e < 2 * nelt);
11722 which |= (e < nelt ? 1 : 2);
11723 }
11724
11725 /* If all elements come from the second vector, fold them onto the first. */
11726 if (which == 2)
11727 for (i = 0; i < nelt; ++i)
11728 d.perm[i] -= nelt;
11729
11730 /* Check whether the mask can be applied to the vector type. */
11731 d.one_operand_p = (which != 3);
11732
11733 /* Go through the motions and see if we can figure out how to
11734 generate the requested permutation. */
11735 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
11736 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
11737 if (!d.one_operand_p)
11738 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
11739
11740 start_sequence ();
11741 ret = ia64_expand_vec_perm_const_1 (&d);
11742 end_sequence ();
11743
11744 return ret;
11745 }
11746
11747 void
11748 ia64_expand_vec_setv2sf (rtx operands[3])
11749 {
11750 struct expand_vec_perm_d d;
11751 unsigned int which;
11752 bool ok;
11753
11754 d.target = operands[0];
11755 d.op0 = operands[0];
11756 d.op1 = gen_reg_rtx (V2SFmode);
11757 d.vmode = V2SFmode;
11758 d.nelt = 2;
11759 d.one_operand_p = false;
11760 d.testing_p = false;
11761
11762 which = INTVAL (operands[2]);
11763 gcc_assert (which <= 1);
11764 d.perm[0] = 1 - which;
11765 d.perm[1] = which + 2;
11766
11767 emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));
11768
11769 ok = ia64_expand_vec_perm_const_1 (&d);
11770 gcc_assert (ok);
11771 }
11772
11773 void
11774 ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
11775 {
11776 struct expand_vec_perm_d d;
11777 enum machine_mode vmode = GET_MODE (target);
11778 unsigned int i, nelt = GET_MODE_NUNITS (vmode);
11779 bool ok;
11780
11781 d.target = target;
11782 d.op0 = op0;
11783 d.op1 = op1;
11784 d.vmode = vmode;
11785 d.nelt = nelt;
11786 d.one_operand_p = false;
11787 d.testing_p = false;
11788
11789 for (i = 0; i < nelt; ++i)
11790 d.perm[i] = i * 2 + odd;
11791
11792 ok = ia64_expand_vec_perm_const_1 (&d);
11793 gcc_assert (ok);
11794 }
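/* For example, with V4HImode operands and ODD == 0 the permutation
   built here is { 0, 2, 4, 6 }, i.e. the even-numbered elements of the
   op0/op1 pair; ODD == 1 selects { 1, 3, 5, 7 } instead.  */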
11795
11796 #include "gt-ia64.h"