1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "stor-layout.h"
30 #include "calls.h"
31 #include "varasm.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "function.h"
44 #include "ggc.h"
45 #include "basic-block.h"
46 #include "libfuncs.h"
47 #include "diagnostic-core.h"
48 #include "sched-int.h"
49 #include "timevar.h"
50 #include "target.h"
51 #include "target-def.h"
52 #include "common/common-target.h"
53 #include "tm_p.h"
54 #include "hash-table.h"
55 #include "langhooks.h"
56 #include "gimple.h"
57 #include "gimplify.h"
58 #include "intl.h"
59 #include "df.h"
60 #include "debug.h"
61 #include "params.h"
62 #include "dbgcnt.h"
63 #include "tm-constrs.h"
64 #include "sel-sched.h"
65 #include "reload.h"
66 #include "opts.h"
67 #include "dumpfile.h"
68
69 /* This is used for communication between ASM_OUTPUT_LABEL and
70 ASM_OUTPUT_LABELREF. */
71 int ia64_asm_output_label = 0;
72
73 /* Register names for ia64_expand_prologue. */
74 static const char * const ia64_reg_numbers[96] =
75 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
76 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
77 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
78 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
79 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
80 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
81 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
82 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
83 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
84 "r104","r105","r106","r107","r108","r109","r110","r111",
85 "r112","r113","r114","r115","r116","r117","r118","r119",
86 "r120","r121","r122","r123","r124","r125","r126","r127"};
87
88 /* ??? These strings could be shared with REGISTER_NAMES. */
89 static const char * const ia64_input_reg_names[8] =
90 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
91
92 /* ??? These strings could be shared with REGISTER_NAMES. */
93 static const char * const ia64_local_reg_names[80] =
94 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
95 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
96 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
97 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
98 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
99 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
100 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
101 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
102 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
103 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
104
105 /* ??? These strings could be shared with REGISTER_NAMES. */
106 static const char * const ia64_output_reg_names[8] =
107 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
108
109 /* Variables which are this size or smaller are put in the sdata/sbss
110 sections. */
111
112 unsigned int ia64_section_threshold;
113
114 /* The following variable is used by the DFA insn scheduler. The value is
115 TRUE if we do insn bundling instead of insn scheduling. */
116 int bundling_p = 0;
117
118 enum ia64_frame_regs
119 {
120 reg_fp,
121 reg_save_b0,
122 reg_save_pr,
123 reg_save_ar_pfs,
124 reg_save_ar_unat,
125 reg_save_ar_lc,
126 reg_save_gp,
127 number_of_ia64_frame_regs
128 };
129
130 /* Structure to be filled in by ia64_compute_frame_size with register
131 save masks and offsets for the current function. */
132
133 struct ia64_frame_info
134 {
135 HOST_WIDE_INT total_size; /* size of the stack frame, not including
136 the caller's scratch area. */
137 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
138 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
139 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
140 HARD_REG_SET mask; /* mask of saved registers. */
141 unsigned int gr_used_mask; /* mask of registers in use as gr spill
142 registers or long-term scratches. */
143 int n_spilled; /* number of spilled registers. */
144 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
145 int n_input_regs; /* number of input registers used. */
146 int n_local_regs; /* number of local registers used. */
147 int n_output_regs; /* number of output registers used. */
148 int n_rotate_regs; /* number of rotating registers used. */
149
150 char need_regstk; /* true if a .regstk directive is needed. */
151 char initialized; /* true if the data is finalized. */
152 };
153
154 /* Current frame information calculated by ia64_compute_frame_size. */
155 static struct ia64_frame_info current_frame_info;
156 /* The actual registers that are emitted. */
157 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
158 \f
159 static int ia64_first_cycle_multipass_dfa_lookahead (void);
160 static void ia64_dependencies_evaluation_hook (rtx, rtx);
161 static void ia64_init_dfa_pre_cycle_insn (void);
162 static rtx ia64_dfa_pre_cycle_insn (void);
163 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
164 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
165 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
166 static void ia64_h_i_d_extended (void);
167 static void * ia64_alloc_sched_context (void);
168 static void ia64_init_sched_context (void *, bool);
169 static void ia64_set_sched_context (void *);
170 static void ia64_clear_sched_context (void *);
171 static void ia64_free_sched_context (void *);
172 static int ia64_mode_to_int (enum machine_mode);
173 static void ia64_set_sched_flags (spec_info_t);
174 static ds_t ia64_get_insn_spec_ds (rtx);
175 static ds_t ia64_get_insn_checked_ds (rtx);
176 static bool ia64_skip_rtx_p (const_rtx);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static bool ia64_needs_block_p (ds_t);
179 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
180 static int ia64_spec_check_p (rtx);
181 static int ia64_spec_check_src_p (rtx);
182 static rtx gen_tls_get_addr (void);
183 static rtx gen_thread_pointer (void);
184 static int find_gr_spill (enum ia64_frame_regs, int);
185 static int next_scratch_gr_reg (void);
186 static void mark_reg_gr_used_mask (rtx, void *);
187 static void ia64_compute_frame_size (HOST_WIDE_INT);
188 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
189 static void finish_spill_pointers (void);
190 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
191 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
192 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
193 static rtx gen_movdi_x (rtx, rtx, rtx);
194 static rtx gen_fr_spill_x (rtx, rtx, rtx);
195 static rtx gen_fr_restore_x (rtx, rtx, rtx);
196
197 static void ia64_option_override (void);
198 static bool ia64_can_eliminate (const int, const int);
199 static enum machine_mode hfa_element_mode (const_tree, bool);
200 static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
201 tree, int *, int);
202 static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
203 tree, bool);
204 static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
205 const_tree, bool, bool);
206 static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
207 const_tree, bool);
208 static rtx ia64_function_incoming_arg (cumulative_args_t,
209 enum machine_mode, const_tree, bool);
210 static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
211 const_tree, bool);
212 static unsigned int ia64_function_arg_boundary (enum machine_mode,
213 const_tree);
214 static bool ia64_function_ok_for_sibcall (tree, tree);
215 static bool ia64_return_in_memory (const_tree, const_tree);
216 static rtx ia64_function_value (const_tree, const_tree, bool);
217 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
218 static bool ia64_function_value_regno_p (const unsigned int);
219 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
220 reg_class_t);
221 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
222 bool);
223 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
224 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
225 static void fix_range (const char *);
226 static struct machine_function * ia64_init_machine_status (void);
227 static void emit_insn_group_barriers (FILE *);
228 static void emit_all_insn_group_barriers (FILE *);
229 static void final_emit_insn_group_barriers (FILE *);
230 static void emit_predicate_relation_info (void);
231 static void ia64_reorg (void);
232 static bool ia64_in_small_data_p (const_tree);
233 static void process_epilogue (FILE *, rtx, bool, bool);
234
235 static bool ia64_assemble_integer (rtx, unsigned int, int);
236 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
237 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
238 static void ia64_output_function_end_prologue (FILE *);
239
240 static void ia64_print_operand (FILE *, rtx, int);
241 static void ia64_print_operand_address (FILE *, rtx);
242 static bool ia64_print_operand_punct_valid_p (unsigned char code);
243
244 static int ia64_issue_rate (void);
245 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
246 static void ia64_sched_init (FILE *, int, int);
247 static void ia64_sched_init_global (FILE *, int, int);
248 static void ia64_sched_finish_global (FILE *, int);
249 static void ia64_sched_finish (FILE *, int);
250 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
251 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
252 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
253 static int ia64_variable_issue (FILE *, int, rtx, int);
254
255 static void ia64_asm_unwind_emit (FILE *, rtx);
256 static void ia64_asm_emit_except_personality (rtx);
257 static void ia64_asm_init_sections (void);
258
259 static enum unwind_info_type ia64_debug_unwind_info (void);
260
261 static struct bundle_state *get_free_bundle_state (void);
262 static void free_bundle_state (struct bundle_state *);
263 static void initiate_bundle_states (void);
264 static void finish_bundle_states (void);
265 static int insert_bundle_state (struct bundle_state *);
266 static void initiate_bundle_state_table (void);
267 static void finish_bundle_state_table (void);
268 static int try_issue_nops (struct bundle_state *, int);
269 static int try_issue_insn (struct bundle_state *, rtx);
270 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
271 static int get_max_pos (state_t);
272 static int get_template (state_t, int);
273
274 static rtx get_next_important_insn (rtx, rtx);
275 static bool important_for_bundling_p (rtx);
276 static bool unknown_for_bundling_p (rtx);
277 static void bundling (FILE *, int, rtx, rtx);
278
279 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
280 HOST_WIDE_INT, tree);
281 static void ia64_file_start (void);
282 static void ia64_globalize_decl_name (FILE *, tree);
283
284 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
285 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
286 static section *ia64_select_rtx_section (enum machine_mode, rtx,
287 unsigned HOST_WIDE_INT);
288 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
289 ATTRIBUTE_UNUSED;
290 static unsigned int ia64_section_type_flags (tree, const char *, int);
291 static void ia64_init_libfuncs (void)
292 ATTRIBUTE_UNUSED;
293 static void ia64_hpux_init_libfuncs (void)
294 ATTRIBUTE_UNUSED;
295 static void ia64_sysv4_init_libfuncs (void)
296 ATTRIBUTE_UNUSED;
297 static void ia64_vms_init_libfuncs (void)
298 ATTRIBUTE_UNUSED;
299 static void ia64_soft_fp_init_libfuncs (void)
300 ATTRIBUTE_UNUSED;
301 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
302 ATTRIBUTE_UNUSED;
303 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
304 ATTRIBUTE_UNUSED;
305
306 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
307 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
308 static void ia64_encode_section_info (tree, rtx, int);
309 static rtx ia64_struct_value_rtx (tree, int);
310 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
311 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
312 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
313 static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
314 static bool ia64_legitimate_address_p (enum machine_mode, rtx, bool);
315 static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
316 static const char *ia64_mangle_type (const_tree);
317 static const char *ia64_invalid_conversion (const_tree, const_tree);
318 static const char *ia64_invalid_unary_op (int, const_tree);
319 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
320 static enum machine_mode ia64_c_mode_for_suffix (char);
321 static void ia64_trampoline_init (rtx, tree, rtx);
322 static void ia64_override_options_after_change (void);
323 static bool ia64_member_type_forces_blk (const_tree, enum machine_mode);
324
325 static tree ia64_builtin_decl (unsigned, bool);
326
327 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
328 static enum machine_mode ia64_get_reg_raw_mode (int regno);
329 static section * ia64_hpux_function_section (tree, enum node_frequency,
330 bool, bool);
331
332 static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
333 const unsigned char *sel);
334
335 #define MAX_VECT_LEN 8
336
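/* Description of one constant vector permutation, as handed to
   ia64_expand_vec_perm_const_1 by the vec_perm expanders.  */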
337 struct expand_vec_perm_d
338 {
339 rtx target, op0, op1;
340 unsigned char perm[MAX_VECT_LEN];
341 enum machine_mode vmode;
342 unsigned char nelt;
343 bool one_operand_p;
344 bool testing_p;
345 };
346
347 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
348
349 \f
350 /* Table of valid machine attributes. */
351 static const struct attribute_spec ia64_attribute_table[] =
352 {
353 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
354 affects_type_identity } */
355 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
356 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
357 false },
358 #if TARGET_ABI_OPEN_VMS
359 { "common_object", 1, 1, true, false, false,
360 ia64_vms_common_object_attribute, false },
361 #endif
362 { "version_id", 1, 1, true, false, false,
363 ia64_handle_version_id_attribute, false },
364 { NULL, 0, 0, false, false, false, NULL, false }
365 };
366
367 /* Initialize the GCC target structure. */
368 #undef TARGET_ATTRIBUTE_TABLE
369 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
370
371 #undef TARGET_INIT_BUILTINS
372 #define TARGET_INIT_BUILTINS ia64_init_builtins
373
374 #undef TARGET_EXPAND_BUILTIN
375 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
376
377 #undef TARGET_BUILTIN_DECL
378 #define TARGET_BUILTIN_DECL ia64_builtin_decl
379
380 #undef TARGET_ASM_BYTE_OP
381 #define TARGET_ASM_BYTE_OP "\tdata1\t"
382 #undef TARGET_ASM_ALIGNED_HI_OP
383 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
384 #undef TARGET_ASM_ALIGNED_SI_OP
385 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
386 #undef TARGET_ASM_ALIGNED_DI_OP
387 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
388 #undef TARGET_ASM_UNALIGNED_HI_OP
389 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
390 #undef TARGET_ASM_UNALIGNED_SI_OP
391 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
392 #undef TARGET_ASM_UNALIGNED_DI_OP
393 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
394 #undef TARGET_ASM_INTEGER
395 #define TARGET_ASM_INTEGER ia64_assemble_integer
396
397 #undef TARGET_OPTION_OVERRIDE
398 #define TARGET_OPTION_OVERRIDE ia64_option_override
399
400 #undef TARGET_ASM_FUNCTION_PROLOGUE
401 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
402 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
403 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
404 #undef TARGET_ASM_FUNCTION_EPILOGUE
405 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
406
407 #undef TARGET_PRINT_OPERAND
408 #define TARGET_PRINT_OPERAND ia64_print_operand
409 #undef TARGET_PRINT_OPERAND_ADDRESS
410 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
411 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
412 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
413
414 #undef TARGET_IN_SMALL_DATA_P
415 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
416
417 #undef TARGET_SCHED_ADJUST_COST_2
418 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
419 #undef TARGET_SCHED_ISSUE_RATE
420 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
421 #undef TARGET_SCHED_VARIABLE_ISSUE
422 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
423 #undef TARGET_SCHED_INIT
424 #define TARGET_SCHED_INIT ia64_sched_init
425 #undef TARGET_SCHED_FINISH
426 #define TARGET_SCHED_FINISH ia64_sched_finish
427 #undef TARGET_SCHED_INIT_GLOBAL
428 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
429 #undef TARGET_SCHED_FINISH_GLOBAL
430 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
431 #undef TARGET_SCHED_REORDER
432 #define TARGET_SCHED_REORDER ia64_sched_reorder
433 #undef TARGET_SCHED_REORDER2
434 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
435
436 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
437 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
438
439 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
440 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
441
442 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
443 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
444 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
445 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
446
447 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
448 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
449 ia64_first_cycle_multipass_dfa_lookahead_guard
450
451 #undef TARGET_SCHED_DFA_NEW_CYCLE
452 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
453
454 #undef TARGET_SCHED_H_I_D_EXTENDED
455 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
456
457 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
458 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
459
460 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
461 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
462
463 #undef TARGET_SCHED_SET_SCHED_CONTEXT
464 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
465
466 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
467 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
468
469 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
470 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
471
472 #undef TARGET_SCHED_SET_SCHED_FLAGS
473 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
474
475 #undef TARGET_SCHED_GET_INSN_SPEC_DS
476 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
477
478 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
479 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
480
481 #undef TARGET_SCHED_SPECULATE_INSN
482 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
483
484 #undef TARGET_SCHED_NEEDS_BLOCK_P
485 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
486
487 #undef TARGET_SCHED_GEN_SPEC_CHECK
488 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
489
490 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
491 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
492 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
493
494 #undef TARGET_SCHED_SKIP_RTX_P
495 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
496
497 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
498 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
499 #undef TARGET_ARG_PARTIAL_BYTES
500 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
501 #undef TARGET_FUNCTION_ARG
502 #define TARGET_FUNCTION_ARG ia64_function_arg
503 #undef TARGET_FUNCTION_INCOMING_ARG
504 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
505 #undef TARGET_FUNCTION_ARG_ADVANCE
506 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
507 #undef TARGET_FUNCTION_ARG_BOUNDARY
508 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
509
510 #undef TARGET_ASM_OUTPUT_MI_THUNK
511 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
512 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
513 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
514
515 #undef TARGET_ASM_FILE_START
516 #define TARGET_ASM_FILE_START ia64_file_start
517
518 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
519 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
520
521 #undef TARGET_REGISTER_MOVE_COST
522 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
523 #undef TARGET_MEMORY_MOVE_COST
524 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
525 #undef TARGET_RTX_COSTS
526 #define TARGET_RTX_COSTS ia64_rtx_costs
527 #undef TARGET_ADDRESS_COST
528 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
529
530 #undef TARGET_UNSPEC_MAY_TRAP_P
531 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
532
533 #undef TARGET_MACHINE_DEPENDENT_REORG
534 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
535
536 #undef TARGET_ENCODE_SECTION_INFO
537 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
538
539 #undef TARGET_SECTION_TYPE_FLAGS
540 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
541
542 #ifdef HAVE_AS_TLS
543 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
544 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
545 #endif
546
547 /* ??? Investigate. */
548 #if 0
549 #undef TARGET_PROMOTE_PROTOTYPES
550 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
551 #endif
552
553 #undef TARGET_FUNCTION_VALUE
554 #define TARGET_FUNCTION_VALUE ia64_function_value
555 #undef TARGET_LIBCALL_VALUE
556 #define TARGET_LIBCALL_VALUE ia64_libcall_value
557 #undef TARGET_FUNCTION_VALUE_REGNO_P
558 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
559
560 #undef TARGET_STRUCT_VALUE_RTX
561 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
562 #undef TARGET_RETURN_IN_MEMORY
563 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
564 #undef TARGET_SETUP_INCOMING_VARARGS
565 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
566 #undef TARGET_STRICT_ARGUMENT_NAMING
567 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
568 #undef TARGET_MUST_PASS_IN_STACK
569 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
570 #undef TARGET_GET_RAW_RESULT_MODE
571 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
572 #undef TARGET_GET_RAW_ARG_MODE
573 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
574
575 #undef TARGET_MEMBER_TYPE_FORCES_BLK
576 #define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
577
578 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
579 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
580
581 #undef TARGET_ASM_UNWIND_EMIT
582 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
583 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
584 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
585 #undef TARGET_ASM_INIT_SECTIONS
586 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
587
588 #undef TARGET_DEBUG_UNWIND_INFO
589 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
590
591 #undef TARGET_SCALAR_MODE_SUPPORTED_P
592 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
593 #undef TARGET_VECTOR_MODE_SUPPORTED_P
594 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
595
596 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
597 in an order different from the specified program order. */
598 #undef TARGET_RELAXED_ORDERING
599 #define TARGET_RELAXED_ORDERING true
600
601 #undef TARGET_LEGITIMATE_CONSTANT_P
602 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
603 #undef TARGET_LEGITIMATE_ADDRESS_P
604 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
605
606 #undef TARGET_CANNOT_FORCE_CONST_MEM
607 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
608
609 #undef TARGET_MANGLE_TYPE
610 #define TARGET_MANGLE_TYPE ia64_mangle_type
611
612 #undef TARGET_INVALID_CONVERSION
613 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
614 #undef TARGET_INVALID_UNARY_OP
615 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
616 #undef TARGET_INVALID_BINARY_OP
617 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
618
619 #undef TARGET_C_MODE_FOR_SUFFIX
620 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
621
622 #undef TARGET_CAN_ELIMINATE
623 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
624
625 #undef TARGET_TRAMPOLINE_INIT
626 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
627
628 #undef TARGET_CAN_USE_DOLOOP_P
629 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
630 #undef TARGET_INVALID_WITHIN_DOLOOP
631 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
632
633 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
634 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
635
636 #undef TARGET_PREFERRED_RELOAD_CLASS
637 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
638
639 #undef TARGET_DELAY_SCHED2
640 #define TARGET_DELAY_SCHED2 true
641
642 /* Variable tracking should be run after all optimizations which
643 change order of insns. It also needs a valid CFG. */
644 #undef TARGET_DELAY_VARTRACK
645 #define TARGET_DELAY_VARTRACK true
646
647 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
648 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
649
650 struct gcc_target targetm = TARGET_INITIALIZER;
651 \f
652 typedef enum
653 {
654 ADDR_AREA_NORMAL, /* normal address area */
655 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
656 }
657 ia64_addr_area;
658
659 static GTY(()) tree small_ident1;
660 static GTY(()) tree small_ident2;
661
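/* Create, on first use, the identifiers used to recognize the two
   accepted spellings of the "small" model attribute argument.  */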
662 static void
663 init_idents (void)
664 {
665 if (small_ident1 == 0)
666 {
667 small_ident1 = get_identifier ("small");
668 small_ident2 = get_identifier ("__small__");
669 }
670 }
671
672 /* Retrieve the address area that has been chosen for the given decl. */
673
674 static ia64_addr_area
675 ia64_get_addr_area (tree decl)
676 {
677 tree model_attr;
678
679 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
680 if (model_attr)
681 {
682 tree id;
683
684 init_idents ();
685 id = TREE_VALUE (TREE_VALUE (model_attr));
686 if (id == small_ident1 || id == small_ident2)
687 return ADDR_AREA_SMALL;
688 }
689 return ADDR_AREA_NORMAL;
690 }
691
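/* Handle a "model" attribute: the only accepted argument is "small"
   (or "__small__"), and the attribute is rejected on automatic
   variables and on functions.  */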
692 static tree
693 ia64_handle_model_attribute (tree *node, tree name, tree args,
694 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
695 {
696 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
697 ia64_addr_area area;
698 tree arg, decl = *node;
699
700 init_idents ();
701 arg = TREE_VALUE (args);
702 if (arg == small_ident1 || arg == small_ident2)
703 {
704 addr_area = ADDR_AREA_SMALL;
705 }
706 else
707 {
708 warning (OPT_Wattributes, "invalid argument of %qE attribute",
709 name);
710 *no_add_attrs = true;
711 }
712
713 switch (TREE_CODE (decl))
714 {
715 case VAR_DECL:
716 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
717 == FUNCTION_DECL)
718 && !TREE_STATIC (decl))
719 {
720 error_at (DECL_SOURCE_LOCATION (decl),
721 "an address area attribute cannot be specified for "
722 "local variables");
723 *no_add_attrs = true;
724 }
725 area = ia64_get_addr_area (decl);
726 if (area != ADDR_AREA_NORMAL && addr_area != area)
727 {
728 error ("address area of %q+D conflicts with previous "
729 "declaration", decl);
730 *no_add_attrs = true;
731 }
732 break;
733
734 case FUNCTION_DECL:
735 error_at (DECL_SOURCE_LOCATION (decl),
736 "address area attribute cannot be specified for "
737 "functions");
738 *no_add_attrs = true;
739 break;
740
741 default:
742 warning (OPT_Wattributes, "%qE attribute ignored",
743 name);
744 *no_add_attrs = true;
745 break;
746 }
747
748 return NULL_TREE;
749 }
750
751 /* Part of the low level implementation of DEC Ada pragma Common_Object which
752 enables the shared use of variables stored in overlaid linker areas
753 corresponding to the use of Fortran COMMON. */
754
755 static tree
756 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
757 int flags ATTRIBUTE_UNUSED,
758 bool *no_add_attrs)
759 {
760 tree decl = *node;
761 tree id;
762
763 gcc_assert (DECL_P (decl));
764
765 DECL_COMMON (decl) = 1;
766 id = TREE_VALUE (args);
767 if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
768 {
769 error ("%qE attribute requires a string constant argument", name);
770 *no_add_attrs = true;
771 return NULL_TREE;
772 }
773 return NULL_TREE;
774 }
775
776 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
777
778 void
779 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
780 unsigned HOST_WIDE_INT size,
781 unsigned int align)
782 {
783 tree attr = DECL_ATTRIBUTES (decl);
784
785 if (attr)
786 attr = lookup_attribute ("common_object", attr);
787 if (attr)
788 {
789 tree id = TREE_VALUE (TREE_VALUE (attr));
790 const char *name;
791
792 if (TREE_CODE (id) == IDENTIFIER_NODE)
793 name = IDENTIFIER_POINTER (id);
794 else if (TREE_CODE (id) == STRING_CST)
795 name = TREE_STRING_POINTER (id);
796 else
797 abort ();
798
799 fprintf (file, "\t.vms_common\t\"%s\",", name);
800 }
801 else
802 fprintf (file, "%s", COMMON_ASM_OP);
803
804 /* Code from elfos.h. */
805 assemble_name (file, name);
806 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u",
807 size, align / BITS_PER_UNIT);
808
809 fputc ('\n', file);
810 }
811
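/* Record the address area chosen for DECL in the flags of SYMBOL,
   its SYMBOL_REF.  */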
812 static void
813 ia64_encode_addr_area (tree decl, rtx symbol)
814 {
815 int flags;
816
817 flags = SYMBOL_REF_FLAGS (symbol);
818 switch (ia64_get_addr_area (decl))
819 {
820 case ADDR_AREA_NORMAL: break;
821 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
822 default: gcc_unreachable ();
823 }
824 SYMBOL_REF_FLAGS (symbol) = flags;
825 }
826
827 static void
828 ia64_encode_section_info (tree decl, rtx rtl, int first)
829 {
830 default_encode_section_info (decl, rtl, first);
831
832 /* Careful not to prod global register variables. */
833 if (TREE_CODE (decl) == VAR_DECL
834 && GET_CODE (DECL_RTL (decl)) == MEM
835 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
836 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
837 ia64_encode_addr_area (decl, XEXP (rtl, 0));
838 }
839 \f
840 /* Return 1 if the operands of a move are ok. */
841
842 int
843 ia64_move_ok (rtx dst, rtx src)
844 {
845 /* If we're under init_recog_no_volatile, we'll not be able to use
846 memory_operand. So check the code directly and don't worry about
847 the validity of the underlying address, which should have been
848 checked elsewhere anyway. */
849 if (GET_CODE (dst) != MEM)
850 return 1;
851 if (GET_CODE (src) == MEM)
852 return 0;
853 if (register_operand (src, VOIDmode))
854 return 1;
855
856 /* Otherwise, this must be a constant, and it must be 0, 0.0, or 1.0. */
857 if (INTEGRAL_MODE_P (GET_MODE (dst)))
858 return src == const0_rtx;
859 else
860 return satisfies_constraint_G (src);
861 }
862
863 /* Return 1 if the operands are ok for a floating point load pair. */
864
865 int
866 ia64_load_pair_ok (rtx dst, rtx src)
867 {
868 /* ??? There is a thinko in the implementation of the "x" constraint and the
869 FP_REGS class. The constraint will also reject (reg f30:TI) so we must
870 also return false for it. */
871 if (GET_CODE (dst) != REG
872 || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
873 return 0;
874 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
875 return 0;
876 switch (GET_CODE (XEXP (src, 0)))
877 {
878 case REG:
879 case POST_INC:
880 break;
881 case POST_DEC:
882 return 0;
883 case POST_MODIFY:
884 {
885 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
886
887 if (GET_CODE (adjust) != CONST_INT
888 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
889 return 0;
890 }
891 break;
892 default:
893 abort ();
894 }
895 return 1;
896 }
897
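/* Return nonzero if exactly one of OP1 and OP2 satisfies basereg_operand.  */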
898 int
899 addp4_optimize_ok (rtx op1, rtx op2)
900 {
901 return (basereg_operand (op1, GET_MODE(op1)) !=
902 basereg_operand (op2, GET_MODE(op2)));
903 }
904
905 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
906 Return the length of the field, or <= 0 on failure. */
907
908 int
909 ia64_depz_field_mask (rtx rop, rtx rshift)
910 {
911 unsigned HOST_WIDE_INT op = INTVAL (rop);
912 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
913
914 /* Get rid of the zero bits we're shifting in. */
915 op >>= shift;
916
917 /* We must now have a solid block of 1's at bit 0. */
918 return exact_log2 (op + 1);
919 }
920
921 /* Return the TLS model to use for ADDR. */
922
923 static enum tls_model
924 tls_symbolic_operand_type (rtx addr)
925 {
926 enum tls_model tls_kind = TLS_MODEL_NONE;
927
928 if (GET_CODE (addr) == CONST)
929 {
930 if (GET_CODE (XEXP (addr, 0)) == PLUS
931 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
932 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
933 }
934 else if (GET_CODE (addr) == SYMBOL_REF)
935 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
936
937 return tls_kind;
938 }
939
940 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
941 as a base register. */
942
943 static inline bool
944 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
945 {
946 if (strict
947 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
948 return true;
949 else if (!strict
950 && (GENERAL_REGNO_P (REGNO (reg))
951 || !HARD_REGISTER_P (reg)))
952 return true;
953 else
954 return false;
955 }
956
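/* Return true if REG, either a REG or a SUBREG of a REG, is acceptable
   as the base register of a memory address.  */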
957 static bool
958 ia64_legitimate_address_reg (const_rtx reg, bool strict)
959 {
960 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
961 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
962 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
963 return true;
964
965 return false;
966 }
967
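/* Return true if DISP is a valid POST_MODIFY expression for base REG:
   a PLUS of REG and either another valid address register or a constant
   displacement in the range -256..255.  */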
968 static bool
969 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
970 {
971 if (GET_CODE (disp) == PLUS
972 && rtx_equal_p (reg, XEXP (disp, 0))
973 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
974 || (CONST_INT_P (XEXP (disp, 1))
975 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
976 return true;
977
978 return false;
979 }
980
981 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
982
983 static bool
984 ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
985 rtx x, bool strict)
986 {
987 if (ia64_legitimate_address_reg (x, strict))
988 return true;
989 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
990 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
991 && XEXP (x, 0) != arg_pointer_rtx)
992 return true;
993 else if (GET_CODE (x) == POST_MODIFY
994 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
995 && XEXP (x, 0) != arg_pointer_rtx
996 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
997 return true;
998 else
999 return false;
1000 }
1001
1002 /* Return true if X is a constant that is valid for some immediate
1003 field in an instruction. */
1004
1005 static bool
1006 ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
1007 {
1008 switch (GET_CODE (x))
1009 {
1010 case CONST_INT:
1011 case LABEL_REF:
1012 return true;
1013
1014 case CONST_DOUBLE:
1015 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1016 return true;
1017 return satisfies_constraint_G (x);
1018
1019 case CONST:
1020 case SYMBOL_REF:
1021 /* ??? Short term workaround for PR 28490. We must make the code here
1022 match the code in ia64_expand_move and move_operand, even though they
1023 are both technically wrong. */
1024 if (tls_symbolic_operand_type (x) == 0)
1025 {
1026 HOST_WIDE_INT addend = 0;
1027 rtx op = x;
1028
1029 if (GET_CODE (op) == CONST
1030 && GET_CODE (XEXP (op, 0)) == PLUS
1031 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1032 {
1033 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1034 op = XEXP (XEXP (op, 0), 0);
1035 }
1036
1037 if (any_offset_symbol_operand (op, mode)
1038 || function_operand (op, mode))
1039 return true;
1040 if (aligned_offset_symbol_operand (op, mode))
1041 return (addend & 0x3fff) == 0;
1042 return false;
1043 }
1044 return false;
1045
1046 case CONST_VECTOR:
1047 if (mode == V2SFmode)
1048 return satisfies_constraint_Y (x);
1049
1050 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1051 && GET_MODE_SIZE (mode) <= 8);
1052
1053 default:
1054 return false;
1055 }
1056 }
1057
1058 /* Don't allow TLS addresses to get spilled to memory. */
1059
1060 static bool
1061 ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
1062 {
1063 if (mode == RFmode)
1064 return true;
1065 return tls_symbolic_operand_type (x) != 0;
1066 }
1067
1068 /* Expand a symbolic constant load. */
1069
1070 bool
1071 ia64_expand_load_address (rtx dest, rtx src)
1072 {
1073 gcc_assert (GET_CODE (dest) == REG);
1074
1075 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1076 having to pointer-extend the value afterward. Other forms of address
1077 computation below are also more natural to compute as 64-bit quantities.
1078 If we've been given an SImode destination register, change it. */
1079 if (GET_MODE (dest) != Pmode)
1080 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1081 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1082
1083 if (TARGET_NO_PIC)
1084 return false;
1085 if (small_addr_symbolic_operand (src, VOIDmode))
1086 return false;
1087
1088 if (TARGET_AUTO_PIC)
1089 emit_insn (gen_load_gprel64 (dest, src));
1090 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1091 emit_insn (gen_load_fptr (dest, src));
1092 else if (sdata_symbolic_operand (src, VOIDmode))
1093 emit_insn (gen_load_gprel (dest, src));
1094 else
1095 {
1096 HOST_WIDE_INT addend = 0;
1097 rtx tmp;
1098
1099 /* We did split constant offsets in ia64_expand_move, and we did try
1100 to keep them split in move_operand, but we also allowed reload to
1101 rematerialize arbitrary constants rather than spill the value to
1102 the stack and reload it. So we have to be prepared here to split
1103 them apart again. */
1104 if (GET_CODE (src) == CONST)
1105 {
1106 HOST_WIDE_INT hi, lo;
1107
1108 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1109 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1110 hi = hi - lo;
1111
1112 if (lo != 0)
1113 {
1114 addend = lo;
1115 src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
1116 }
1117 }
1118
1119 tmp = gen_rtx_HIGH (Pmode, src);
1120 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1121 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1122
1123 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1124 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1125
1126 if (addend)
1127 {
1128 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1129 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1130 }
1131 }
1132
1133 return true;
1134 }
1135
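/* Return the libfunc symbol for __tls_get_addr; gen_tls_tga caches it
   across calls.  */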
1136 static GTY(()) rtx gen_tls_tga;
1137 static rtx
1138 gen_tls_get_addr (void)
1139 {
1140 if (!gen_tls_tga)
1141 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1142 return gen_tls_tga;
1143 }
1144
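/* Return an rtx for the thread pointer register (r13); thread_pointer_rtx
   caches it across calls.  */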
1145 static GTY(()) rtx thread_pointer_rtx;
1146 static rtx
1147 gen_thread_pointer (void)
1148 {
1149 if (!thread_pointer_rtx)
1150 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1151 return thread_pointer_rtx;
1152 }
1153
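/* Expand a load of the TLS symbol OP1 (plus constant ADDEND) into OP0,
   using access model TLS_KIND.  ORIG_OP1 is the original source operand,
   including any offset.  Return the register holding the result (possibly
   a lowpart of it), or NULL_RTX if the result ended up in OP0 itself.  */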
1154 static rtx
1155 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1156 rtx orig_op1, HOST_WIDE_INT addend)
1157 {
1158 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1159 rtx orig_op0 = op0;
1160 HOST_WIDE_INT addend_lo, addend_hi;
1161
1162 switch (tls_kind)
1163 {
1164 case TLS_MODEL_GLOBAL_DYNAMIC:
1165 start_sequence ();
1166
1167 tga_op1 = gen_reg_rtx (Pmode);
1168 emit_insn (gen_load_dtpmod (tga_op1, op1));
1169
1170 tga_op2 = gen_reg_rtx (Pmode);
1171 emit_insn (gen_load_dtprel (tga_op2, op1));
1172
1173 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1174 LCT_CONST, Pmode, 2, tga_op1,
1175 Pmode, tga_op2, Pmode);
1176
1177 insns = get_insns ();
1178 end_sequence ();
1179
1180 if (GET_MODE (op0) != Pmode)
1181 op0 = tga_ret;
1182 emit_libcall_block (insns, op0, tga_ret, op1);
1183 break;
1184
1185 case TLS_MODEL_LOCAL_DYNAMIC:
1186 /* ??? This isn't the completely proper way to do local-dynamic.
1187 If the call to __tls_get_addr is used only by a single symbol,
1188 then we should (somehow) move the dtprel to the second arg
1189 to avoid the extra add. */
1190 start_sequence ();
1191
1192 tga_op1 = gen_reg_rtx (Pmode);
1193 emit_insn (gen_load_dtpmod (tga_op1, op1));
1194
1195 tga_op2 = const0_rtx;
1196
1197 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1198 LCT_CONST, Pmode, 2, tga_op1,
1199 Pmode, tga_op2, Pmode);
1200
1201 insns = get_insns ();
1202 end_sequence ();
1203
1204 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1205 UNSPEC_LD_BASE);
1206 tmp = gen_reg_rtx (Pmode);
1207 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1208
1209 if (!register_operand (op0, Pmode))
1210 op0 = gen_reg_rtx (Pmode);
1211 if (TARGET_TLS64)
1212 {
1213 emit_insn (gen_load_dtprel (op0, op1));
1214 emit_insn (gen_adddi3 (op0, tmp, op0));
1215 }
1216 else
1217 emit_insn (gen_add_dtprel (op0, op1, tmp));
1218 break;
1219
1220 case TLS_MODEL_INITIAL_EXEC:
1221 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1222 addend_hi = addend - addend_lo;
1223
1224 op1 = plus_constant (Pmode, op1, addend_hi);
1225 addend = addend_lo;
1226
1227 tmp = gen_reg_rtx (Pmode);
1228 emit_insn (gen_load_tprel (tmp, op1));
1229
1230 if (!register_operand (op0, Pmode))
1231 op0 = gen_reg_rtx (Pmode);
1232 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1233 break;
1234
1235 case TLS_MODEL_LOCAL_EXEC:
1236 if (!register_operand (op0, Pmode))
1237 op0 = gen_reg_rtx (Pmode);
1238
1239 op1 = orig_op1;
1240 addend = 0;
1241 if (TARGET_TLS64)
1242 {
1243 emit_insn (gen_load_tprel (op0, op1));
1244 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1245 }
1246 else
1247 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1248 break;
1249
1250 default:
1251 gcc_unreachable ();
1252 }
1253
1254 if (addend)
1255 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1256 orig_op0, 1, OPTAB_DIRECT);
1257 if (orig_op0 == op0)
1258 return NULL_RTX;
1259 if (GET_MODE (orig_op0) == Pmode)
1260 return op0;
1261 return gen_lowpart (GET_MODE (orig_op0), op0);
1262 }
1263
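/* Prepare a move of OP1 into OP0, handling symbolic and TLS source
   operands.  Return the operand to use as the new move source, or
   NULL_RTX if the move has already been emitted in full.  */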
1264 rtx
1265 ia64_expand_move (rtx op0, rtx op1)
1266 {
1267 enum machine_mode mode = GET_MODE (op0);
1268
1269 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1270 op1 = force_reg (mode, op1);
1271
1272 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1273 {
1274 HOST_WIDE_INT addend = 0;
1275 enum tls_model tls_kind;
1276 rtx sym = op1;
1277
1278 if (GET_CODE (op1) == CONST
1279 && GET_CODE (XEXP (op1, 0)) == PLUS
1280 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1281 {
1282 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1283 sym = XEXP (XEXP (op1, 0), 0);
1284 }
1285
1286 tls_kind = tls_symbolic_operand_type (sym);
1287 if (tls_kind)
1288 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1289
1290 if (any_offset_symbol_operand (sym, mode))
1291 addend = 0;
1292 else if (aligned_offset_symbol_operand (sym, mode))
1293 {
1294 HOST_WIDE_INT addend_lo, addend_hi;
1295
1296 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1297 addend_hi = addend - addend_lo;
1298
1299 if (addend_lo != 0)
1300 {
1301 op1 = plus_constant (mode, sym, addend_hi);
1302 addend = addend_lo;
1303 }
1304 else
1305 addend = 0;
1306 }
1307 else
1308 op1 = sym;
1309
1310 if (reload_completed)
1311 {
1312 /* We really should have taken care of this offset earlier. */
1313 gcc_assert (addend == 0);
1314 if (ia64_expand_load_address (op0, op1))
1315 return NULL_RTX;
1316 }
1317
1318 if (addend)
1319 {
1320 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1321
1322 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1323
1324 op1 = expand_simple_binop (mode, PLUS, subtarget,
1325 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1326 if (op0 == op1)
1327 return NULL_RTX;
1328 }
1329 }
1330
1331 return op1;
1332 }
1333
1334 /* Split a move from OP1 to OP0 conditional on COND. */
1335
1336 void
1337 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1338 {
1339 rtx insn, first = get_last_insn ();
1340
1341 emit_move_insn (op0, op1);
1342
1343 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1344 if (INSN_P (insn))
1345 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1346 PATTERN (insn));
1347 }
1348
1349 /* Split a post-reload TImode or TFmode reference into two DImode
1350 components. This is made extra difficult by the fact that we do
1351 not get any scratch registers to work with, because reload cannot
1352 be prevented from giving us a scratch that overlaps the register
1353 pair involved. So instead, when addressing memory, we tweak the
1354 pointer register up and back down with POST_INCs. Or up and not
1355 back down when we can get away with it.
1356
1357 REVERSED is true when the loads must be done in reversed order
1358 (high word first) for correctness. DEAD is true when the pointer
1359 dies with the second insn we generate and therefore the second
1360 address must not carry a postmodify.
1361
1362 May return an insn which is to be emitted after the moves. */
1363
1364 static rtx
1365 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1366 {
1367 rtx fixup = 0;
1368
1369 switch (GET_CODE (in))
1370 {
1371 case REG:
1372 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1373 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1374 break;
1375
1376 case CONST_INT:
1377 case CONST_DOUBLE:
1378 /* Cannot occur reversed. */
1379 gcc_assert (!reversed);
1380
1381 if (GET_MODE (in) != TFmode)
1382 split_double (in, &out[0], &out[1]);
1383 else
1384 /* split_double does not understand how to split a TFmode
1385 quantity into a pair of DImode constants. */
1386 {
1387 REAL_VALUE_TYPE r;
1388 unsigned HOST_WIDE_INT p[2];
1389 long l[4]; /* TFmode is 128 bits */
1390
1391 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1392 real_to_target (l, &r, TFmode);
1393
1394 if (FLOAT_WORDS_BIG_ENDIAN)
1395 {
1396 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1397 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1398 }
1399 else
1400 {
1401 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1402 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1403 }
1404 out[0] = GEN_INT (p[0]);
1405 out[1] = GEN_INT (p[1]);
1406 }
1407 break;
1408
1409 case MEM:
1410 {
1411 rtx base = XEXP (in, 0);
1412 rtx offset;
1413
1414 switch (GET_CODE (base))
1415 {
1416 case REG:
1417 if (!reversed)
1418 {
1419 out[0] = adjust_automodify_address
1420 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1421 out[1] = adjust_automodify_address
1422 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1423 }
1424 else
1425 {
1426 /* Reversal requires a pre-increment, which can only
1427 be done as a separate insn. */
1428 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1429 out[0] = adjust_automodify_address
1430 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1431 out[1] = adjust_address (in, DImode, 0);
1432 }
1433 break;
1434
1435 case POST_INC:
1436 gcc_assert (!reversed && !dead);
1437
1438 /* Just do the increment in two steps. */
1439 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1440 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1441 break;
1442
1443 case POST_DEC:
1444 gcc_assert (!reversed && !dead);
1445
1446 /* Add 8, subtract 24. */
1447 base = XEXP (base, 0);
1448 out[0] = adjust_automodify_address
1449 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1450 out[1] = adjust_automodify_address
1451 (in, DImode,
1452 gen_rtx_POST_MODIFY (Pmode, base,
1453 plus_constant (Pmode, base, -24)),
1454 8);
1455 break;
1456
1457 case POST_MODIFY:
1458 gcc_assert (!reversed && !dead);
1459
1460 /* Extract and adjust the modification. This case is
1461 trickier than the others, because we might have an
1462 index register, or we might have a combined offset that
1463 doesn't fit a signed 9-bit displacement field. We can
1464 assume the incoming expression is already legitimate. */
1465 offset = XEXP (base, 1);
1466 base = XEXP (base, 0);
1467
1468 out[0] = adjust_automodify_address
1469 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1470
1471 if (GET_CODE (XEXP (offset, 1)) == REG)
1472 {
1473 /* Can't adjust the postmodify to match. Emit the
1474 original, then a separate addition insn. */
1475 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1476 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1477 }
1478 else
1479 {
1480 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1481 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1482 {
1483 /* Again the postmodify cannot be made to match,
1484 but in this case it's more efficient to get rid
1485 of the postmodify entirely and fix up with an
1486 add insn. */
1487 out[1] = adjust_automodify_address (in, DImode, base, 8);
1488 fixup = gen_adddi3
1489 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1490 }
1491 else
1492 {
1493 /* Combined offset still fits in the displacement field.
1494 (We cannot overflow it at the high end.) */
1495 out[1] = adjust_automodify_address
1496 (in, DImode, gen_rtx_POST_MODIFY
1497 (Pmode, base, gen_rtx_PLUS
1498 (Pmode, base,
1499 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1500 8);
1501 }
1502 }
1503 break;
1504
1505 default:
1506 gcc_unreachable ();
1507 }
1508 break;
1509 }
1510
1511 default:
1512 gcc_unreachable ();
1513 }
1514
1515 return fixup;
1516 }
1517
1518 /* Split a TImode or TFmode move instruction after reload.
1519 This is used by *movtf_internal and *movti_internal. */
1520 void
1521 ia64_split_tmode_move (rtx operands[])
1522 {
1523 rtx in[2], out[2], insn;
1524 rtx fixup[2];
1525 bool dead = false;
1526 bool reversed = false;
1527
1528 /* It is possible for reload to decide to overwrite a pointer with
1529 the value it points to. In that case we have to do the loads in
1530 the appropriate order so that the pointer is not destroyed too
1531 early. Also we must not generate a postmodify for that second
1532 load, or rws_access_regno will die. And we must not generate a
1533 postmodify for the second load if the destination register
1534 overlaps with the base register. */
1535 if (GET_CODE (operands[1]) == MEM
1536 && reg_overlap_mentioned_p (operands[0], operands[1]))
1537 {
1538 rtx base = XEXP (operands[1], 0);
1539 while (GET_CODE (base) != REG)
1540 base = XEXP (base, 0);
1541
1542 if (REGNO (base) == REGNO (operands[0]))
1543 reversed = true;
1544
1545 if (refers_to_regno_p (REGNO (operands[0]),
1546 REGNO (operands[0])+2,
1547 base, 0))
1548 dead = true;
1549 }
1550 /* Another reason to do the moves in reversed order is if the first
1551 element of the target register pair is also the second element of
1552 the source register pair. */
1553 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1554 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1555 reversed = true;
1556
1557 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1558 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1559
1560 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1561 if (GET_CODE (EXP) == MEM \
1562 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1563 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1564 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1565 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1566
1567 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1568 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1569 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1570
1571 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1572 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1573 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1574
1575 if (fixup[0])
1576 emit_insn (fixup[0]);
1577 if (fixup[1])
1578 emit_insn (fixup[1]);
1579
1580 #undef MAYBE_ADD_REG_INC_NOTE
1581 }
1582
1583 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1584 through memory plus an extra GR scratch register. Except that you can
1585 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1586 SECONDARY_RELOAD_CLASS, but not both.
1587
1588 We got into problems in the first place by allowing a construct like
1589 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1590 This solution attempts to prevent this situation from occurring. When
1591 we see something like the above, we spill the inner register to memory. */
1592
1593 static rtx
1594 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1595 {
1596 if (GET_CODE (in) == SUBREG
1597 && GET_MODE (SUBREG_REG (in)) == TImode
1598 && GET_CODE (SUBREG_REG (in)) == REG)
1599 {
1600 rtx memt = assign_stack_temp (TImode, 16);
1601 emit_move_insn (memt, SUBREG_REG (in));
1602 return adjust_address (memt, mode, 0);
1603 }
1604 else if (force && GET_CODE (in) == REG)
1605 {
1606 rtx memx = assign_stack_temp (mode, 16);
1607 emit_move_insn (memx, in);
1608 return memx;
1609 }
1610 else
1611 return in;
1612 }
1613
1614 /* Expand the movxf or movrf pattern (MODE says which) with the given
1615 OPERANDS, returning true if the pattern should then invoke
1616 DONE. */
1617
1618 bool
1619 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1620 {
1621 rtx op0 = operands[0];
1622
1623 if (GET_CODE (op0) == SUBREG)
1624 op0 = SUBREG_REG (op0);
1625
1626 /* We must support XFmode loads into general registers for stdarg/vararg,
1627 unprototyped calls, and a rare case where a long double is passed as
1628 an argument after a float HFA fills the FP registers. We split them into
1629 DImode loads for convenience. We also need to support XFmode stores
1630 for the last case. This case does not happen for stdarg/vararg routines,
1631 because we do a block store to memory of unnamed arguments. */
1632
1633 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1634 {
1635 rtx out[2];
1636
1637 /* We're hoping to transform everything that deals with XFmode
1638 quantities and GR registers early in the compiler. */
1639 gcc_assert (can_create_pseudo_p ());
1640
1641 /* Struct to register can just use TImode instead. */
1642 if ((GET_CODE (operands[1]) == SUBREG
1643 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1644 || (GET_CODE (operands[1]) == REG
1645 && GR_REGNO_P (REGNO (operands[1]))))
1646 {
1647 rtx op1 = operands[1];
1648
1649 if (GET_CODE (op1) == SUBREG)
1650 op1 = SUBREG_REG (op1);
1651 else
1652 op1 = gen_rtx_REG (TImode, REGNO (op1));
1653
1654 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1655 return true;
1656 }
1657
1658 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1659 {
1660 /* Don't word-swap when reading in the constant. */
1661 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1662 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1663 0, mode));
1664 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1665 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1666 0, mode));
1667 return true;
1668 }
1669
1670 /* If the quantity is in a register not known to be GR, spill it. */
1671 if (register_operand (operands[1], mode))
1672 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1673
1674 gcc_assert (GET_CODE (operands[1]) == MEM);
1675
1676 /* Don't word-swap when reading in the value. */
1677 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1678 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1679
1680 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1681 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1682 return true;
1683 }
1684
1685 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1686 {
1687 /* We're hoping to transform everything that deals with XFmode
1688 quantities and GR registers early in the compiler. */
1689 gcc_assert (can_create_pseudo_p ());
1690
1691 /* Op0 can't be a GR_REG here, as that case is handled above.
1692 If op0 is a register, then we spill op1, so that we now have a
1693 MEM operand. This requires creating an XFmode subreg of a TImode reg
1694 to force the spill. */
1695 if (register_operand (operands[0], mode))
1696 {
1697 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1698 op1 = gen_rtx_SUBREG (mode, op1, 0);
1699 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1700 }
1701
1702 else
1703 {
1704 rtx in[2];
1705
1706 gcc_assert (GET_CODE (operands[0]) == MEM);
1707
1708 /* Don't word-swap when writing out the value. */
1709 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1710 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1711
1712 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1713 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1714 return true;
1715 }
1716 }
1717
1718 if (!reload_in_progress && !reload_completed)
1719 {
1720 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1721
1722 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1723 {
1724 rtx memt, memx, in = operands[1];
1725 if (CONSTANT_P (in))
1726 in = validize_mem (force_const_mem (mode, in));
1727 if (GET_CODE (in) == MEM)
1728 memt = adjust_address (in, TImode, 0);
1729 else
1730 {
1731 memt = assign_stack_temp (TImode, 16);
1732 memx = adjust_address (memt, mode, 0);
1733 emit_move_insn (memx, in);
1734 }
1735 emit_move_insn (op0, memt);
1736 return true;
1737 }
1738
1739 if (!ia64_move_ok (operands[0], operands[1]))
1740 operands[1] = force_reg (mode, operands[1]);
1741 }
1742
1743 return false;
1744 }
1745
1746 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1747 with the expression that holds the compare result (in VOIDmode). */
1748
1749 static GTY(()) rtx cmptf_libfunc;
1750
1751 void
1752 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1753 {
1754 enum rtx_code code = GET_CODE (*expr);
1755 rtx cmp;
1756
1757 /* If we have a BImode input, then we already have a compare result, and
1758 do not need to emit another comparison. */
1759 if (GET_MODE (*op0) == BImode)
1760 {
1761 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1762 cmp = *op0;
1763 }
1764 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1765 magic number as its third argument, that indicates what to do.
1766 The return value is an integer to be compared against zero. */
1767 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1768 {
1769 enum qfcmp_magic {
1770 QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
1771 QCMP_UNORD = 2,
1772 QCMP_EQ = 4,
1773 QCMP_LT = 8,
1774 QCMP_GT = 16
1775 };
1776 int magic;
1777 enum rtx_code ncode;
1778 rtx ret, insns;
1779
1780 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1781 switch (code)
1782 {
1783 /* 1 = equal, 0 = not equal. Equality operators do
1784 not raise FP_INVALID when given a NaN operand. */
1785 case EQ: magic = QCMP_EQ; ncode = NE; break;
1786 case NE: magic = QCMP_EQ; ncode = EQ; break;
1787 /* isunordered() from C99. */
1788 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1789 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1790 /* Relational operators raise FP_INVALID when given
1791 a NaN operand. */
1792 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1793 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1794 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1795 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1796 /* Unordered relational operators do not raise FP_INVALID
1797 when given a NaN operand. */
1798 case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
1799 case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1800 case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
1801 case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1802 /* Not supported. */
1803 case UNEQ:
1804 case LTGT:
1805 default: gcc_unreachable ();
1806 }
1807
1808 start_sequence ();
1809
1810 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1811 *op0, TFmode, *op1, TFmode,
1812 GEN_INT (magic), DImode);
1813 cmp = gen_reg_rtx (BImode);
1814 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1815 gen_rtx_fmt_ee (ncode, BImode,
1816 ret, const0_rtx)));
1817
1818 insns = get_insns ();
1819 end_sequence ();
1820
1821 emit_libcall_block (insns, cmp, cmp,
1822 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1823 code = NE;
1824 }
1825 else
1826 {
1827 cmp = gen_reg_rtx (BImode);
1828 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1829 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1830 code = NE;
1831 }
1832
1833 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1834 *op0 = cmp;
1835 *op1 = const0_rtx;
1836 }
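
/* Worked example of the _U_Qfcmp magic encoding above (the values follow
   directly from the qfcmp_magic enum; the scenario itself is only
   illustrative): a TFmode "a <= b" on HP-UX passes

     magic = QCMP_LT | QCMP_EQ | QCMP_INV = 8 + 4 + 1 = 13

   and tests the library result with NE against zero, while isunordered()
   passes just QCMP_UNORD = 2 and omits QCMP_INV, so no FP_INVALID is
   raised for NaN operands.  */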
1837
1838 /* Generate an integral vector comparison. Return true if the condition has
1839 been reversed, and so the sense of the comparison should be inverted. */
1840
1841 static bool
1842 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1843 rtx dest, rtx op0, rtx op1)
1844 {
1845 bool negate = false;
1846 rtx x;
1847
1848 /* Canonicalize the comparison to EQ, GT, GTU. */
1849 switch (code)
1850 {
1851 case EQ:
1852 case GT:
1853 case GTU:
1854 break;
1855
1856 case NE:
1857 case LE:
1858 case LEU:
1859 code = reverse_condition (code);
1860 negate = true;
1861 break;
1862
1863 case GE:
1864 case GEU:
1865 code = reverse_condition (code);
1866 negate = true;
1867 /* FALLTHRU */
1868
1869 case LT:
1870 case LTU:
1871 code = swap_condition (code);
1872 x = op0, op0 = op1, op1 = x;
1873 break;
1874
1875 default:
1876 gcc_unreachable ();
1877 }
1878
1879 /* Unsigned parallel compare is not supported by the hardware. Play some
1880 tricks to turn this into a signed comparison against 0. */
1881 if (code == GTU)
1882 {
1883 switch (mode)
1884 {
1885 case V2SImode:
1886 {
1887 rtx t1, t2, mask;
1888
1889 /* Subtract (-(INT MAX) - 1) from both operands to make
1890 them signed. */
1891 mask = GEN_INT (0x80000000);
1892 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1893 mask = force_reg (mode, mask);
1894 t1 = gen_reg_rtx (mode);
1895 emit_insn (gen_subv2si3 (t1, op0, mask));
1896 t2 = gen_reg_rtx (mode);
1897 emit_insn (gen_subv2si3 (t2, op1, mask));
1898 op0 = t1;
1899 op1 = t2;
1900 code = GT;
1901 }
1902 break;
1903
1904 case V8QImode:
1905 case V4HImode:
1906 /* Perform a parallel unsigned saturating subtraction. */
1907 x = gen_reg_rtx (mode);
1908 emit_insn (gen_rtx_SET (VOIDmode, x,
1909 gen_rtx_US_MINUS (mode, op0, op1)));
1910
1911 code = EQ;
1912 op0 = x;
1913 op1 = CONST0_RTX (mode);
1914 negate = !negate;
1915 break;
1916
1917 default:
1918 gcc_unreachable ();
1919 }
1920 }
1921
1922 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1923 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1924
1925 return negate;
1926 }
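
/* Numeric illustration of the unsigned-compare trick above (values are
   examples only): for V2SImode, a GTU b is rewritten as
   (a - 0x80000000) GT (b - 0x80000000) in signed arithmetic.  With
   a = 0xffffffff and b = 0x00000001 the biased values are 0x7fffffff and
   0x80000001, and the signed GT gives the same answer as the original
   unsigned compare.  For V8QImode/V4HImode, a GTU b is instead tested as
   (a us- b) != 0, realized as EQ against zero with NEGATE flipped.  */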
1927
1928 /* Emit an integral vector conditional move. */
1929
1930 void
1931 ia64_expand_vecint_cmov (rtx operands[])
1932 {
1933 enum machine_mode mode = GET_MODE (operands[0]);
1934 enum rtx_code code = GET_CODE (operands[3]);
1935 bool negate;
1936 rtx cmp, x, ot, of;
1937
1938 cmp = gen_reg_rtx (mode);
1939 negate = ia64_expand_vecint_compare (code, mode, cmp,
1940 operands[4], operands[5]);
1941
1942 ot = operands[1+negate];
1943 of = operands[2-negate];
1944
1945 if (ot == CONST0_RTX (mode))
1946 {
1947 if (of == CONST0_RTX (mode))
1948 {
1949 emit_move_insn (operands[0], ot);
1950 return;
1951 }
1952
1953 x = gen_rtx_NOT (mode, cmp);
1954 x = gen_rtx_AND (mode, x, of);
1955 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1956 }
1957 else if (of == CONST0_RTX (mode))
1958 {
1959 x = gen_rtx_AND (mode, cmp, ot);
1960 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1961 }
1962 else
1963 {
1964 rtx t, f;
1965
1966 t = gen_reg_rtx (mode);
1967 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1968 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1969
1970 f = gen_reg_rtx (mode);
1971 x = gen_rtx_NOT (mode, cmp);
1972 x = gen_rtx_AND (mode, x, operands[2-negate]);
1973 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1974
1975 x = gen_rtx_IOR (mode, t, f);
1976 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1977 }
1978 }
1979
1980 /* Emit an integral vector min or max operation. Return true if all done. */
1981
1982 bool
1983 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1984 rtx operands[])
1985 {
1986 rtx xops[6];
1987
1988 /* These four combinations are supported directly. */
1989 if (mode == V8QImode && (code == UMIN || code == UMAX))
1990 return false;
1991 if (mode == V4HImode && (code == SMIN || code == SMAX))
1992 return false;
1993
1994 /* This combination can be implemented with only saturating subtraction. */
1995 if (mode == V4HImode && code == UMAX)
1996 {
1997 rtx x, tmp = gen_reg_rtx (mode);
1998
1999 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2000 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
2001
2002 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2003 return true;
2004 }
2005
2006 /* Everything else implemented via vector comparisons. */
2007 xops[0] = operands[0];
2008 xops[4] = xops[1] = operands[1];
2009 xops[5] = xops[2] = operands[2];
2010
2011 switch (code)
2012 {
2013 case UMIN:
2014 code = LTU;
2015 break;
2016 case UMAX:
2017 code = GTU;
2018 break;
2019 case SMIN:
2020 code = LT;
2021 break;
2022 case SMAX:
2023 code = GT;
2024 break;
2025 default:
2026 gcc_unreachable ();
2027 }
2028 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2029
2030 ia64_expand_vecint_cmov (xops);
2031 return true;
2032 }
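
/* Illustration of the V4HImode UMAX identity used above (numbers are
   arbitrary examples): umax (a, b) == (a us- b) + b, since the saturating
   subtraction yields a - b when a > b and 0 otherwise.
   With a = 9, b = 5:  (9 us- 5) + 5 = 4 + 5 = 9.
   With a = 5, b = 9:  (5 us- 9) + 9 = 0 + 9 = 9.  */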
2033
2034 /* The vectors LO and HI each contain N halves of a double-wide vector.
2035 Reassemble either the first N/2 or the second N/2 elements. */
2036
2037 void
2038 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2039 {
2040 enum machine_mode vmode = GET_MODE (lo);
2041 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2042 struct expand_vec_perm_d d;
2043 bool ok;
2044
2045 d.target = gen_lowpart (vmode, out);
2046 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2047 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2048 d.vmode = vmode;
2049 d.nelt = nelt;
2050 d.one_operand_p = false;
2051 d.testing_p = false;
2052
2053 high = (highp ? nelt / 2 : 0);
2054 for (i = 0; i < nelt / 2; ++i)
2055 {
2056 d.perm[i * 2] = i + high;
2057 d.perm[i * 2 + 1] = i + high + nelt;
2058 }
2059
2060 ok = ia64_expand_vec_perm_const_1 (&d);
2061 gcc_assert (ok);
2062 }
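
/* Example of the permutation built above (little-endian layout assumed):
   for V8QImode, nelt == 8, so reassembling the high halves (HIGHP) uses
   HIGH == 4 and the selector

     { 4, 12, 5, 13, 6, 14, 7, 15 }

   which interleaves elements 4..7 of LO with elements 4..7 of HI.  */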
2063
2064 /* Return a vector of the sign-extension of VEC. */
2065
2066 static rtx
2067 ia64_unpack_sign (rtx vec, bool unsignedp)
2068 {
2069 enum machine_mode mode = GET_MODE (vec);
2070 rtx zero = CONST0_RTX (mode);
2071
2072 if (unsignedp)
2073 return zero;
2074 else
2075 {
2076 rtx sign = gen_reg_rtx (mode);
2077 bool neg;
2078
2079 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2080 gcc_assert (!neg);
2081
2082 return sign;
2083 }
2084 }
2085
2086 /* Emit an integral vector unpack operation. */
2087
2088 void
2089 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2090 {
2091 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2092 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2093 }
2094
2095 /* Emit an integral vector widening sum operation. */
2096
2097 void
2098 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2099 {
2100 enum machine_mode wmode;
2101 rtx l, h, t, sign;
2102
2103 sign = ia64_unpack_sign (operands[1], unsignedp);
2104
2105 wmode = GET_MODE (operands[0]);
2106 l = gen_reg_rtx (wmode);
2107 h = gen_reg_rtx (wmode);
2108
2109 ia64_unpack_assemble (l, operands[1], sign, false);
2110 ia64_unpack_assemble (h, operands[1], sign, true);
2111
2112 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2113 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2114 if (t != operands[0])
2115 emit_move_insn (operands[0], t);
2116 }
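
/* Sketch of the widening-sum expansion above (the modes are one plausible
   combination, not the only one): summing a V8QImode vector into a
   V4HImode accumulator first widens the low and high halves of the input
   into two V4HImode temporaries L and H (interleaving the input with SIGN
   to supply the upper bits), then computes
   operands[0] = H + (L + operands[2]) with two vector adds.  */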
2117
2118 /* Emit the appropriate sequence for a call. */
2119
2120 void
2121 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2122 int sibcall_p)
2123 {
2124 rtx insn, b0;
2125
2126 addr = XEXP (addr, 0);
2127 addr = convert_memory_address (DImode, addr);
2128 b0 = gen_rtx_REG (DImode, R_BR (0));
2129
2130 /* ??? Should do this for functions known to bind local too. */
2131 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2132 {
2133 if (sibcall_p)
2134 insn = gen_sibcall_nogp (addr);
2135 else if (! retval)
2136 insn = gen_call_nogp (addr, b0);
2137 else
2138 insn = gen_call_value_nogp (retval, addr, b0);
2139 insn = emit_call_insn (insn);
2140 }
2141 else
2142 {
2143 if (sibcall_p)
2144 insn = gen_sibcall_gp (addr);
2145 else if (! retval)
2146 insn = gen_call_gp (addr, b0);
2147 else
2148 insn = gen_call_value_gp (retval, addr, b0);
2149 insn = emit_call_insn (insn);
2150
2151 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2152 }
2153
2154 if (sibcall_p)
2155 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2156
2157 if (TARGET_ABI_OPEN_VMS)
2158 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2159 gen_rtx_REG (DImode, GR_REG (25)));
2160 }
2161
2162 static void
2163 reg_emitted (enum ia64_frame_regs r)
2164 {
2165 if (emitted_frame_related_regs[r] == 0)
2166 emitted_frame_related_regs[r] = current_frame_info.r[r];
2167 else
2168 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2169 }
2170
2171 static int
2172 get_reg (enum ia64_frame_regs r)
2173 {
2174 reg_emitted (r);
2175 return current_frame_info.r[r];
2176 }
2177
2178 static bool
2179 is_emitted (int regno)
2180 {
2181 unsigned int r;
2182
2183 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2184 if (emitted_frame_related_regs[r] == regno)
2185 return true;
2186 return false;
2187 }
2188
2189 void
2190 ia64_reload_gp (void)
2191 {
2192 rtx tmp;
2193
2194 if (current_frame_info.r[reg_save_gp])
2195 {
2196 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2197 }
2198 else
2199 {
2200 HOST_WIDE_INT offset;
2201 rtx offset_r;
2202
2203 offset = (current_frame_info.spill_cfa_off
2204 + current_frame_info.spill_size);
2205 if (frame_pointer_needed)
2206 {
2207 tmp = hard_frame_pointer_rtx;
2208 offset = -offset;
2209 }
2210 else
2211 {
2212 tmp = stack_pointer_rtx;
2213 offset = current_frame_info.total_size - offset;
2214 }
2215
2216 offset_r = GEN_INT (offset);
2217 if (satisfies_constraint_I (offset_r))
2218 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2219 else
2220 {
2221 emit_move_insn (pic_offset_table_rtx, offset_r);
2222 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2223 pic_offset_table_rtx, tmp));
2224 }
2225
2226 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2227 }
2228
2229 emit_move_insn (pic_offset_table_rtx, tmp);
2230 }
2231
2232 void
2233 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2234 rtx scratch_b, int noreturn_p, int sibcall_p)
2235 {
2236 rtx insn;
2237 bool is_desc = false;
2238
2239 /* If we find we're calling through a register, then we're actually
2240 calling through a descriptor, so load up the values. */
2241 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2242 {
2243 rtx tmp;
2244 bool addr_dead_p;
2245
2246 /* ??? We are currently constrained to *not* use peep2, because
2247 we can legitimately change the global lifetime of the GP
2248 (in the form of killing where previously live). This is
2249 because a call through a descriptor doesn't use the previous
2250 value of the GP, while a direct call does, and we do not
2251 commit to either form until the split here.
2252
2253 That said, this means that we lack precise life info for
2254 whether ADDR is dead after this call. This is not terribly
2255 important, since we can fix things up essentially for free
2256 with the POST_DEC below, but it's nice to not use it when we
2257 can immediately tell it's not necessary. */
2258 addr_dead_p = ((noreturn_p || sibcall_p
2259 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2260 REGNO (addr)))
2261 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2262
2263 /* Load the code address into scratch_b. */
2264 tmp = gen_rtx_POST_INC (Pmode, addr);
2265 tmp = gen_rtx_MEM (Pmode, tmp);
2266 emit_move_insn (scratch_r, tmp);
2267 emit_move_insn (scratch_b, scratch_r);
2268
2269 /* Load the GP address. If ADDR is not dead here, then we must
2270 revert the change made above via the POST_INCREMENT. */
2271 if (!addr_dead_p)
2272 tmp = gen_rtx_POST_DEC (Pmode, addr);
2273 else
2274 tmp = addr;
2275 tmp = gen_rtx_MEM (Pmode, tmp);
2276 emit_move_insn (pic_offset_table_rtx, tmp);
2277
2278 is_desc = true;
2279 addr = scratch_b;
2280 }
2281
2282 if (sibcall_p)
2283 insn = gen_sibcall_nogp (addr);
2284 else if (retval)
2285 insn = gen_call_value_nogp (retval, addr, retaddr);
2286 else
2287 insn = gen_call_nogp (addr, retaddr);
2288 emit_call_insn (insn);
2289
2290 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2291 ia64_reload_gp ();
2292 }
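
/* Sketch of the descriptor case above (standard IA-64 software
   conventions; offsets are for the 64-bit ABI): an indirect call through
   a general register points at a two-word function descriptor,

     [addr + 0]  entry point of the callee
     [addr + 8]  gp value of the callee

   so the split loads the entry point via the POST_INC (advancing ADDR by
   8), copies it into the branch register SCRATCH_B, then loads the new
   gp, undoing the increment with POST_DEC if ADDR is still live.  */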
2293
2294 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2295
2296 This differs from the generic code in that we know about the zero-extending
2297 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2298 also know that ld.acq+cmpxchg.rel equals a full barrier.
2299
2300 The loop we want to generate looks like
2301
2302 cmp_reg = mem;
2303 label:
2304 old_reg = cmp_reg;
2305 new_reg = cmp_reg op val;
2306 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2307 if (cmp_reg != old_reg)
2308 goto label;
2309
2310 Note that we only do the plain load from memory once. Subsequent
2311 iterations use the value loaded by the compare-and-swap pattern. */
2312
2313 void
2314 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2315 rtx old_dst, rtx new_dst, enum memmodel model)
2316 {
2317 enum machine_mode mode = GET_MODE (mem);
2318 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2319 enum insn_code icode;
2320
2321 /* Special case for using fetchadd. */
2322 if ((mode == SImode || mode == DImode)
2323 && (code == PLUS || code == MINUS)
2324 && fetchadd_operand (val, mode))
2325 {
2326 if (code == MINUS)
2327 val = GEN_INT (-INTVAL (val));
2328
2329 if (!old_dst)
2330 old_dst = gen_reg_rtx (mode);
2331
2332 switch (model)
2333 {
2334 case MEMMODEL_ACQ_REL:
2335 case MEMMODEL_SEQ_CST:
2336 emit_insn (gen_memory_barrier ());
2337 /* FALLTHRU */
2338 case MEMMODEL_RELAXED:
2339 case MEMMODEL_ACQUIRE:
2340 case MEMMODEL_CONSUME:
2341 if (mode == SImode)
2342 icode = CODE_FOR_fetchadd_acq_si;
2343 else
2344 icode = CODE_FOR_fetchadd_acq_di;
2345 break;
2346 case MEMMODEL_RELEASE:
2347 if (mode == SImode)
2348 icode = CODE_FOR_fetchadd_rel_si;
2349 else
2350 icode = CODE_FOR_fetchadd_rel_di;
2351 break;
2352
2353 default:
2354 gcc_unreachable ();
2355 }
2356
2357 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2358
2359 if (new_dst)
2360 {
2361 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2362 true, OPTAB_WIDEN);
2363 if (new_reg != new_dst)
2364 emit_move_insn (new_dst, new_reg);
2365 }
2366 return;
2367 }
2368
2369 /* Because of the volatile mem read, we get an ld.acq, which is the
2370 front half of the full barrier. The end half is the cmpxchg.rel.
2371 For relaxed and release memory models, we don't need this. But we
2372 also don't bother trying to prevent it either. */
2373 gcc_assert (model == MEMMODEL_RELAXED
2374 || model == MEMMODEL_RELEASE
2375 || MEM_VOLATILE_P (mem));
2376
2377 old_reg = gen_reg_rtx (DImode);
2378 cmp_reg = gen_reg_rtx (DImode);
2379 label = gen_label_rtx ();
2380
2381 if (mode != DImode)
2382 {
2383 val = simplify_gen_subreg (DImode, val, mode, 0);
2384 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2385 }
2386 else
2387 emit_move_insn (cmp_reg, mem);
2388
2389 emit_label (label);
2390
2391 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2392 emit_move_insn (old_reg, cmp_reg);
2393 emit_move_insn (ar_ccv, cmp_reg);
2394
2395 if (old_dst)
2396 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2397
2398 new_reg = cmp_reg;
2399 if (code == NOT)
2400 {
2401 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2402 true, OPTAB_DIRECT);
2403 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2404 }
2405 else
2406 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2407 true, OPTAB_DIRECT);
2408
2409 if (mode != DImode)
2410 new_reg = gen_lowpart (mode, new_reg);
2411 if (new_dst)
2412 emit_move_insn (new_dst, new_reg);
2413
2414 switch (model)
2415 {
2416 case MEMMODEL_RELAXED:
2417 case MEMMODEL_ACQUIRE:
2418 case MEMMODEL_CONSUME:
2419 switch (mode)
2420 {
2421 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2422 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2423 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2424 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2425 default:
2426 gcc_unreachable ();
2427 }
2428 break;
2429
2430 case MEMMODEL_RELEASE:
2431 case MEMMODEL_ACQ_REL:
2432 case MEMMODEL_SEQ_CST:
2433 switch (mode)
2434 {
2435 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2436 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2437 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2438 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2439 default:
2440 gcc_unreachable ();
2441 }
2442 break;
2443
2444 default:
2445 gcc_unreachable ();
2446 }
2447
2448 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2449
2450 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2451 }
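
/* Example of the fetchadd fast path above (the immediate set is per the
   IA-64 ISA; the source-level fragment is only illustrative): fetchadd
   accepts the increments -16, -8, -4, -1, 1, 4, 8 and 16, so something
   like __atomic_fetch_add (&x, 8, __ATOMIC_ACQUIRE) on an int becomes
   roughly a single fetchadd4.acq, while an addend of 3 fails
   fetchadd_operand and falls through to the ld.acq/cmpxchg.rel retry
   loop generated by the rest of this function.  */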
2452 \f
2453 /* Begin the assembly file. */
2454
2455 static void
2456 ia64_file_start (void)
2457 {
2458 default_file_start ();
2459 emit_safe_across_calls ();
2460 }
2461
2462 void
2463 emit_safe_across_calls (void)
2464 {
2465 unsigned int rs, re;
2466 int out_state;
2467
2468 rs = 1;
2469 out_state = 0;
2470 while (1)
2471 {
2472 while (rs < 64 && call_used_regs[PR_REG (rs)])
2473 rs++;
2474 if (rs >= 64)
2475 break;
2476 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2477 continue;
2478 if (out_state == 0)
2479 {
2480 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2481 out_state = 1;
2482 }
2483 else
2484 fputc (',', asm_out_file);
2485 if (re == rs + 1)
2486 fprintf (asm_out_file, "p%u", rs);
2487 else
2488 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2489 rs = re + 1;
2490 }
2491 if (out_state)
2492 fputc ('\n', asm_out_file);
2493 }
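
/* Example of the directive emitted above: with the usual IA-64 register
   conventions, where p1-p5 and p16-p63 are call-saved, the scan produces
   a single line such as

     .pred.safe_across_calls p1-p5,p16-p63

   telling the assembler which predicate registers it may assume are
   preserved across calls.  */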
2494
2495 /* Globalize a declaration. */
2496
2497 static void
2498 ia64_globalize_decl_name (FILE * stream, tree decl)
2499 {
2500 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2501 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2502 if (version_attr)
2503 {
2504 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2505 const char *p = TREE_STRING_POINTER (v);
2506 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2507 }
2508 targetm.asm_out.globalize_label (stream, name);
2509 if (TREE_CODE (decl) == FUNCTION_DECL)
2510 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2511 }
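
/* Example of the version_id handling above (identifier and version
   string are made up): for

     extern int foo (void) __attribute__((version_id ("20040821")));

   the globalization emits an extra alias directive,

     .alias foo#, "foo{20040821}"

   so references to foo bind to the versioned symbol foo{20040821}.  */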
2512
2513 /* Helper function for ia64_compute_frame_size: find an appropriate general
2514 register to spill some special register to. SPECIAL_SPILL_MASK contains
2515 bits in GR0 to GR31 that have already been allocated by this routine.
2516 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2517
2518 static int
2519 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2520 {
2521 int regno;
2522
2523 if (emitted_frame_related_regs[r] != 0)
2524 {
2525 regno = emitted_frame_related_regs[r];
2526 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2527 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2528 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2529 else if (crtl->is_leaf
2530 && regno >= GR_REG (1) && regno <= GR_REG (31))
2531 current_frame_info.gr_used_mask |= 1 << regno;
2532
2533 return regno;
2534 }
2535
2536 /* If this is a leaf function, first try an otherwise unused
2537 call-clobbered register. */
2538 if (crtl->is_leaf)
2539 {
2540 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2541 if (! df_regs_ever_live_p (regno)
2542 && call_used_regs[regno]
2543 && ! fixed_regs[regno]
2544 && ! global_regs[regno]
2545 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2546 && ! is_emitted (regno))
2547 {
2548 current_frame_info.gr_used_mask |= 1 << regno;
2549 return regno;
2550 }
2551 }
2552
2553 if (try_locals)
2554 {
2555 regno = current_frame_info.n_local_regs;
2556 /* If there is a frame pointer, then we can't use loc79, because
2557 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2558 reg_name switching code in ia64_expand_prologue. */
2559 while (regno < (80 - frame_pointer_needed))
2560 if (! is_emitted (LOC_REG (regno++)))
2561 {
2562 current_frame_info.n_local_regs = regno;
2563 return LOC_REG (regno - 1);
2564 }
2565 }
2566
2567 /* Failed to find a general register to spill to. Must use stack. */
2568 return 0;
2569 }
2570
2571 /* In order to make for nice schedules, we try to allocate every temporary
2572 to a different register. We must of course stay away from call-saved,
2573 fixed, and global registers. We must also stay away from registers
2574 allocated in current_frame_info.gr_used_mask, since those include regs
2575 used all through the prologue.
2576
2577 Any register allocated here must be used immediately. The idea is to
2578 aid scheduling, not to solve data flow problems. */
2579
2580 static int last_scratch_gr_reg;
2581
2582 static int
2583 next_scratch_gr_reg (void)
2584 {
2585 int i, regno;
2586
2587 for (i = 0; i < 32; ++i)
2588 {
2589 regno = (last_scratch_gr_reg + i + 1) & 31;
2590 if (call_used_regs[regno]
2591 && ! fixed_regs[regno]
2592 && ! global_regs[regno]
2593 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2594 {
2595 last_scratch_gr_reg = regno;
2596 return regno;
2597 }
2598 }
2599
2600 /* There must be _something_ available. */
2601 gcc_unreachable ();
2602 }
2603
2604 /* Helper function for ia64_compute_frame_size, called through
2605 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2606
2607 static void
2608 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2609 {
2610 unsigned int regno = REGNO (reg);
2611 if (regno < 32)
2612 {
2613 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2614 for (i = 0; i < n; ++i)
2615 current_frame_info.gr_used_mask |= 1 << (regno + i);
2616 }
2617 }
2618
2619
2620 /* Compute the frame layout for the current function, including the offset
2621 between the frame pointer and the stack pointer, and record it in
2622 current_frame_info. SIZE is the number of bytes of space needed for
local variables. */
2623
2624 static void
2625 ia64_compute_frame_size (HOST_WIDE_INT size)
2626 {
2627 HOST_WIDE_INT total_size;
2628 HOST_WIDE_INT spill_size = 0;
2629 HOST_WIDE_INT extra_spill_size = 0;
2630 HOST_WIDE_INT pretend_args_size;
2631 HARD_REG_SET mask;
2632 int n_spilled = 0;
2633 int spilled_gr_p = 0;
2634 int spilled_fr_p = 0;
2635 unsigned int regno;
2636 int min_regno;
2637 int max_regno;
2638 int i;
2639
2640 if (current_frame_info.initialized)
2641 return;
2642
2643 memset (&current_frame_info, 0, sizeof current_frame_info);
2644 CLEAR_HARD_REG_SET (mask);
2645
2646 /* Don't allocate scratches to the return register. */
2647 diddle_return_value (mark_reg_gr_used_mask, NULL);
2648
2649 /* Don't allocate scratches to the EH scratch registers. */
2650 if (cfun->machine->ia64_eh_epilogue_sp)
2651 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2652 if (cfun->machine->ia64_eh_epilogue_bsp)
2653 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2654
2655 /* Static stack checking uses r2 and r3. */
2656 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
2657 current_frame_info.gr_used_mask |= 0xc;
2658
2659 /* Find the size of the register stack frame. We have only 80 local
2660 registers, because we reserve 8 for the inputs and 8 for the
2661 outputs. */
2662
2663 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2664 since we'll be adjusting that down later. */
2665 regno = LOC_REG (78) + ! frame_pointer_needed;
2666 for (; regno >= LOC_REG (0); regno--)
2667 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2668 break;
2669 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2670
2671 /* For functions marked with the syscall_linkage attribute, we must mark
2672 all eight input registers as in use, so that locals aren't visible to
2673 the caller. */
2674
2675 if (cfun->machine->n_varargs > 0
2676 || lookup_attribute ("syscall_linkage",
2677 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2678 current_frame_info.n_input_regs = 8;
2679 else
2680 {
2681 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2682 if (df_regs_ever_live_p (regno))
2683 break;
2684 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2685 }
2686
2687 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2688 if (df_regs_ever_live_p (regno))
2689 break;
2690 i = regno - OUT_REG (0) + 1;
2691
2692 #ifndef PROFILE_HOOK
2693 /* When -p profiling, we need one output register for the mcount argument.
2694 Likewise for -a profiling for the bb_init_func argument. For -ax
2695 profiling, we need two output registers for the two bb_init_trace_func
2696 arguments. */
2697 if (crtl->profile)
2698 i = MAX (i, 1);
2699 #endif
2700 current_frame_info.n_output_regs = i;
2701
2702 /* ??? No rotating register support yet. */
2703 current_frame_info.n_rotate_regs = 0;
2704
2705 /* Discover which registers need spilling, and how much room that
2706 will take. Begin with floating point and general registers,
2707 which will always wind up on the stack. */
2708
2709 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2710 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2711 {
2712 SET_HARD_REG_BIT (mask, regno);
2713 spill_size += 16;
2714 n_spilled += 1;
2715 spilled_fr_p = 1;
2716 }
2717
2718 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2719 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2720 {
2721 SET_HARD_REG_BIT (mask, regno);
2722 spill_size += 8;
2723 n_spilled += 1;
2724 spilled_gr_p = 1;
2725 }
2726
2727 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2728 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2729 {
2730 SET_HARD_REG_BIT (mask, regno);
2731 spill_size += 8;
2732 n_spilled += 1;
2733 }
2734
2735 /* Now come all special registers that might get saved in other
2736 general registers. */
2737
2738 if (frame_pointer_needed)
2739 {
2740 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2741 /* If we did not get a register, then we take LOC79. This is guaranteed
2742 to be free, even if regs_ever_live is already set, because this is
2743 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2744 as we don't count loc79 above. */
2745 if (current_frame_info.r[reg_fp] == 0)
2746 {
2747 current_frame_info.r[reg_fp] = LOC_REG (79);
2748 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2749 }
2750 }
2751
2752 if (! crtl->is_leaf)
2753 {
2754 /* Emit a save of BR0 if we call other functions. Do this even
2755 if this function doesn't return, as EH depends on this to be
2756 able to unwind the stack. */
2757 SET_HARD_REG_BIT (mask, BR_REG (0));
2758
2759 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2760 if (current_frame_info.r[reg_save_b0] == 0)
2761 {
2762 extra_spill_size += 8;
2763 n_spilled += 1;
2764 }
2765
2766 /* Similarly for ar.pfs. */
2767 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2768 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2769 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2770 {
2771 extra_spill_size += 8;
2772 n_spilled += 1;
2773 }
2774
2775 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2776 registers are clobbered, so we fall back to the stack. */
2777 current_frame_info.r[reg_save_gp]
2778 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2779 if (current_frame_info.r[reg_save_gp] == 0)
2780 {
2781 SET_HARD_REG_BIT (mask, GR_REG (1));
2782 spill_size += 8;
2783 n_spilled += 1;
2784 }
2785 }
2786 else
2787 {
2788 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2789 {
2790 SET_HARD_REG_BIT (mask, BR_REG (0));
2791 extra_spill_size += 8;
2792 n_spilled += 1;
2793 }
2794
2795 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2796 {
2797 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2798 current_frame_info.r[reg_save_ar_pfs]
2799 = find_gr_spill (reg_save_ar_pfs, 1);
2800 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2801 {
2802 extra_spill_size += 8;
2803 n_spilled += 1;
2804 }
2805 }
2806 }
2807
2808 /* Unwind descriptor hackery: things are most efficient if we allocate
2809 consecutive GR save registers for RP, PFS, FP in that order. However,
2810 it is absolutely critical that FP get the only hard register that's
2811 guaranteed to be free, so we allocated it first. If all three did
2812 happen to be allocated hard regs, and are consecutive, rearrange them
2813 into the preferred order now.
2814
2815 If we have already emitted code for any of those registers,
2816 then it's already too late to change. */
2817 min_regno = MIN (current_frame_info.r[reg_fp],
2818 MIN (current_frame_info.r[reg_save_b0],
2819 current_frame_info.r[reg_save_ar_pfs]));
2820 max_regno = MAX (current_frame_info.r[reg_fp],
2821 MAX (current_frame_info.r[reg_save_b0],
2822 current_frame_info.r[reg_save_ar_pfs]));
2823 if (min_regno > 0
2824 && min_regno + 2 == max_regno
2825 && (current_frame_info.r[reg_fp] == min_regno + 1
2826 || current_frame_info.r[reg_save_b0] == min_regno + 1
2827 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2828 && (emitted_frame_related_regs[reg_save_b0] == 0
2829 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2830 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2831 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2832 && (emitted_frame_related_regs[reg_fp] == 0
2833 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2834 {
2835 current_frame_info.r[reg_save_b0] = min_regno;
2836 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2837 current_frame_info.r[reg_fp] = min_regno + 2;
2838 }
2839
2840 /* See if we need to store the predicate register block. */
2841 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2842 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2843 break;
2844 if (regno <= PR_REG (63))
2845 {
2846 SET_HARD_REG_BIT (mask, PR_REG (0));
2847 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2848 if (current_frame_info.r[reg_save_pr] == 0)
2849 {
2850 extra_spill_size += 8;
2851 n_spilled += 1;
2852 }
2853
2854 /* ??? Mark them all as used so that register renaming and such
2855 are free to use them. */
2856 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2857 df_set_regs_ever_live (regno, true);
2858 }
2859
2860 /* If we're forced to use st8.spill, we're forced to save and restore
2861 ar.unat as well. The check for existing liveness allows inline asm
2862 to touch ar.unat. */
2863 if (spilled_gr_p || cfun->machine->n_varargs
2864 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2865 {
2866 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2867 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2868 current_frame_info.r[reg_save_ar_unat]
2869 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2870 if (current_frame_info.r[reg_save_ar_unat] == 0)
2871 {
2872 extra_spill_size += 8;
2873 n_spilled += 1;
2874 }
2875 }
2876
2877 if (df_regs_ever_live_p (AR_LC_REGNUM))
2878 {
2879 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2880 current_frame_info.r[reg_save_ar_lc]
2881 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2882 if (current_frame_info.r[reg_save_ar_lc] == 0)
2883 {
2884 extra_spill_size += 8;
2885 n_spilled += 1;
2886 }
2887 }
2888
2889 /* If we have an odd number of words of pretend arguments written to
2890 the stack, then the FR save area will be unaligned. We round the
2891 size of this area up to keep things 16 byte aligned. */
2892 if (spilled_fr_p)
2893 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2894 else
2895 pretend_args_size = crtl->args.pretend_args_size;
2896
2897 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2898 + crtl->outgoing_args_size);
2899 total_size = IA64_STACK_ALIGN (total_size);
2900
2901 /* We always use the 16-byte scratch area provided by the caller, but
2902 if we are a leaf function, there's no one to which we need to provide
2903 a scratch area. However, if the function allocates dynamic stack space,
2904 the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
2905 so we need to cope. */
2906 if (crtl->is_leaf && !cfun->calls_alloca)
2907 total_size = MAX (0, total_size - 16);
2908
2909 current_frame_info.total_size = total_size;
2910 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2911 current_frame_info.spill_size = spill_size;
2912 current_frame_info.extra_spill_size = extra_spill_size;
2913 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2914 current_frame_info.n_spilled = n_spilled;
2915 current_frame_info.initialized = reload_completed;
2916 }
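
/* Worked example of the size computation above (all numbers invented):
   with spill_size = 32, extra_spill_size = 8, size = 40 bytes of locals,
   no pretend args and no outgoing args, total_size = 80, which is already
   16-byte aligned; a leaf function that does not call alloca then drops
   the caller-provided 16-byte scratch area, leaving a 64-byte frame.  */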
2917
2918 /* Worker function for TARGET_CAN_ELIMINATE. */
2919
2920 bool
2921 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2922 {
2923 return (to == BR_REG (0) ? crtl->is_leaf : true);
2924 }
2925
2926 /* Compute the initial difference between the specified pair of registers. */
2927
2928 HOST_WIDE_INT
2929 ia64_initial_elimination_offset (int from, int to)
2930 {
2931 HOST_WIDE_INT offset;
2932
2933 ia64_compute_frame_size (get_frame_size ());
2934 switch (from)
2935 {
2936 case FRAME_POINTER_REGNUM:
2937 switch (to)
2938 {
2939 case HARD_FRAME_POINTER_REGNUM:
2940 offset = -current_frame_info.total_size;
2941 if (!crtl->is_leaf || cfun->calls_alloca)
2942 offset += 16 + crtl->outgoing_args_size;
2943 break;
2944
2945 case STACK_POINTER_REGNUM:
2946 offset = 0;
2947 if (!crtl->is_leaf || cfun->calls_alloca)
2948 offset += 16 + crtl->outgoing_args_size;
2949 break;
2950
2951 default:
2952 gcc_unreachable ();
2953 }
2954 break;
2955
2956 case ARG_POINTER_REGNUM:
2957 /* Arguments start above the 16 byte save area, unless stdarg
2958 in which case we store through the 16 byte save area. */
2959 switch (to)
2960 {
2961 case HARD_FRAME_POINTER_REGNUM:
2962 offset = 16 - crtl->args.pretend_args_size;
2963 break;
2964
2965 case STACK_POINTER_REGNUM:
2966 offset = (current_frame_info.total_size
2967 + 16 - crtl->args.pretend_args_size);
2968 break;
2969
2970 default:
2971 gcc_unreachable ();
2972 }
2973 break;
2974
2975 default:
2976 gcc_unreachable ();
2977 }
2978
2979 return offset;
2980 }
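
/* Worked example of the offsets above (numbers invented): for a non-leaf
   function with total_size = 64, no outgoing args and no pretend args,
   the FRAME_POINTER -> STACK_POINTER offset is 0 + 16 = 16 and the
   ARG_POINTER -> STACK_POINTER offset is 64 + 16 = 80, reflecting the
   16-byte scratch area that incoming arguments sit above.  */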
2981
2982 /* If there are more than a trivial number of register spills, we use
2983 two interleaved iterators so that we can get two memory references
2984 per insn group.
2985
2986 In order to simplify things in the prologue and epilogue expanders,
2987 we use helper functions to fix up the memory references after the
2988 fact with the appropriate offsets to a POST_MODIFY memory mode.
2989 The following data structure tracks the state of the two iterators
2990 while insns are being emitted. */
2991
2992 struct spill_fill_data
2993 {
2994 rtx init_after; /* point at which to emit initializations */
2995 rtx init_reg[2]; /* initial base register */
2996 rtx iter_reg[2]; /* the iterator registers */
2997 rtx *prev_addr[2]; /* address of last memory use */
2998 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2999 HOST_WIDE_INT prev_off[2]; /* last offset */
3000 int n_iter; /* number of iterators in use */
3001 int next_iter; /* next iterator to use */
3002 unsigned int save_gr_used_mask;
3003 };
3004
3005 static struct spill_fill_data spill_fill_data;
3006
3007 static void
3008 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3009 {
3010 int i;
3011
3012 spill_fill_data.init_after = get_last_insn ();
3013 spill_fill_data.init_reg[0] = init_reg;
3014 spill_fill_data.init_reg[1] = init_reg;
3015 spill_fill_data.prev_addr[0] = NULL;
3016 spill_fill_data.prev_addr[1] = NULL;
3017 spill_fill_data.prev_insn[0] = NULL;
3018 spill_fill_data.prev_insn[1] = NULL;
3019 spill_fill_data.prev_off[0] = cfa_off;
3020 spill_fill_data.prev_off[1] = cfa_off;
3021 spill_fill_data.next_iter = 0;
3022 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3023
3024 spill_fill_data.n_iter = 1 + (n_spills > 2);
3025 for (i = 0; i < spill_fill_data.n_iter; ++i)
3026 {
3027 int regno = next_scratch_gr_reg ();
3028 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3029 current_frame_info.gr_used_mask |= 1 << regno;
3030 }
3031 }
3032
3033 static void
3034 finish_spill_pointers (void)
3035 {
3036 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3037 }
3038
3039 static rtx
3040 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3041 {
3042 int iter = spill_fill_data.next_iter;
3043 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3044 rtx disp_rtx = GEN_INT (disp);
3045 rtx mem;
3046
3047 if (spill_fill_data.prev_addr[iter])
3048 {
3049 if (satisfies_constraint_N (disp_rtx))
3050 {
3051 *spill_fill_data.prev_addr[iter]
3052 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3053 gen_rtx_PLUS (DImode,
3054 spill_fill_data.iter_reg[iter],
3055 disp_rtx));
3056 add_reg_note (spill_fill_data.prev_insn[iter],
3057 REG_INC, spill_fill_data.iter_reg[iter]);
3058 }
3059 else
3060 {
3061 /* ??? Could use register post_modify for loads. */
3062 if (!satisfies_constraint_I (disp_rtx))
3063 {
3064 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3065 emit_move_insn (tmp, disp_rtx);
3066 disp_rtx = tmp;
3067 }
3068 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3069 spill_fill_data.iter_reg[iter], disp_rtx));
3070 }
3071 }
3072 /* Micro-optimization: if we've created a frame pointer, it's at
3073 CFA 0, which may allow the real iterator to be initialized lower,
3074 slightly increasing parallelism. Also, if there are few saves
3075 it may eliminate the iterator entirely. */
3076 else if (disp == 0
3077 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3078 && frame_pointer_needed)
3079 {
3080 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3081 set_mem_alias_set (mem, get_varargs_alias_set ());
3082 return mem;
3083 }
3084 else
3085 {
3086 rtx seq, insn;
3087
3088 if (disp == 0)
3089 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3090 spill_fill_data.init_reg[iter]);
3091 else
3092 {
3093 start_sequence ();
3094
3095 if (!satisfies_constraint_I (disp_rtx))
3096 {
3097 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3098 emit_move_insn (tmp, disp_rtx);
3099 disp_rtx = tmp;
3100 }
3101
3102 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3103 spill_fill_data.init_reg[iter],
3104 disp_rtx));
3105
3106 seq = get_insns ();
3107 end_sequence ();
3108 }
3109
3110 /* Careful for being the first insn in a sequence. */
3111 if (spill_fill_data.init_after)
3112 insn = emit_insn_after (seq, spill_fill_data.init_after);
3113 else
3114 {
3115 rtx first = get_insns ();
3116 if (first)
3117 insn = emit_insn_before (seq, first);
3118 else
3119 insn = emit_insn (seq);
3120 }
3121 spill_fill_data.init_after = insn;
3122 }
3123
3124 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3125
3126 /* ??? Not all of the spills are for varargs, but some of them are.
3127 The rest of the spills belong in an alias set of their own. But
3128 it doesn't actually hurt to include them here. */
3129 set_mem_alias_set (mem, get_varargs_alias_set ());
3130
3131 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3132 spill_fill_data.prev_off[iter] = cfa_off;
3133
3134 if (++iter >= spill_fill_data.n_iter)
3135 iter = 0;
3136 spill_fill_data.next_iter = iter;
3137
3138 return mem;
3139 }
3140
3141 static void
3142 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3143 rtx frame_reg)
3144 {
3145 int iter = spill_fill_data.next_iter;
3146 rtx mem, insn;
3147
3148 mem = spill_restore_mem (reg, cfa_off);
3149 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3150 spill_fill_data.prev_insn[iter] = insn;
3151
3152 if (frame_reg)
3153 {
3154 rtx base;
3155 HOST_WIDE_INT off;
3156
3157 RTX_FRAME_RELATED_P (insn) = 1;
3158
3159 /* Don't even pretend that the unwind code can intuit its way
3160 through a pair of interleaved post_modify iterators. Just
3161 provide the correct answer. */
3162
3163 if (frame_pointer_needed)
3164 {
3165 base = hard_frame_pointer_rtx;
3166 off = - cfa_off;
3167 }
3168 else
3169 {
3170 base = stack_pointer_rtx;
3171 off = current_frame_info.total_size - cfa_off;
3172 }
3173
3174 add_reg_note (insn, REG_CFA_OFFSET,
3175 gen_rtx_SET (VOIDmode,
3176 gen_rtx_MEM (GET_MODE (reg),
3177 plus_constant (Pmode,
3178 base, off)),
3179 frame_reg));
3180 }
3181 }
3182
3183 static void
3184 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3185 {
3186 int iter = spill_fill_data.next_iter;
3187 rtx insn;
3188
3189 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3190 GEN_INT (cfa_off)));
3191 spill_fill_data.prev_insn[iter] = insn;
3192 }
3193
3194 /* Wrapper functions that discard the CONST_INT spill offset. These
3195 exist so that we can give gr_spill/gr_fill the offset they need and
3196 use a consistent function interface. */
3197
3198 static rtx
3199 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3200 {
3201 return gen_movdi (dest, src);
3202 }
3203
3204 static rtx
3205 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3206 {
3207 return gen_fr_spill (dest, src);
3208 }
3209
3210 static rtx
3211 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3212 {
3213 return gen_fr_restore (dest, src);
3214 }
3215
3216 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
3217
3218 /* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
3219 #define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
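
/* Worked example of the formula above: for the worst case of N = 96
   stacked registers (the whole r32-r127 window), the backing store needs
   (96 + 96/63 + 1) * 8 = 98 * 8 = 784 bytes, i.e. the "(96 + 2) * 8"
   figure quoted in the reserve-size comment further down.  */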
3220
3221 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
3222 inclusive. These are offsets from the current stack pointer. BS_SIZE
3223 is the size of the backing store. ??? This clobbers r2 and r3. */
3224
3225 static void
3226 ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
3227 int bs_size)
3228 {
3229 rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
3230 rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
3231 rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
3232
3233 /* On the IA-64 there is a second stack in memory, namely the Backing Store
3234 of the Register Stack Engine. We also need to probe it after checking
3235 that the 2 stacks don't overlap. */
3236 emit_insn (gen_bsp_value (r3));
3237 emit_move_insn (r2, GEN_INT (-(first + size)));
3238
3239 /* Compare current value of BSP and SP registers. */
3240 emit_insn (gen_rtx_SET (VOIDmode, p6,
3241 gen_rtx_fmt_ee (LTU, BImode,
3242 r3, stack_pointer_rtx)));
3243
3244 /* Compute the address of the probe for the Backing Store (which grows
3245 towards higher addresses). We probe only at the first offset of
3246 the next page because some OSes (e.g. Linux/ia64) only extend the
3247 backing store when this specific address is hit (but generate a SEGV
3248 on other addresses). Page size is the worst case (4KB). The reserve
3249 size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
3250 Also compute the address of the last probe for the memory stack
3251 (which grows towards lower addresses). */
3252 emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
3253 emit_insn (gen_rtx_SET (VOIDmode, r2,
3254 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3255
3256 /* Compare them and raise SEGV if the former has topped the latter. */
3257 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3258 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3259 gen_rtx_SET (VOIDmode, p6,
3260 gen_rtx_fmt_ee (GEU, BImode,
3261 r3, r2))));
3262 emit_insn (gen_rtx_SET (VOIDmode,
3263 gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
3264 const0_rtx),
3265 const0_rtx));
3266 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3267 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3268 gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
3269 GEN_INT (11))));
3270
3271 /* Probe the Backing Store if necessary. */
3272 if (bs_size > 0)
3273 emit_stack_probe (r3);
3274
3275 /* Probe the memory stack if necessary. */
3276 if (size == 0)
3277 ;
3278
3279 /* See if we have a constant small number of probes to generate. If so,
3280 that's the easy case. */
3281 else if (size <= PROBE_INTERVAL)
3282 emit_stack_probe (r2);
3283
3284 /* The run-time loop is made up of 8 insns in the generic case while this
3285 compile-time loop is made up of 5+2*(n-2) insns for n # of intervals. */
3286 else if (size <= 4 * PROBE_INTERVAL)
3287 {
3288 HOST_WIDE_INT i;
3289
3290 emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
3291 emit_insn (gen_rtx_SET (VOIDmode, r2,
3292 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3293 emit_stack_probe (r2);
3294
3295 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
3296 it exceeds SIZE. If only two probes are needed, this will not
3297 generate any code. Then probe at FIRST + SIZE. */
3298 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
3299 {
3300 emit_insn (gen_rtx_SET (VOIDmode, r2,
3301 plus_constant (Pmode, r2, -PROBE_INTERVAL)));
3302 emit_stack_probe (r2);
3303 }
3304
3305 emit_insn (gen_rtx_SET (VOIDmode, r2,
3306 plus_constant (Pmode, r2,
3307 (i - PROBE_INTERVAL) - size)));
3308 emit_stack_probe (r2);
3309 }
3310
3311 /* Otherwise, do the same as above, but in a loop. Note that we must be
3312 extra careful with variables wrapping around because we might be at
3313 the very top (or the very bottom) of the address space and we have
3314 to be able to handle this case properly; in particular, we use an
3315 equality test for the loop condition. */
3316 else
3317 {
3318 HOST_WIDE_INT rounded_size;
3319
3320 emit_move_insn (r2, GEN_INT (-first));
3321
3322
3323 /* Step 1: round SIZE to the previous multiple of the interval. */
3324
3325 rounded_size = size & -PROBE_INTERVAL;
3326
3327
3328 /* Step 2: compute initial and final value of the loop counter. */
3329
3330 /* TEST_ADDR = SP + FIRST. */
3331 emit_insn (gen_rtx_SET (VOIDmode, r2,
3332 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3333
3334 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
3335 if (rounded_size > (1 << 21))
3336 {
3337 emit_move_insn (r3, GEN_INT (-rounded_size));
3338 emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
3339 }
3340 else
3341 emit_insn (gen_rtx_SET (VOIDmode, r3,
3342 gen_rtx_PLUS (Pmode, r2,
3343 GEN_INT (-rounded_size))));
3344
3345
3346 /* Step 3: the loop
3347
3348 while (TEST_ADDR != LAST_ADDR)
3349 {
3350 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
3351 probe at TEST_ADDR
3352 }
3353
3354 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
3355 until it is equal to ROUNDED_SIZE. */
3356
3357 emit_insn (gen_probe_stack_range (r2, r2, r3));
3358
3359
3360 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
3361 that SIZE is equal to ROUNDED_SIZE. */
3362
3363 /* TEMP = SIZE - ROUNDED_SIZE. */
3364 if (size != rounded_size)
3365 {
3366 emit_insn (gen_rtx_SET (VOIDmode, r2,
3367 plus_constant (Pmode, r2,
3368 rounded_size - size)));
3369 emit_stack_probe (r2);
3370 }
3371 }
3372
3373 /* Make sure nothing is scheduled before we are done. */
3374 emit_insn (gen_blockage ());
3375 }
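
/* Worked example of the constant-probe case above (sizes invented and
   PROBE_INTERVAL assumed to be 4 KB): with FIRST = F and SIZE = 10240,
   SIZE <= 4 * PROBE_INTERVAL, so the expansion probes at sp - (F + 4096)
   and sp - (F + 8192), then finishes with a probe at sp - (F + 10240),
   i.e. exactly at FIRST + SIZE as required.  Larger sizes instead go
   through the probe_stack_range run-time loop.  */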
3376
3377 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
3378 absolute addresses. */
3379
3380 const char *
3381 output_probe_stack_range (rtx reg1, rtx reg2)
3382 {
3383 static int labelno = 0;
3384 char loop_lab[32], end_lab[32];
3385 rtx xops[3];
3386
3387 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
3388 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
3389
3390 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
3391
3392 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
3393 xops[0] = reg1;
3394 xops[1] = reg2;
3395 xops[2] = gen_rtx_REG (BImode, PR_REG (6));
3396 output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
3397 fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
3398 assemble_name_raw (asm_out_file, end_lab);
3399 fputc ('\n', asm_out_file);
3400
3401 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
3402 xops[1] = GEN_INT (-PROBE_INTERVAL);
3403 output_asm_insn ("addl %0 = %1, %0", xops);
3404 fputs ("\t;;\n", asm_out_file);
3405
3406 /* Probe at TEST_ADDR and branch. */
3407 output_asm_insn ("probe.w.fault %0, 0", xops);
3408 fprintf (asm_out_file, "\tbr ");
3409 assemble_name_raw (asm_out_file, loop_lab);
3410 fputc ('\n', asm_out_file);
3411
3412 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
3413
3414 return "";
3415 }
3416
3417 /* Called after register allocation to add any instructions needed for the
3418 prologue. Using a prologue insn is favored compared to putting all of the
3419 instructions in output_function_prologue(), since it allows the scheduler
3420 to intermix instructions with the saves of the caller saved registers. In
3421 some cases, it might be necessary to emit a barrier instruction as the last
3422 insn to prevent such scheduling.
3423
3424 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3425 so that the debug info generation code can handle them properly.
3426
3427 The register save area is laid out like so:
3428 cfa+16
3429 [ varargs spill area ]
3430 [ fr register spill area ]
3431 [ br register spill area ]
3432 [ ar register spill area ]
3433 [ pr register spill area ]
3434 [ gr register spill area ] */
3435
3436 /* ??? Get inefficient code when the frame size is larger than can fit in an
3437 adds instruction. */
3438
3439 void
3440 ia64_expand_prologue (void)
3441 {
3442 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3443 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3444 rtx reg, alt_reg;
3445
3446 ia64_compute_frame_size (get_frame_size ());
3447 last_scratch_gr_reg = 15;
3448
3449 if (flag_stack_usage_info)
3450 current_function_static_stack_size = current_frame_info.total_size;
3451
3452 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3453 {
3454 HOST_WIDE_INT size = current_frame_info.total_size;
3455 int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
3456 + current_frame_info.n_local_regs);
3457
3458 if (crtl->is_leaf && !cfun->calls_alloca)
3459 {
3460 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
3461 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3462 size - STACK_CHECK_PROTECT,
3463 bs_size);
3464 else if (size + bs_size > STACK_CHECK_PROTECT)
3465 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
3466 }
3467 else if (size + bs_size > 0)
3468 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
3469 }
3470
3471 if (dump_file)
3472 {
3473 fprintf (dump_file, "ia64 frame related registers "
3474 "recorded in current_frame_info.r[]:\n");
3475 #define PRINTREG(a) if (current_frame_info.r[a]) \
3476 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3477 PRINTREG(reg_fp);
3478 PRINTREG(reg_save_b0);
3479 PRINTREG(reg_save_pr);
3480 PRINTREG(reg_save_ar_pfs);
3481 PRINTREG(reg_save_ar_unat);
3482 PRINTREG(reg_save_ar_lc);
3483 PRINTREG(reg_save_gp);
3484 #undef PRINTREG
3485 }
3486
3487 /* If there is no epilogue, then we don't need some prologue insns.
3488 We need to avoid emitting the dead prologue insns, because flow
3489 will complain about them. */
3490 if (optimize)
3491 {
3492 edge e;
3493 edge_iterator ei;
3494
3495 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
3496 if ((e->flags & EDGE_FAKE) == 0
3497 && (e->flags & EDGE_FALLTHRU) != 0)
3498 break;
3499 epilogue_p = (e != NULL);
3500 }
3501 else
3502 epilogue_p = 1;
3503
3504 /* Set the local, input, and output register names. We need to do this
3505 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3506 half. If we use in/loc/out register names, then we get assembler errors
3507 in crtn.S because there is no alloc insn or regstk directive in there. */
3508 if (! TARGET_REG_NAMES)
3509 {
3510 int inputs = current_frame_info.n_input_regs;
3511 int locals = current_frame_info.n_local_regs;
3512 int outputs = current_frame_info.n_output_regs;
3513
3514 for (i = 0; i < inputs; i++)
3515 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3516 for (i = 0; i < locals; i++)
3517 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3518 for (i = 0; i < outputs; i++)
3519 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3520 }
3521
3522 /* Set the frame pointer register name. The regnum is logically loc79,
3523 but of course we'll not have allocated that many locals. Rather than
3524 worrying about renumbering the existing rtxs, we adjust the name. */
3525 /* ??? This code means that we can never use one local register when
3526 there is a frame pointer. loc79 gets wasted in this case, as it is
3527 renamed to a register that will never be used. See also the try_locals
3528 code in find_gr_spill. */
3529 if (current_frame_info.r[reg_fp])
3530 {
3531 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3532 reg_names[HARD_FRAME_POINTER_REGNUM]
3533 = reg_names[current_frame_info.r[reg_fp]];
3534 reg_names[current_frame_info.r[reg_fp]] = tmp;
3535 }
3536
3537 /* We don't need an alloc instruction if we've used no outputs or locals. */
3538 if (current_frame_info.n_local_regs == 0
3539 && current_frame_info.n_output_regs == 0
3540 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3541 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3542 {
3543 /* If there is no alloc, but there are input registers used, then we
3544 need a .regstk directive. */
3545 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3546 ar_pfs_save_reg = NULL_RTX;
3547 }
3548 else
3549 {
3550 current_frame_info.need_regstk = 0;
3551
3552 if (current_frame_info.r[reg_save_ar_pfs])
3553 {
3554 regno = current_frame_info.r[reg_save_ar_pfs];
3555 reg_emitted (reg_save_ar_pfs);
3556 }
3557 else
3558 regno = next_scratch_gr_reg ();
3559 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3560
3561 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3562 GEN_INT (current_frame_info.n_input_regs),
3563 GEN_INT (current_frame_info.n_local_regs),
3564 GEN_INT (current_frame_info.n_output_regs),
3565 GEN_INT (current_frame_info.n_rotate_regs)));
3566 if (current_frame_info.r[reg_save_ar_pfs])
3567 {
3568 RTX_FRAME_RELATED_P (insn) = 1;
3569 add_reg_note (insn, REG_CFA_REGISTER,
3570 gen_rtx_SET (VOIDmode,
3571 ar_pfs_save_reg,
3572 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3573 }
3574 }
3575
3576 /* Set up frame pointer, stack pointer, and spill iterators. */
3577
3578 n_varargs = cfun->machine->n_varargs;
3579 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3580 stack_pointer_rtx, 0);
3581
3582 if (frame_pointer_needed)
3583 {
3584 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3585 RTX_FRAME_RELATED_P (insn) = 1;
3586
3587 /* Force the unwind info to recognize this as defining a new CFA,
3588 rather than some temp register setup. */
3589 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3590 }
3591
3592 if (current_frame_info.total_size != 0)
3593 {
3594 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3595 rtx offset;
3596
3597 if (satisfies_constraint_I (frame_size_rtx))
3598 offset = frame_size_rtx;
3599 else
3600 {
3601 regno = next_scratch_gr_reg ();
3602 offset = gen_rtx_REG (DImode, regno);
3603 emit_move_insn (offset, frame_size_rtx);
3604 }
3605
3606 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3607 stack_pointer_rtx, offset));
3608
3609 if (! frame_pointer_needed)
3610 {
3611 RTX_FRAME_RELATED_P (insn) = 1;
3612 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3613 gen_rtx_SET (VOIDmode,
3614 stack_pointer_rtx,
3615 gen_rtx_PLUS (DImode,
3616 stack_pointer_rtx,
3617 frame_size_rtx)));
3618 }
3619
3620 /* ??? At this point we must generate a magic insn that appears to
3621 modify the stack pointer, the frame pointer, and all spill
3622 iterators. This would allow the most scheduling freedom. For
3623 now, just hard stop. */
3624 emit_insn (gen_blockage ());
3625 }
3626
3627 /* Must copy out ar.unat before doing any integer spills. */
3628 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3629 {
3630 if (current_frame_info.r[reg_save_ar_unat])
3631 {
3632 ar_unat_save_reg
3633 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3634 reg_emitted (reg_save_ar_unat);
3635 }
3636 else
3637 {
3638 alt_regno = next_scratch_gr_reg ();
3639 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3640 current_frame_info.gr_used_mask |= 1 << alt_regno;
3641 }
3642
3643 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3644 insn = emit_move_insn (ar_unat_save_reg, reg);
3645 if (current_frame_info.r[reg_save_ar_unat])
3646 {
3647 RTX_FRAME_RELATED_P (insn) = 1;
3648 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3649 }
3650
3651 /* Even if we're not going to generate an epilogue, we still
3652 need to save the register so that EH works. */
3653 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3654 emit_insn (gen_prologue_use (ar_unat_save_reg));
3655 }
3656 else
3657 ar_unat_save_reg = NULL_RTX;
3658
3659 /* Spill all varargs registers. Do this before spilling any GR registers,
3660 since we want the UNAT bits for the GR registers to override the UNAT
3661 bits from varargs, which we don't care about. */
3662
3663 cfa_off = -16;
3664 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3665 {
3666 reg = gen_rtx_REG (DImode, regno);
3667 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3668 }
3669
3670 /* Locate the bottom of the register save area. */
3671 cfa_off = (current_frame_info.spill_cfa_off
3672 + current_frame_info.spill_size
3673 + current_frame_info.extra_spill_size);
3674
3675 /* Save the predicate register block either in a register or in memory. */
3676 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3677 {
3678 reg = gen_rtx_REG (DImode, PR_REG (0));
3679 if (current_frame_info.r[reg_save_pr] != 0)
3680 {
3681 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3682 reg_emitted (reg_save_pr);
3683 insn = emit_move_insn (alt_reg, reg);
3684
3685 /* ??? Denote pr spill/fill by a DImode move that modifies all
3686 64 hard registers. */
3687 RTX_FRAME_RELATED_P (insn) = 1;
3688 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3689
3690 /* Even if we're not going to generate an epilogue, we still
3691 need to save the register so that EH works. */
3692 if (! epilogue_p)
3693 emit_insn (gen_prologue_use (alt_reg));
3694 }
3695 else
3696 {
3697 alt_regno = next_scratch_gr_reg ();
3698 alt_reg = gen_rtx_REG (DImode, alt_regno);
3699 insn = emit_move_insn (alt_reg, reg);
3700 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3701 cfa_off -= 8;
3702 }
3703 }
3704
3705 /* Handle AR regs in numerical order. All of them get special handling. */
3706 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3707 && current_frame_info.r[reg_save_ar_unat] == 0)
3708 {
3709 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3710 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3711 cfa_off -= 8;
3712 }
3713
3714 /* The alloc insn already copied ar.pfs into a general register. The
3715 only thing we have to do now is copy that register to a stack slot
3716 if we'd not allocated a local register for the job. */
3717 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3718 && current_frame_info.r[reg_save_ar_pfs] == 0)
3719 {
3720 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3721 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3722 cfa_off -= 8;
3723 }
3724
3725 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3726 {
3727 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3728 if (current_frame_info.r[reg_save_ar_lc] != 0)
3729 {
3730 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3731 reg_emitted (reg_save_ar_lc);
3732 insn = emit_move_insn (alt_reg, reg);
3733 RTX_FRAME_RELATED_P (insn) = 1;
3734 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3735
3736 /* Even if we're not going to generate an epilogue, we still
3737 need to save the register so that EH works. */
3738 if (! epilogue_p)
3739 emit_insn (gen_prologue_use (alt_reg));
3740 }
3741 else
3742 {
3743 alt_regno = next_scratch_gr_reg ();
3744 alt_reg = gen_rtx_REG (DImode, alt_regno);
3745 emit_move_insn (alt_reg, reg);
3746 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3747 cfa_off -= 8;
3748 }
3749 }
3750
3751 /* Save the return pointer. */
3752 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3753 {
3754 reg = gen_rtx_REG (DImode, BR_REG (0));
3755 if (current_frame_info.r[reg_save_b0] != 0)
3756 {
3757 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3758 reg_emitted (reg_save_b0);
3759 insn = emit_move_insn (alt_reg, reg);
3760 RTX_FRAME_RELATED_P (insn) = 1;
3761 add_reg_note (insn, REG_CFA_REGISTER,
3762 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3763
3764 /* Even if we're not going to generate an epilogue, we still
3765 need to save the register so that EH works. */
3766 if (! epilogue_p)
3767 emit_insn (gen_prologue_use (alt_reg));
3768 }
3769 else
3770 {
3771 alt_regno = next_scratch_gr_reg ();
3772 alt_reg = gen_rtx_REG (DImode, alt_regno);
3773 emit_move_insn (alt_reg, reg);
3774 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3775 cfa_off -= 8;
3776 }
3777 }
3778
3779 if (current_frame_info.r[reg_save_gp])
3780 {
3781 reg_emitted (reg_save_gp);
3782 insn = emit_move_insn (gen_rtx_REG (DImode,
3783 current_frame_info.r[reg_save_gp]),
3784 pic_offset_table_rtx);
3785 }
3786
3787 /* We should now be at the base of the gr/br/fr spill area. */
3788 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3789 + current_frame_info.spill_size));
3790
3791 /* Spill all general registers. */
3792 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3793 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3794 {
3795 reg = gen_rtx_REG (DImode, regno);
3796 do_spill (gen_gr_spill, reg, cfa_off, reg);
3797 cfa_off -= 8;
3798 }
3799
3800 /* Spill the rest of the BR registers. */
3801 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3802 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3803 {
3804 alt_regno = next_scratch_gr_reg ();
3805 alt_reg = gen_rtx_REG (DImode, alt_regno);
3806 reg = gen_rtx_REG (DImode, regno);
3807 emit_move_insn (alt_reg, reg);
3808 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3809 cfa_off -= 8;
3810 }
3811
3812 /* Align the frame and spill all FR registers. */
3813 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3814 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3815 {
3816 gcc_assert (!(cfa_off & 15));
3817 reg = gen_rtx_REG (XFmode, regno);
3818 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3819 cfa_off -= 16;
3820 }
3821
3822 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3823
3824 finish_spill_pointers ();
3825 }
3826
3827 /* Output the textual info surrounding the prologue. */
3828
3829 void
3830 ia64_start_function (FILE *file, const char *fnname,
3831 tree decl ATTRIBUTE_UNUSED)
3832 {
3833 #if TARGET_ABI_OPEN_VMS
3834 vms_start_function (fnname);
3835 #endif
3836
3837 fputs ("\t.proc ", file);
3838 assemble_name (file, fnname);
3839 fputc ('\n', file);
3840 ASM_OUTPUT_LABEL (file, fnname);
3841 }
3842
3843 /* Called after register allocation to add any instructions needed for the
3844 epilogue. Using an epilogue insn is favored compared to putting all of the
3845 instructions in output_function_epilogue(), since it allows the scheduler
3846 to intermix instructions with the restores of the caller saved registers. In
3847 some cases, it might be necessary to emit a barrier instruction as the last
3848 insn to prevent such scheduling. */
3849
3850 void
3851 ia64_expand_epilogue (int sibcall_p)
3852 {
3853 rtx insn, reg, alt_reg, ar_unat_save_reg;
3854 int regno, alt_regno, cfa_off;
3855
3856 ia64_compute_frame_size (get_frame_size ());
3857
3858 /* If there is a frame pointer, then we use it instead of the stack
3859 pointer, so that the stack pointer does not need to be valid when
3860 the epilogue starts. See EXIT_IGNORE_STACK. */
3861 if (frame_pointer_needed)
3862 setup_spill_pointers (current_frame_info.n_spilled,
3863 hard_frame_pointer_rtx, 0);
3864 else
3865 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3866 current_frame_info.total_size);
3867
3868 if (current_frame_info.total_size != 0)
3869 {
3870 /* ??? At this point we must generate a magic insn that appears to
3871 modify the spill iterators and the frame pointer. This would
3872 allow the most scheduling freedom. For now, just hard stop. */
3873 emit_insn (gen_blockage ());
3874 }
3875
3876 /* Locate the bottom of the register save area. */
3877 cfa_off = (current_frame_info.spill_cfa_off
3878 + current_frame_info.spill_size
3879 + current_frame_info.extra_spill_size);
3880
3881 /* Restore the predicate registers. */
3882 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3883 {
3884 if (current_frame_info.r[reg_save_pr] != 0)
3885 {
3886 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3887 reg_emitted (reg_save_pr);
3888 }
3889 else
3890 {
3891 alt_regno = next_scratch_gr_reg ();
3892 alt_reg = gen_rtx_REG (DImode, alt_regno);
3893 do_restore (gen_movdi_x, alt_reg, cfa_off);
3894 cfa_off -= 8;
3895 }
3896 reg = gen_rtx_REG (DImode, PR_REG (0));
3897 emit_move_insn (reg, alt_reg);
3898 }
3899
3900 /* Restore the application registers. */
3901
3902 /* Load the saved unat from the stack, but do not restore it until
3903 after the GRs have been restored. */
3904 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3905 {
3906 if (current_frame_info.r[reg_save_ar_unat] != 0)
3907 {
3908 ar_unat_save_reg
3909 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3910 reg_emitted (reg_save_ar_unat);
3911 }
3912 else
3913 {
3914 alt_regno = next_scratch_gr_reg ();
3915 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3916 current_frame_info.gr_used_mask |= 1 << alt_regno;
3917 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3918 cfa_off -= 8;
3919 }
3920 }
3921 else
3922 ar_unat_save_reg = NULL_RTX;
3923
3924 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3925 {
3926 reg_emitted (reg_save_ar_pfs);
3927 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3928 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3929 emit_move_insn (reg, alt_reg);
3930 }
3931 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3932 {
3933 alt_regno = next_scratch_gr_reg ();
3934 alt_reg = gen_rtx_REG (DImode, alt_regno);
3935 do_restore (gen_movdi_x, alt_reg, cfa_off);
3936 cfa_off -= 8;
3937 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3938 emit_move_insn (reg, alt_reg);
3939 }
3940
3941 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3942 {
3943 if (current_frame_info.r[reg_save_ar_lc] != 0)
3944 {
3945 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3946 reg_emitted (reg_save_ar_lc);
3947 }
3948 else
3949 {
3950 alt_regno = next_scratch_gr_reg ();
3951 alt_reg = gen_rtx_REG (DImode, alt_regno);
3952 do_restore (gen_movdi_x, alt_reg, cfa_off);
3953 cfa_off -= 8;
3954 }
3955 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3956 emit_move_insn (reg, alt_reg);
3957 }
3958
3959 /* Restore the return pointer. */
3960 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3961 {
3962 if (current_frame_info.r[reg_save_b0] != 0)
3963 {
3964 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3965 reg_emitted (reg_save_b0);
3966 }
3967 else
3968 {
3969 alt_regno = next_scratch_gr_reg ();
3970 alt_reg = gen_rtx_REG (DImode, alt_regno);
3971 do_restore (gen_movdi_x, alt_reg, cfa_off);
3972 cfa_off -= 8;
3973 }
3974 reg = gen_rtx_REG (DImode, BR_REG (0));
3975 emit_move_insn (reg, alt_reg);
3976 }
3977
3978 /* We should now be at the base of the gr/br/fr spill area. */
3979 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3980 + current_frame_info.spill_size));
3981
3982 /* The GP may be stored on the stack in the prologue, but it's
3983 never restored in the epilogue. Skip the stack slot. */
3984 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3985 cfa_off -= 8;
3986
3987 /* Restore all general registers. */
3988 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3989 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3990 {
3991 reg = gen_rtx_REG (DImode, regno);
3992 do_restore (gen_gr_restore, reg, cfa_off);
3993 cfa_off -= 8;
3994 }
3995
3996 /* Restore the branch registers. */
3997 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3998 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3999 {
4000 alt_regno = next_scratch_gr_reg ();
4001 alt_reg = gen_rtx_REG (DImode, alt_regno);
4002 do_restore (gen_movdi_x, alt_reg, cfa_off);
4003 cfa_off -= 8;
4004 reg = gen_rtx_REG (DImode, regno);
4005 emit_move_insn (reg, alt_reg);
4006 }
4007
4008 /* Restore floating point registers. */
4009 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
4010 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4011 {
4012 gcc_assert (!(cfa_off & 15));
4013 reg = gen_rtx_REG (XFmode, regno);
4014 do_restore (gen_fr_restore_x, reg, cfa_off);
4015 cfa_off -= 16;
4016 }
4017
4018 /* Restore ar.unat for real. */
4019 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
4020 {
4021 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
4022 emit_move_insn (reg, ar_unat_save_reg);
4023 }
4024
4025 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
4026
4027 finish_spill_pointers ();
4028
4029 if (current_frame_info.total_size
4030 || cfun->machine->ia64_eh_epilogue_sp
4031 || frame_pointer_needed)
4032 {
4033 /* ??? At this point we must generate a magic insn that appears to
4034 modify the spill iterators, the stack pointer, and the frame
4035 pointer. This would allow the most scheduling freedom. For now,
4036 just hard stop. */
4037 emit_insn (gen_blockage ());
4038 }
4039
4040 if (cfun->machine->ia64_eh_epilogue_sp)
4041 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
4042 else if (frame_pointer_needed)
4043 {
4044 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
4045 RTX_FRAME_RELATED_P (insn) = 1;
4046 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
4047 }
4048 else if (current_frame_info.total_size)
4049 {
4050 rtx offset, frame_size_rtx;
4051
4052 frame_size_rtx = GEN_INT (current_frame_info.total_size);
4053 if (satisfies_constraint_I (frame_size_rtx))
4054 offset = frame_size_rtx;
4055 else
4056 {
4057 regno = next_scratch_gr_reg ();
4058 offset = gen_rtx_REG (DImode, regno);
4059 emit_move_insn (offset, frame_size_rtx);
4060 }
4061
4062 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
4063 offset));
4064
4065 RTX_FRAME_RELATED_P (insn) = 1;
4066 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4067 gen_rtx_SET (VOIDmode,
4068 stack_pointer_rtx,
4069 gen_rtx_PLUS (DImode,
4070 stack_pointer_rtx,
4071 frame_size_rtx)));
4072 }
4073
4074 if (cfun->machine->ia64_eh_epilogue_bsp)
4075 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
4076
4077 if (! sibcall_p)
4078 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
4079 else
4080 {
4081 int fp = GR_REG (2);
4082 /* We need a throwaway register here; r0 and r1 are reserved,
4083 so r2 is the first available call clobbered register. If
4084 there was a frame_pointer register, we may have swapped the
4085 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
4086 sure we're using the string "r2" when emitting the register
4087 name for the assembler. */
4088 if (current_frame_info.r[reg_fp]
4089 && current_frame_info.r[reg_fp] == GR_REG (2))
4090 fp = HARD_FRAME_POINTER_REGNUM;
4091
4092 /* We must emit an alloc to force the input registers to become output
4093 registers. Otherwise, if the callee tries to pass its parameters
4094 through to another call without an intervening alloc, then these
4095 values get lost. */
4096 /* ??? We don't need to preserve all input registers. We only need to
4097 preserve those input registers used as arguments to the sibling call.
4098 It is unclear how to compute that number here. */
4099 if (current_frame_info.n_input_regs != 0)
4100 {
4101 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
4102
4103 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
4104 const0_rtx, const0_rtx,
4105 n_inputs, const0_rtx));
4106 RTX_FRAME_RELATED_P (insn) = 1;
4107
4108 /* ??? We need to mark the alloc as frame-related so that it gets
4109 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
4110 But there's nothing dwarf2 related to be done wrt the register
4111 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
4112 the empty parallel means dwarf2out will not see anything. */
4113 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4114 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
4115 }
4116 }
4117 }
4118
4119 /* Return 1 if br.ret can do all the work required to return from a
4120 function. */
4121
4122 int
4123 ia64_direct_return (void)
4124 {
4125 if (reload_completed && ! frame_pointer_needed)
4126 {
4127 ia64_compute_frame_size (get_frame_size ());
4128
4129 return (current_frame_info.total_size == 0
4130 && current_frame_info.n_spilled == 0
4131 && current_frame_info.r[reg_save_b0] == 0
4132 && current_frame_info.r[reg_save_pr] == 0
4133 && current_frame_info.r[reg_save_ar_pfs] == 0
4134 && current_frame_info.r[reg_save_ar_unat] == 0
4135 && current_frame_info.r[reg_save_ar_lc] == 0);
4136 }
4137 return 0;
4138 }
4139
4140 /* Return the magic cookie that we use to hold the return address
4141 during early compilation. */
4142
4143 rtx
4144 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
4145 {
4146 if (count != 0)
4147 return NULL;
4148 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
4149 }
4150
4151 /* Split this value after reload, now that we know where the return
4152 address is saved. */
4153
4154 void
4155 ia64_split_return_addr_rtx (rtx dest)
4156 {
4157 rtx src;
4158
4159 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
4160 {
4161 if (current_frame_info.r[reg_save_b0] != 0)
4162 {
4163 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
4164 reg_emitted (reg_save_b0);
4165 }
4166 else
4167 {
4168 HOST_WIDE_INT off;
4169 unsigned int regno;
4170 rtx off_r;
4171
4172 /* Compute offset from CFA for BR0. */
4173 /* ??? Must be kept in sync with ia64_expand_prologue. */
4174 off = (current_frame_info.spill_cfa_off
4175 + current_frame_info.spill_size);
4176 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4177 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4178 off -= 8;
4179
4180 /* Convert CFA offset to a register based offset. */
4181 if (frame_pointer_needed)
4182 src = hard_frame_pointer_rtx;
4183 else
4184 {
4185 src = stack_pointer_rtx;
4186 off += current_frame_info.total_size;
4187 }
4188
4189 /* Load address into scratch register. */
4190 off_r = GEN_INT (off);
4191 if (satisfies_constraint_I (off_r))
4192 emit_insn (gen_adddi3 (dest, src, off_r));
4193 else
4194 {
4195 emit_move_insn (dest, off_r);
4196 emit_insn (gen_adddi3 (dest, src, dest));
4197 }
4198
4199 src = gen_rtx_MEM (Pmode, dest);
4200 }
4201 }
4202 else
4203 src = gen_rtx_REG (DImode, BR_REG (0));
4204
4205 emit_move_insn (dest, src);
4206 }
4207
4208 int
4209 ia64_hard_regno_rename_ok (int from, int to)
4210 {
4211 /* Don't clobber any of the registers we reserved for the prologue. */
4212 unsigned int r;
4213
4214 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4215 if (to == current_frame_info.r[r]
4216 || from == current_frame_info.r[r]
4217 || to == emitted_frame_related_regs[r]
4218 || from == emitted_frame_related_regs[r])
4219 return 0;
4220
4221 /* Don't use output registers outside the register frame. */
4222 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4223 return 0;
4224
4225 /* Retain even/oddness on predicate register pairs. */
4226 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4227 return (from & 1) == (to & 1);
4228
4229 return 1;
4230 }
4231
4232 /* Target hook for assembling integer objects. Handle word-sized
4233 aligned objects and detect the cases when @fptr is needed. */
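/* For instance (illustrative), with 64-bit pointers an aligned function
   pointer to "foo" is emitted as "data8 @fptr(foo)" and an unaligned one as
   "data8.ua @fptr(foo)"; everything else is handed to
   default_assemble_integer.  */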
4234
4235 static bool
4236 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4237 {
4238 if (size == POINTER_SIZE / BITS_PER_UNIT
4239 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4240 && GET_CODE (x) == SYMBOL_REF
4241 && SYMBOL_REF_FUNCTION_P (x))
4242 {
4243 static const char * const directive[2][2] = {
4244 /* 64-bit pointer */ /* 32-bit pointer */
4245 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4246 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4247 };
4248 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4249 output_addr_const (asm_out_file, x);
4250 fputs (")\n", asm_out_file);
4251 return true;
4252 }
4253 return default_assemble_integer (x, size, aligned_p);
4254 }
4255
4256 /* Emit the function prologue. */
4257
4258 static void
4259 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4260 {
4261 int mask, grsave, grsave_prev;
4262
4263 if (current_frame_info.need_regstk)
4264 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4265 current_frame_info.n_input_regs,
4266 current_frame_info.n_local_regs,
4267 current_frame_info.n_output_regs,
4268 current_frame_info.n_rotate_regs);
4269
4270 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4271 return;
4272
4273 /* Emit the .prologue directive. */
4274
4275 mask = 0;
4276 grsave = grsave_prev = 0;
4277 if (current_frame_info.r[reg_save_b0] != 0)
4278 {
4279 mask |= 8;
4280 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4281 }
4282 if (current_frame_info.r[reg_save_ar_pfs] != 0
4283 && (grsave_prev == 0
4284 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4285 {
4286 mask |= 4;
4287 if (grsave_prev == 0)
4288 grsave = current_frame_info.r[reg_save_ar_pfs];
4289 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4290 }
4291 if (current_frame_info.r[reg_fp] != 0
4292 && (grsave_prev == 0
4293 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4294 {
4295 mask |= 2;
4296 if (grsave_prev == 0)
4297 grsave = HARD_FRAME_POINTER_REGNUM;
4298 grsave_prev = current_frame_info.r[reg_fp];
4299 }
4300 if (current_frame_info.r[reg_save_pr] != 0
4301 && (grsave_prev == 0
4302 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4303 {
4304 mask |= 1;
4305 if (grsave_prev == 0)
4306 grsave = current_frame_info.r[reg_save_pr];
4307 }
4308
4309 if (mask && TARGET_GNU_AS)
4310 fprintf (file, "\t.prologue %d, %d\n", mask,
4311 ia64_dbx_register_number (grsave));
4312 else
4313 fputs ("\t.prologue\n", file);
4314
4315 /* Emit a .spill directive, if necessary, to relocate the base of
4316 the register spill area. */
4317 if (current_frame_info.spill_cfa_off != -16)
4318 fprintf (file, "\t.spill %ld\n",
4319 (long) (current_frame_info.spill_cfa_off
4320 + current_frame_info.spill_size));
4321 }
4322
4323 /* Emit the .body directive at the scheduled end of the prologue. */
4324
4325 static void
4326 ia64_output_function_end_prologue (FILE *file)
4327 {
4328 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4329 return;
4330
4331 fputs ("\t.body\n", file);
4332 }
4333
4334 /* Emit the function epilogue. */
4335
4336 static void
4337 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4338 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4339 {
4340 int i;
4341
4342 if (current_frame_info.r[reg_fp])
4343 {
4344 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4345 reg_names[HARD_FRAME_POINTER_REGNUM]
4346 = reg_names[current_frame_info.r[reg_fp]];
4347 reg_names[current_frame_info.r[reg_fp]] = tmp;
4348 reg_emitted (reg_fp);
4349 }
4350 if (! TARGET_REG_NAMES)
4351 {
4352 for (i = 0; i < current_frame_info.n_input_regs; i++)
4353 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4354 for (i = 0; i < current_frame_info.n_local_regs; i++)
4355 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4356 for (i = 0; i < current_frame_info.n_output_regs; i++)
4357 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4358 }
4359
4360 current_frame_info.initialized = 0;
4361 }
4362
4363 int
4364 ia64_dbx_register_number (int regno)
4365 {
4366 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4367 from its home at loc79 to something inside the register frame. We
4368 must perform the same renumbering here for the debug info. */
4369 if (current_frame_info.r[reg_fp])
4370 {
4371 if (regno == HARD_FRAME_POINTER_REGNUM)
4372 regno = current_frame_info.r[reg_fp];
4373 else if (regno == current_frame_info.r[reg_fp])
4374 regno = HARD_FRAME_POINTER_REGNUM;
4375 }
4376
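  /* As an illustrative example: with 2 input and 3 local registers,
     in1 maps to 32 + 1 = 33, loc2 to 32 + 2 + 2 = 36, and out0 to
     32 + 2 + 3 + 0 = 37, reflecting the in/loc/out order of the
     stacked register frame.  */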
4377 if (IN_REGNO_P (regno))
4378 return 32 + regno - IN_REG (0);
4379 else if (LOC_REGNO_P (regno))
4380 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4381 else if (OUT_REGNO_P (regno))
4382 return (32 + current_frame_info.n_input_regs
4383 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4384 else
4385 return regno;
4386 }
4387
4388 /* Implement TARGET_TRAMPOLINE_INIT.
4389
4390 The trampoline should set the static chain pointer to the value placed
4391 into the trampoline and should branch to the specified routine.
4392 To make the normal indirect-subroutine calling convention work,
4393 the trampoline must look like a function descriptor; the first
4394 word being the target address and the second being the target's
4395 global pointer.
4396
4397 We abuse the concept of a global pointer by arranging for it
4398 to point to the data we need to load. The complete trampoline
4399 has the following form:
4400
4401 +-------------------+ \
4402 TRAMP: | __ia64_trampoline | |
4403 +-------------------+ > fake function descriptor
4404 | TRAMP+16 | |
4405 +-------------------+ /
4406 | target descriptor |
4407 +-------------------+
4408 | static link |
4409 +-------------------+
4410 */
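/* As an illustrative sketch, the four words that ia64_trampoline_init below
   stores at the trampoline address TRAMP are

	TRAMP+ 0:  address of __ia64_trampoline
	TRAMP+ 8:  TRAMP + 16       (gp of the fake descriptor)
	TRAMP+16:  address of the target function's descriptor
	TRAMP+24:  static chain value

   so a normal indirect call through TRAMP branches to __ia64_trampoline with
   gp = TRAMP + 16, from which the real target descriptor and the static
   chain can then be loaded.  */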
4411
4412 static void
4413 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4414 {
4415 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4416 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4417
4418 /* The Intel assembler requires that the global __ia64_trampoline symbol
4419 be declared explicitly. */
4420 if (!TARGET_GNU_AS)
4421 {
4422 static bool declared_ia64_trampoline = false;
4423
4424 if (!declared_ia64_trampoline)
4425 {
4426 declared_ia64_trampoline = true;
4427 (*targetm.asm_out.globalize_label) (asm_out_file,
4428 "__ia64_trampoline");
4429 }
4430 }
4431
4432 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4433 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4434 fnaddr = convert_memory_address (Pmode, fnaddr);
4435 static_chain = convert_memory_address (Pmode, static_chain);
4436
4437 /* Load up our iterator. */
4438 addr_reg = copy_to_reg (addr);
4439 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4440
4441 /* The first two words are the fake descriptor:
4442 __ia64_trampoline, ADDR+16. */
4443 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4444 if (TARGET_ABI_OPEN_VMS)
4445 {
4446 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4447 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4448 relocation against function symbols to make it identical to the
4449 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4450 strict ELF and dereference to get the bare code address. */
4451 rtx reg = gen_reg_rtx (Pmode);
4452 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4453 emit_move_insn (reg, tramp);
4454 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4455 tramp = reg;
4456 }
4457 emit_move_insn (m_tramp, tramp);
4458 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4459 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4460
4461 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
4462 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4463 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4464
4465 /* The third word is the target descriptor. */
4466 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4467 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4468 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4469
4470 /* The fourth word is the static chain. */
4471 emit_move_insn (m_tramp, static_chain);
4472 }
4473 \f
4474 /* Do any needed setup for a variadic function. CUM has not been updated
4475 for the last named argument which has type TYPE and mode MODE.
4476
4477 We generate the actual spill instructions during prologue generation. */
4478
4479 static void
4480 ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
4481 tree type, int * pretend_size,
4482 int second_time ATTRIBUTE_UNUSED)
4483 {
4484 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4485
4486 /* Skip the current argument. */
4487 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4488
4489 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4490 {
4491 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4492 *pretend_size = n * UNITS_PER_WORD;
4493 cfun->machine->n_varargs = n;
4494 }
4495 }
4496
4497 /* Check whether TYPE is a homogeneous floating point aggregate. If
4498 it is, return the mode of the floating point type that appears
4499 in all leaves. If it is not, return VOIDmode.
4500
4501 An aggregate is a homogeneous floating point aggregate if all
4502 fields/elements in it have the same floating point type (e.g.,
4503 SFmode). 128-bit quad-precision floats are excluded.
4504
4505 Variable sized aggregates should never arrive here, since we should
4506 have already decided to pass them by reference. Top-level zero-sized
4507 aggregates are excluded because our parallels crash the middle-end. */
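/* A few illustrative examples (SFmode/DFmode refer to float/double here):

     struct a { float x, y, z; };       HFA, element mode SFmode
     struct b { double d[4]; };         HFA, element mode DFmode
     struct c { float x; double y; };   not an HFA: mixed element modes

   A complex float field contributes its component mode, so mixing float and
   _Complex float members still yields an SFmode HFA.  */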
4508
4509 static enum machine_mode
4510 hfa_element_mode (const_tree type, bool nested)
4511 {
4512 enum machine_mode element_mode = VOIDmode;
4513 enum machine_mode mode;
4514 enum tree_code code = TREE_CODE (type);
4515 int know_element_mode = 0;
4516 tree t;
4517
4518 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4519 return VOIDmode;
4520
4521 switch (code)
4522 {
4523 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4524 case BOOLEAN_TYPE: case POINTER_TYPE:
4525 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4526 case LANG_TYPE: case FUNCTION_TYPE:
4527 return VOIDmode;
4528
4529 /* Fortran complex types are supposed to be HFAs, so we need to handle
4530 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4531 types though. */
4532 case COMPLEX_TYPE:
4533 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4534 && TYPE_MODE (type) != TCmode)
4535 return GET_MODE_INNER (TYPE_MODE (type));
4536 else
4537 return VOIDmode;
4538
4539 case REAL_TYPE:
4540 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4541 mode if this is contained within an aggregate. */
4542 if (nested && TYPE_MODE (type) != TFmode)
4543 return TYPE_MODE (type);
4544 else
4545 return VOIDmode;
4546
4547 case ARRAY_TYPE:
4548 return hfa_element_mode (TREE_TYPE (type), 1);
4549
4550 case RECORD_TYPE:
4551 case UNION_TYPE:
4552 case QUAL_UNION_TYPE:
4553 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4554 {
4555 if (TREE_CODE (t) != FIELD_DECL)
4556 continue;
4557
4558 mode = hfa_element_mode (TREE_TYPE (t), 1);
4559 if (know_element_mode)
4560 {
4561 if (mode != element_mode)
4562 return VOIDmode;
4563 }
4564 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4565 return VOIDmode;
4566 else
4567 {
4568 know_element_mode = 1;
4569 element_mode = mode;
4570 }
4571 }
4572 return element_mode;
4573
4574 default:
4575 /* If we reach here, we probably have some front-end specific type
4576 that the backend doesn't know about. This can happen via the
4577 aggregate_value_p call in init_function_start. All we can do is
4578 ignore unknown tree types. */
4579 return VOIDmode;
4580 }
4581
4582 return VOIDmode;
4583 }
4584
4585 /* Return the number of words required to hold a quantity of TYPE and MODE
4586 when passed as an argument. */
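/* For example, with the 8-byte words used here a DImode scalar needs a
   single argument word, while a 20-byte BLKmode aggregate needs
   (20 + 8 - 1) / 8 = 3 words.  */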
4587 static int
4588 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4589 {
4590 int words;
4591
4592 if (mode == BLKmode)
4593 words = int_size_in_bytes (type);
4594 else
4595 words = GET_MODE_SIZE (mode);
4596
4597 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4598 }
4599
4600 /* Return the number of registers that should be skipped so the current
4601 argument (described by TYPE and WORDS) will be properly aligned.
4602
4603 Integer and float arguments larger than 8 bytes start at the next
4604 even boundary. Aggregates larger than 8 bytes start at the next
4605 even boundary if the aggregate has 16 byte alignment. Note that
4606 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4607 but are still to be aligned in registers.
4608
4609 ??? The ABI does not specify how to handle aggregates with
4610 alignment from 9 to 15 bytes, or greater than 16. We handle them
4611 all as if they had 16 byte alignment. Such aggregates can occur
4612 only if gcc extensions are used. */
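/* A worked example: after one integer argument, cum->words is 1 (odd), so a
   following 16-byte-aligned aggregate or a TFmode value gets an offset of 1
   and starts in the even slot 2, while a following int or double gets an
   offset of 0 and simply occupies slot 1.  */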
4613 static int
4614 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4615 const_tree type, int words)
4616 {
4617 /* No registers are skipped on VMS. */
4618 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4619 return 0;
4620
4621 if (type
4622 && TREE_CODE (type) != INTEGER_TYPE
4623 && TREE_CODE (type) != REAL_TYPE)
4624 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4625 else
4626 return words > 1;
4627 }
4628
4629 /* Return rtx for register where argument is passed, or zero if it is passed
4630 on the stack. */
4631 /* ??? 128-bit quad-precision floats are always passed in general
4632 registers. */
4633
4634 static rtx
4635 ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
4636 const_tree type, bool named, bool incoming)
4637 {
4638 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4639
4640 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4641 int words = ia64_function_arg_words (type, mode);
4642 int offset = ia64_function_arg_offset (cum, type, words);
4643 enum machine_mode hfa_mode = VOIDmode;
4644
4645 /* For OPEN VMS, emit the instruction setting up the argument register here,
4646 when we know it will be emitted together with the other argument setup
4647 insns. This is not the conceptually best place to do this, but this is
4648 the easiest as we have convenient access to cumulative args info. */
4649
4650 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4651 && named == 1)
4652 {
4653 unsigned HOST_WIDE_INT regval = cum->words;
4654 int i;
4655
4656 for (i = 0; i < 8; i++)
4657 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4658
4659 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4660 GEN_INT (regval));
4661 }
4662
4663 /* If all argument slots are used, then it must go on the stack. */
4664 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4665 return 0;
4666
4667 /* On OpenVMS argument is either in Rn or Fn. */
4668 if (TARGET_ABI_OPEN_VMS)
4669 {
4670 if (FLOAT_MODE_P (mode))
4671 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4672 else
4673 return gen_rtx_REG (mode, basereg + cum->words);
4674 }
4675
4676 /* Check for and handle homogeneous FP aggregates. */
4677 if (type)
4678 hfa_mode = hfa_element_mode (type, 0);
4679
4680 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4681 and unprototyped hfas are passed specially. */
4682 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4683 {
4684 rtx loc[16];
4685 int i = 0;
4686 int fp_regs = cum->fp_regs;
4687 int int_regs = cum->words + offset;
4688 int hfa_size = GET_MODE_SIZE (hfa_mode);
4689 int byte_size;
4690 int args_byte_size;
4691
4692 /* If prototyped, pass it in FR regs then GR regs.
4693 If not prototyped, pass it in both FR and GR regs.
4694
4695 If this is an SFmode aggregate, then it is possible to run out of
4696 FR regs while GR regs are still left. In that case, we pass the
4697 remaining part in the GR regs. */
4698
4699 /* Fill the FP regs. We do this always. We stop if we reach the end
4700 of the argument, the last FP register, or the last argument slot. */
4701
4702 byte_size = ((mode == BLKmode)
4703 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4704 args_byte_size = int_regs * UNITS_PER_WORD;
4705 offset = 0;
4706 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4707 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4708 {
4709 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4710 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4711 + fp_regs)),
4712 GEN_INT (offset));
4713 offset += hfa_size;
4714 args_byte_size += hfa_size;
4715 fp_regs++;
4716 }
4717
4718 /* If no prototype, then the whole thing must go in GR regs. */
4719 if (! cum->prototype)
4720 offset = 0;
4721 /* If this is an SFmode aggregate, then we might have some left over
4722 that needs to go in GR regs. */
4723 else if (byte_size != offset)
4724 int_regs += offset / UNITS_PER_WORD;
4725
4726 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4727
4728 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4729 {
4730 enum machine_mode gr_mode = DImode;
4731 unsigned int gr_size;
4732
4733 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4734 then this goes in a GR reg left adjusted/little endian, right
4735 adjusted/big endian. */
4736 /* ??? Currently this is handled wrong, because 4-byte hunks are
4737 always right adjusted/little endian. */
4738 if (offset & 0x4)
4739 gr_mode = SImode;
4740 /* If we have an even 4 byte hunk because the aggregate is a
4741 multiple of 4 bytes in size, then this goes in a GR reg right
4742 adjusted/little endian. */
4743 else if (byte_size - offset == 4)
4744 gr_mode = SImode;
4745
4746 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4747 gen_rtx_REG (gr_mode, (basereg
4748 + int_regs)),
4749 GEN_INT (offset));
4750
4751 gr_size = GET_MODE_SIZE (gr_mode);
4752 offset += gr_size;
4753 if (gr_size == UNITS_PER_WORD
4754 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4755 int_regs++;
4756 else if (gr_size > UNITS_PER_WORD)
4757 int_regs += gr_size / UNITS_PER_WORD;
4758 }
4759 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4760 }
4761
4762 /* Integral values and aggregates go in general registers. If we have run out of
4763 FR registers, then FP values must also go in general registers. This can
4764 happen when we have a SFmode HFA. */
4765 else if (mode == TFmode || mode == TCmode
4766 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4767 {
4768 int byte_size = ((mode == BLKmode)
4769 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4770 if (BYTES_BIG_ENDIAN
4771 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4772 && byte_size < UNITS_PER_WORD
4773 && byte_size > 0)
4774 {
4775 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4776 gen_rtx_REG (DImode,
4777 (basereg + cum->words
4778 + offset)),
4779 const0_rtx);
4780 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4781 }
4782 else
4783 return gen_rtx_REG (mode, basereg + cum->words + offset);
4784
4785 }
4786
4787 /* If there is a prototype, then FP values go in a FR register when
4788 named, and in a GR register when unnamed. */
4789 else if (cum->prototype)
4790 {
4791 if (named)
4792 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4793 /* In big-endian mode, an anonymous SFmode value must be represented
4794 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4795 the value into the high half of the general register. */
4796 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4797 return gen_rtx_PARALLEL (mode,
4798 gen_rtvec (1,
4799 gen_rtx_EXPR_LIST (VOIDmode,
4800 gen_rtx_REG (DImode, basereg + cum->words + offset),
4801 const0_rtx)));
4802 else
4803 return gen_rtx_REG (mode, basereg + cum->words + offset);
4804 }
4805 /* If there is no prototype, then FP values go in both FR and GR
4806 registers. */
4807 else
4808 {
4809 /* See comment above. */
4810 enum machine_mode inner_mode =
4811 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4812
4813 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4814 gen_rtx_REG (mode, (FR_ARG_FIRST
4815 + cum->fp_regs)),
4816 const0_rtx);
4817 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4818 gen_rtx_REG (inner_mode,
4819 (basereg + cum->words
4820 + offset)),
4821 const0_rtx);
4822
4823 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4824 }
4825 }
4826
4827 /* Implement TARGET_FUNCTION_ARG target hook. */
4828
4829 static rtx
4830 ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
4831 const_tree type, bool named)
4832 {
4833 return ia64_function_arg_1 (cum, mode, type, named, false);
4834 }
4835
4836 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4837
4838 static rtx
4839 ia64_function_incoming_arg (cumulative_args_t cum,
4840 enum machine_mode mode,
4841 const_tree type, bool named)
4842 {
4843 return ia64_function_arg_1 (cum, mode, type, named, true);
4844 }
4845
4846 /* Return number of bytes, at the beginning of the argument, that must be
4847 put in registers. 0 if the argument is entirely in registers or entirely
4848 in memory. */
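/* For example, with the 8 argument slots and 8-byte words used here, a
   3-word argument whose first slot would be slot 6 gets (8 - 6) * 8 = 16
   bytes in registers and the remaining word on the stack, whereas the same
   argument starting at slot 3 fits entirely in registers and yields 0.  */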
4849
4850 static int
4851 ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
4852 tree type, bool named ATTRIBUTE_UNUSED)
4853 {
4854 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4855
4856 int words = ia64_function_arg_words (type, mode);
4857 int offset = ia64_function_arg_offset (cum, type, words);
4858
4859 /* If all argument slots are used, then it must go on the stack. */
4860 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4861 return 0;
4862
4863 /* It doesn't matter whether the argument goes in FR or GR regs. If
4864 it fits within the 8 argument slots, then it goes entirely in
4865 registers. If it extends past the last argument slot, then the rest
4866 goes on the stack. */
4867
4868 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4869 return 0;
4870
4871 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4872 }
4873
4874 /* Return ivms_arg_type based on machine_mode. */
4875
4876 static enum ivms_arg_type
4877 ia64_arg_type (enum machine_mode mode)
4878 {
4879 switch (mode)
4880 {
4881 case SFmode:
4882 return FS;
4883 case DFmode:
4884 return FT;
4885 default:
4886 return I64;
4887 }
4888 }
4889
4890 /* Update CUM to point after this argument. This is patterned after
4891 ia64_function_arg. */
4892
4893 static void
4894 ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4895 const_tree type, bool named)
4896 {
4897 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4898 int words = ia64_function_arg_words (type, mode);
4899 int offset = ia64_function_arg_offset (cum, type, words);
4900 enum machine_mode hfa_mode = VOIDmode;
4901
4902 /* If all arg slots are already full, then there is nothing to do. */
4903 if (cum->words >= MAX_ARGUMENT_SLOTS)
4904 {
4905 cum->words += words + offset;
4906 return;
4907 }
4908
4909 cum->atypes[cum->words] = ia64_arg_type (mode);
4910 cum->words += words + offset;
4911
4912 /* On OpenVMS argument is either in Rn or Fn. */
4913 if (TARGET_ABI_OPEN_VMS)
4914 {
4915 cum->int_regs = cum->words;
4916 cum->fp_regs = cum->words;
4917 return;
4918 }
4919
4920 /* Check for and handle homogeneous FP aggregates. */
4921 if (type)
4922 hfa_mode = hfa_element_mode (type, 0);
4923
4924 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4925 and unprototyped hfas are passed specially. */
4926 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4927 {
4928 int fp_regs = cum->fp_regs;
4929 /* This is the original value of cum->words + offset. */
4930 int int_regs = cum->words - words;
4931 int hfa_size = GET_MODE_SIZE (hfa_mode);
4932 int byte_size;
4933 int args_byte_size;
4934
4935 /* If prototyped, pass it in FR regs then GR regs.
4936 If not prototyped, pass it in both FR and GR regs.
4937
4938 If this is an SFmode aggregate, then it is possible to run out of
4939 FR regs while GR regs are still left. In that case, we pass the
4940 remaining part in the GR regs. */
4941
4942 /* Fill the FP regs. We do this always. We stop if we reach the end
4943 of the argument, the last FP register, or the last argument slot. */
4944
4945 byte_size = ((mode == BLKmode)
4946 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4947 args_byte_size = int_regs * UNITS_PER_WORD;
4948 offset = 0;
4949 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4950 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4951 {
4952 offset += hfa_size;
4953 args_byte_size += hfa_size;
4954 fp_regs++;
4955 }
4956
4957 cum->fp_regs = fp_regs;
4958 }
4959
4960 /* Integral values and aggregates go in general registers. So do TFmode FP values.
4961 If we have run out of FR registers, then other FP values must also go in
4962 general registers. This can happen when we have a SFmode HFA. */
4963 else if (mode == TFmode || mode == TCmode
4964 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4965 cum->int_regs = cum->words;
4966
4967 /* If there is a prototype, then FP values go in a FR register when
4968 named, and in a GR register when unnamed. */
4969 else if (cum->prototype)
4970 {
4971 if (! named)
4972 cum->int_regs = cum->words;
4973 else
4974 /* ??? Complex types should not reach here. */
4975 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4976 }
4977 /* If there is no prototype, then FP values go in both FR and GR
4978 registers. */
4979 else
4980 {
4981 /* ??? Complex types should not reach here. */
4982 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4983 cum->int_regs = cum->words;
4984 }
4985 }
4986
4987 /* Arguments with alignment larger than 8 bytes start at the next even
4988 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4989 even though their normal alignment is 8 bytes. See ia64_function_arg. */
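/* For example, PARM_BOUNDARY is 64 bits on this target, so a TFmode argument
   under ILP32 HP-UX or a type aligned to more than 64 bits gets a 128-bit
   boundary and therefore starts on an even argument slot, mirroring
   ia64_function_arg_offset above; int and double keep the default 64-bit
   boundary.  */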
4990
4991 static unsigned int
4992 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4993 {
4994 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4995 return PARM_BOUNDARY * 2;
4996
4997 if (type)
4998 {
4999 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
5000 return PARM_BOUNDARY * 2;
5001 else
5002 return PARM_BOUNDARY;
5003 }
5004
5005 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
5006 return PARM_BOUNDARY * 2;
5007 else
5008 return PARM_BOUNDARY;
5009 }
5010
5011 /* True if it is OK to do sibling call optimization for the specified
5012 call expression EXP. DECL will be the called function, or NULL if
5013 this is an indirect call. */
5014 static bool
5015 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5016 {
5017 /* We can't perform a sibcall if the current function has the syscall_linkage
5018 attribute. */
5019 if (lookup_attribute ("syscall_linkage",
5020 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
5021 return false;
5022
5023 /* We must always return with our current GP. This means we can
5024 only sibcall to functions defined in the current module unless
5025 TARGET_CONST_GP is set to true. */
5026 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
5027 }
5028 \f
5029
5030 /* Implement va_arg. */
5031
5032 static tree
5033 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5034 gimple_seq *post_p)
5035 {
5036 /* Variable sized types are passed by reference. */
5037 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5038 {
5039 tree ptrtype = build_pointer_type (type);
5040 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
5041 return build_va_arg_indirect_ref (addr);
5042 }
5043
5044 /* Aggregate arguments with alignment larger than 8 bytes start at
5045 the next even boundary. Integer and floating point arguments
5046 do so if they are larger than 8 bytes, whether or not they are
5047 also aligned larger than 8 bytes. */
5048 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
5049 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
5050 {
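      /* With 8-byte words this computes (valist + 15) & -16, rounding the
         va_list pointer up to the next 16-byte boundary before the standard
         handling below.  */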
5051 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
5052 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5053 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
5054 gimplify_assign (unshare_expr (valist), t, pre_p);
5055 }
5056
5057 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5058 }
5059 \f
5060 /* Return 1 if the function return value is returned in memory. Return 0 if it is
5061 in a register. */
5062
5063 static bool
5064 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
5065 {
5066 enum machine_mode mode;
5067 enum machine_mode hfa_mode;
5068 HOST_WIDE_INT byte_size;
5069
5070 mode = TYPE_MODE (valtype);
5071 byte_size = GET_MODE_SIZE (mode);
5072 if (mode == BLKmode)
5073 {
5074 byte_size = int_size_in_bytes (valtype);
5075 if (byte_size < 0)
5076 return true;
5077 }
5078
5079 /* HFAs with up to 8 elements are returned in the FP argument registers. */
5080
5081 hfa_mode = hfa_element_mode (valtype, 0);
5082 if (hfa_mode != VOIDmode)
5083 {
5084 int hfa_size = GET_MODE_SIZE (hfa_mode);
5085
5086 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
5087 return true;
5088 else
5089 return false;
5090 }
5091 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
5092 return true;
5093 else
5094 return false;
5095 }
5096
5097 /* Return rtx for register that holds the function return value. */
5098
5099 static rtx
5100 ia64_function_value (const_tree valtype,
5101 const_tree fn_decl_or_type,
5102 bool outgoing ATTRIBUTE_UNUSED)
5103 {
5104 enum machine_mode mode;
5105 enum machine_mode hfa_mode;
5106 int unsignedp;
5107 const_tree func = fn_decl_or_type;
5108
5109 if (fn_decl_or_type
5110 && !DECL_P (fn_decl_or_type))
5111 func = NULL;
5112
5113 mode = TYPE_MODE (valtype);
5114 hfa_mode = hfa_element_mode (valtype, 0);
5115
5116 if (hfa_mode != VOIDmode)
5117 {
5118 rtx loc[8];
5119 int i;
5120 int hfa_size;
5121 int byte_size;
5122 int offset;
5123
5124 hfa_size = GET_MODE_SIZE (hfa_mode);
5125 byte_size = ((mode == BLKmode)
5126 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
5127 offset = 0;
5128 for (i = 0; offset < byte_size; i++)
5129 {
5130 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5131 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
5132 GEN_INT (offset));
5133 offset += hfa_size;
5134 }
5135 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5136 }
5137 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
5138 return gen_rtx_REG (mode, FR_ARG_FIRST);
5139 else
5140 {
5141 bool need_parallel = false;
5142
5143 /* In big-endian mode, we need to manage the layout of aggregates
5144 in the registers so that we get the bits properly aligned in
5145 the highpart of the registers. */
5146 if (BYTES_BIG_ENDIAN
5147 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
5148 need_parallel = true;
5149
5150 /* Something like struct S { long double x; char a[0]; } is not an
5151 HFA structure, and therefore doesn't go in fp registers. But
5152 the middle-end will give it XFmode anyway, and XFmode values
5153 don't normally fit in integer registers. So we need to smuggle
5154 the value inside a parallel. */
5155 else if (mode == XFmode || mode == XCmode || mode == RFmode)
5156 need_parallel = true;
5157
5158 if (need_parallel)
5159 {
5160 rtx loc[8];
5161 int offset;
5162 int bytesize;
5163 int i;
5164
5165 offset = 0;
5166 bytesize = int_size_in_bytes (valtype);
5167 /* An empty PARALLEL is invalid here, but the return value
5168 doesn't matter for empty structs. */
5169 if (bytesize == 0)
5170 return gen_rtx_REG (mode, GR_RET_FIRST);
5171 for (i = 0; offset < bytesize; i++)
5172 {
5173 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5174 gen_rtx_REG (DImode,
5175 GR_RET_FIRST + i),
5176 GEN_INT (offset));
5177 offset += UNITS_PER_WORD;
5178 }
5179 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5180 }
5181
5182 mode = promote_function_mode (valtype, mode, &unsignedp,
5183 func ? TREE_TYPE (func) : NULL_TREE,
5184 true);
5185
5186 return gen_rtx_REG (mode, GR_RET_FIRST);
5187 }
5188 }
5189
5190 /* Worker function for TARGET_LIBCALL_VALUE. */
5191
5192 static rtx
5193 ia64_libcall_value (enum machine_mode mode,
5194 const_rtx fun ATTRIBUTE_UNUSED)
5195 {
5196 return gen_rtx_REG (mode,
5197 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5198 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5199 && (mode) != TFmode)
5200 ? FR_RET_FIRST : GR_RET_FIRST));
5201 }
5202
5203 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5204
5205 static bool
5206 ia64_function_value_regno_p (const unsigned int regno)
5207 {
5208 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5209 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5210 }
5211
5212 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5213 We need to emit DTP-relative relocations. */
5214
5215 static void
5216 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5217 {
5218 gcc_assert (size == 4 || size == 8);
5219 if (size == 4)
5220 fputs ("\tdata4.ua\t@dtprel(", file);
5221 else
5222 fputs ("\tdata8.ua\t@dtprel(", file);
5223 output_addr_const (file, x);
5224 fputs (")", file);
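  /* For a symbol "foo" this emits, e.g., "\tdata8.ua\t@dtprel(foo)" when
     SIZE is 8, or the data4.ua variant when SIZE is 4.  */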
5225 }
5226
5227 /* Print a memory address as an operand to reference that memory location. */
5228
5229 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5230 also call this from ia64_print_operand for memory addresses. */
5231
5232 static void
5233 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5234 rtx address ATTRIBUTE_UNUSED)
5235 {
5236 }
5237
5238 /* Print an operand to an assembler instruction.
5239 C Swap and print a comparison operator.
5240 D Print an FP comparison operator.
5241 E Print 32 - constant, for SImode shifts as extract.
5242 e Print 64 - constant, for DImode rotates.
5243 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5244 a floating point register emitted normally.
5245 G A floating point constant.
5246 I Invert a predicate register by adding 1.
5247 J Select the proper predicate register for a condition.
5248 j Select the inverse predicate register for a condition.
5249 O Append .acq for volatile load.
5250 P Postincrement of a MEM.
5251 Q Append .rel for volatile store.
5252    R	Print .s, .d, or nothing for single, double, or no truncation.
5253 S Shift amount for shladd instruction.
5254 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5255 for Intel assembler.
5256 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5257 for Intel assembler.
5258 X A pair of floating point registers.
5259 r Print register name, or constant 0 as r0. HP compatibility for
5260 Linux kernel.
5261 v Print vector constant value as an 8-byte integer value. */
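
/* A few illustrative examples of the codes above: for a POST_INC MEM of
   DImode, "%P" prints ", 8"; when the current insn is predicated on p6,
   "%," prints the "(p6) " prefix; "%O" and "%Q" append ".acq" and ".rel"
   only for volatile memory operands.  */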
5262
5263 static void
5264 ia64_print_operand (FILE * file, rtx x, int code)
5265 {
5266 const char *str;
5267
5268 switch (code)
5269 {
5270 case 0:
5271 /* Handled below. */
5272 break;
5273
5274 case 'C':
5275 {
5276 enum rtx_code c = swap_condition (GET_CODE (x));
5277 fputs (GET_RTX_NAME (c), file);
5278 return;
5279 }
5280
5281 case 'D':
5282 switch (GET_CODE (x))
5283 {
5284 case NE:
5285 str = "neq";
5286 break;
5287 case UNORDERED:
5288 str = "unord";
5289 break;
5290 case ORDERED:
5291 str = "ord";
5292 break;
5293 case UNLT:
5294 str = "nge";
5295 break;
5296 case UNLE:
5297 str = "ngt";
5298 break;
5299 case UNGT:
5300 str = "nle";
5301 break;
5302 case UNGE:
5303 str = "nlt";
5304 break;
5305 case UNEQ:
5306 case LTGT:
5307 gcc_unreachable ();
5308 default:
5309 str = GET_RTX_NAME (GET_CODE (x));
5310 break;
5311 }
5312 fputs (str, file);
5313 return;
5314
5315 case 'E':
5316 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5317 return;
5318
5319 case 'e':
5320 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5321 return;
5322
5323 case 'F':
5324 if (x == CONST0_RTX (GET_MODE (x)))
5325 str = reg_names [FR_REG (0)];
5326 else if (x == CONST1_RTX (GET_MODE (x)))
5327 str = reg_names [FR_REG (1)];
5328 else
5329 {
5330 gcc_assert (GET_CODE (x) == REG);
5331 str = reg_names [REGNO (x)];
5332 }
5333 fputs (str, file);
5334 return;
5335
5336 case 'G':
5337 {
5338 long val[4];
5339 REAL_VALUE_TYPE rv;
5340 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5341 real_to_target (val, &rv, GET_MODE (x));
5342 if (GET_MODE (x) == SFmode)
5343 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5344 else if (GET_MODE (x) == DFmode)
5345 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5346 & 0xffffffff,
5347 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5348 & 0xffffffff);
5349 else
5350 output_operand_lossage ("invalid %%G mode");
5351 }
5352 return;
5353
5354 case 'I':
5355 fputs (reg_names [REGNO (x) + 1], file);
5356 return;
5357
5358 case 'J':
5359 case 'j':
5360 {
5361 unsigned int regno = REGNO (XEXP (x, 0));
5362 if (GET_CODE (x) == EQ)
5363 regno += 1;
5364 if (code == 'j')
5365 regno ^= 1;
5366 fputs (reg_names [regno], file);
5367 }
5368 return;
5369
5370 case 'O':
5371 if (MEM_VOLATILE_P (x))
5372 fputs(".acq", file);
5373 return;
5374
5375 case 'P':
5376 {
5377 HOST_WIDE_INT value;
5378
5379 switch (GET_CODE (XEXP (x, 0)))
5380 {
5381 default:
5382 return;
5383
5384 case POST_MODIFY:
5385 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5386 if (GET_CODE (x) == CONST_INT)
5387 value = INTVAL (x);
5388 else
5389 {
5390 gcc_assert (GET_CODE (x) == REG);
5391 fprintf (file, ", %s", reg_names[REGNO (x)]);
5392 return;
5393 }
5394 break;
5395
5396 case POST_INC:
5397 value = GET_MODE_SIZE (GET_MODE (x));
5398 break;
5399
5400 case POST_DEC:
5401 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5402 break;
5403 }
5404
5405 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5406 return;
5407 }
5408
5409 case 'Q':
5410 if (MEM_VOLATILE_P (x))
5411 fputs(".rel", file);
5412 return;
5413
5414 case 'R':
5415 if (x == CONST0_RTX (GET_MODE (x)))
5416 fputs(".s", file);
5417 else if (x == CONST1_RTX (GET_MODE (x)))
5418 fputs(".d", file);
5419 else if (x == CONST2_RTX (GET_MODE (x)))
5420 ;
5421 else
5422 output_operand_lossage ("invalid %%R value");
5423 return;
5424
5425 case 'S':
5426 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5427 return;
5428
5429 case 'T':
5430 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5431 {
5432 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5433 return;
5434 }
5435 break;
5436
5437 case 'U':
5438 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5439 {
5440 const char *prefix = "0x";
5441 if (INTVAL (x) & 0x80000000)
5442 {
5443 fprintf (file, "0xffffffff");
5444 prefix = "";
5445 }
5446 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5447 return;
5448 }
5449 break;
5450
5451 case 'X':
5452 {
5453 unsigned int regno = REGNO (x);
5454 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5455 }
5456 return;
5457
5458 case 'r':
5459 /* If this operand is the constant zero, write it as register zero.
5460 Any register, zero, or CONST_INT value is OK here. */
5461 if (GET_CODE (x) == REG)
5462 fputs (reg_names[REGNO (x)], file);
5463 else if (x == CONST0_RTX (GET_MODE (x)))
5464 fputs ("r0", file);
5465 else if (GET_CODE (x) == CONST_INT)
5466 output_addr_const (file, x);
5467 else
5468 output_operand_lossage ("invalid %%r value");
5469 return;
5470
5471 case 'v':
5472 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5473 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5474 break;
5475
5476 case '+':
5477 {
5478 const char *which;
5479
5480 /* For conditional branches, returns or calls, substitute
5481 sptk, dptk, dpnt, or spnt for %s. */
5482 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5483 if (x)
5484 {
5485 int pred_val = XINT (x, 0);
5486
5487 /* Guess top and bottom 10% statically predicted. */
5488 if (pred_val < REG_BR_PROB_BASE / 50
5489 && br_prob_note_reliable_p (x))
5490 which = ".spnt";
5491 else if (pred_val < REG_BR_PROB_BASE / 2)
5492 which = ".dpnt";
5493 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5494 || !br_prob_note_reliable_p (x))
5495 which = ".dptk";
5496 else
5497 which = ".sptk";
5498 }
5499 else if (CALL_P (current_output_insn))
5500 which = ".sptk";
5501 else
5502 which = ".dptk";
5503
5504 fputs (which, file);
5505 return;
5506 }
5507
5508 case ',':
5509 x = current_insn_predicate;
5510 if (x)
5511 {
5512 unsigned int regno = REGNO (XEXP (x, 0));
5513 if (GET_CODE (x) == EQ)
5514 regno += 1;
5515 fprintf (file, "(%s) ", reg_names [regno]);
5516 }
5517 return;
5518
5519 default:
5520 output_operand_lossage ("ia64_print_operand: unknown code");
5521 return;
5522 }
5523
5524 switch (GET_CODE (x))
5525 {
5526 /* This happens for the spill/restore instructions. */
5527 case POST_INC:
5528 case POST_DEC:
5529 case POST_MODIFY:
5530 x = XEXP (x, 0);
5531 /* ... fall through ... */
5532
5533 case REG:
5534 fputs (reg_names [REGNO (x)], file);
5535 break;
5536
5537 case MEM:
5538 {
5539 rtx addr = XEXP (x, 0);
5540 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5541 addr = XEXP (addr, 0);
5542 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5543 break;
5544 }
5545
5546 default:
5547 output_addr_const (file, x);
5548 break;
5549 }
5550
5551 return;
5552 }
5553
5554 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5555
5556 static bool
5557 ia64_print_operand_punct_valid_p (unsigned char code)
5558 {
5559 return (code == '+' || code == ',');
5560 }
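
/* Informal note: the '+' code handled in ia64_print_operand above selects one
   of the .sptk/.dptk/.spnt/.dpnt branch hint completers (conventionally read
   as static/dynamic, taken/not-taken), and ',' prints the "(pN) " qualifying
   predicate prefix for a predicated instruction.  */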
5561 \f
5562 /* Compute a (partial) cost for rtx X. Return true if the complete
5563 cost has been computed, and false if subexpressions should be
5564 scanned. In either case, *TOTAL contains the cost result. */
5565 /* ??? This is incomplete. */
5566
5567 static bool
5568 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5569 int *total, bool speed ATTRIBUTE_UNUSED)
5570 {
5571 switch (code)
5572 {
5573 case CONST_INT:
5574 switch (outer_code)
5575 {
5576 case SET:
5577 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5578 return true;
5579 case PLUS:
5580 if (satisfies_constraint_I (x))
5581 *total = 0;
5582 else if (satisfies_constraint_J (x))
5583 *total = 1;
5584 else
5585 *total = COSTS_N_INSNS (1);
5586 return true;
5587 default:
5588 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5589 *total = 0;
5590 else
5591 *total = COSTS_N_INSNS (1);
5592 return true;
5593 }
5594
5595 case CONST_DOUBLE:
5596 *total = COSTS_N_INSNS (1);
5597 return true;
5598
5599 case CONST:
5600 case SYMBOL_REF:
5601 case LABEL_REF:
5602 *total = COSTS_N_INSNS (3);
5603 return true;
5604
5605 case FMA:
5606 *total = COSTS_N_INSNS (4);
5607 return true;
5608
5609 case MULT:
5610 /* For multiplies wider than HImode, we have to go to the FPU,
5611 which normally involves copies. Plus there's the latency
5612 of the multiply itself, and the latency of the instructions to
5613 transfer integer regs to FP regs. */
5614 if (FLOAT_MODE_P (GET_MODE (x)))
5615 *total = COSTS_N_INSNS (4);
5616 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5617 *total = COSTS_N_INSNS (10);
5618 else
5619 *total = COSTS_N_INSNS (2);
5620 return true;
5621
5622 case PLUS:
5623 case MINUS:
5624 if (FLOAT_MODE_P (GET_MODE (x)))
5625 {
5626 *total = COSTS_N_INSNS (4);
5627 return true;
5628 }
5629 /* FALLTHRU */
5630
5631 case ASHIFT:
5632 case ASHIFTRT:
5633 case LSHIFTRT:
5634 *total = COSTS_N_INSNS (1);
5635 return true;
5636
5637 case DIV:
5638 case UDIV:
5639 case MOD:
5640 case UMOD:
5641 /* We make divide expensive, so that divide-by-constant will be
5642 optimized to a multiply. */
5643 *total = COSTS_N_INSNS (60);
5644 return true;
5645
5646 default:
5647 return false;
5648 }
5649 }
5650
5651 /* Calculate the cost of moving data from a register in class FROM to
5652 one in class TO, using MODE. */
5653
5654 static int
5655 ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
5656 reg_class_t to)
5657 {
5658 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5659 if (to == ADDL_REGS)
5660 to = GR_REGS;
5661 if (from == ADDL_REGS)
5662 from = GR_REGS;
5663
5664 /* All costs are symmetric, so reduce cases by putting the
5665 lower number class as the destination. */
5666 if (from < to)
5667 {
5668 reg_class_t tmp = to;
5669 to = from, from = tmp;
5670 }
5671
5672   /* Moving between FR and GR registers in XFmode must cost more than 2,
5673 so that we get secondary memory reloads. Between FR_REGS,
5674 we have to make this at least as expensive as memory_move_cost
5675 to avoid spectacularly poor register class preferencing. */
5676 if (mode == XFmode || mode == RFmode)
5677 {
5678 if (to != GR_REGS || from != GR_REGS)
5679 return memory_move_cost (mode, to, false);
5680 else
5681 return 3;
5682 }
5683
5684 switch (to)
5685 {
5686 case PR_REGS:
5687 /* Moving between PR registers takes two insns. */
5688 if (from == PR_REGS)
5689 return 3;
5690 /* Moving between PR and anything but GR is impossible. */
5691 if (from != GR_REGS)
5692 return memory_move_cost (mode, to, false);
5693 break;
5694
5695 case BR_REGS:
5696 /* Moving between BR and anything but GR is impossible. */
5697 if (from != GR_REGS && from != GR_AND_BR_REGS)
5698 return memory_move_cost (mode, to, false);
5699 break;
5700
5701 case AR_I_REGS:
5702 case AR_M_REGS:
5703 /* Moving between AR and anything but GR is impossible. */
5704 if (from != GR_REGS)
5705 return memory_move_cost (mode, to, false);
5706 break;
5707
5708 case GR_REGS:
5709 case FR_REGS:
5710 case FP_REGS:
5711 case GR_AND_FR_REGS:
5712 case GR_AND_BR_REGS:
5713 case ALL_REGS:
5714 break;
5715
5716 default:
5717 gcc_unreachable ();
5718 }
5719
5720 return 2;
5721 }
5722
5723 /* Calculate the cost of moving data of MODE from a register to or from
5724 memory. */
5725
5726 static int
5727 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5728 reg_class_t rclass,
5729 bool in ATTRIBUTE_UNUSED)
5730 {
5731 if (rclass == GENERAL_REGS
5732 || rclass == FR_REGS
5733 || rclass == FP_REGS
5734 || rclass == GR_AND_FR_REGS)
5735 return 4;
5736 else
5737 return 10;
5738 }
5739
5740 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5741 on RCLASS to use when copying X into that class. */
5742
5743 static reg_class_t
5744 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5745 {
5746 switch (rclass)
5747 {
5748 case FR_REGS:
5749 case FP_REGS:
5750 /* Don't allow volatile mem reloads into floating point registers.
5751 This is defined to force reload to choose the r/m case instead
5752 of the f/f case when reloading (set (reg fX) (mem/v)). */
5753 if (MEM_P (x) && MEM_VOLATILE_P (x))
5754 return NO_REGS;
5755
5756 /* Force all unrecognized constants into the constant pool. */
5757 if (CONSTANT_P (x))
5758 return NO_REGS;
5759 break;
5760
5761 case AR_M_REGS:
5762 case AR_I_REGS:
5763 if (!OBJECT_P (x))
5764 return NO_REGS;
5765 break;
5766
5767 default:
5768 break;
5769 }
5770
5771 return rclass;
5772 }
5773
5774 /* This function returns the register class required for a secondary
5775 register when copying between one of the registers in RCLASS, and X,
5776 using MODE. A return value of NO_REGS means that no secondary register
5777 is required. */
5778
5779 enum reg_class
5780 ia64_secondary_reload_class (enum reg_class rclass,
5781 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5782 {
5783 int regno = -1;
5784
5785 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5786 regno = true_regnum (x);
5787
5788 switch (rclass)
5789 {
5790 case BR_REGS:
5791 case AR_M_REGS:
5792 case AR_I_REGS:
5793 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5794 interaction. We end up with two pseudos with overlapping lifetimes
5795      both of which are equiv to the same constant, and both of which need
5796 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5797 changes depending on the path length, which means the qty_first_reg
5798 check in make_regs_eqv can give different answers at different times.
5799 At some point I'll probably need a reload_indi pattern to handle
5800 this.
5801
5802 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5803 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5804 non-general registers for good measure. */
5805 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5806 return GR_REGS;
5807
5808 /* This is needed if a pseudo used as a call_operand gets spilled to a
5809 stack slot. */
5810 if (GET_CODE (x) == MEM)
5811 return GR_REGS;
5812 break;
5813
5814 case FR_REGS:
5815 case FP_REGS:
5816 /* Need to go through general registers to get to other class regs. */
5817 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5818 return GR_REGS;
5819
5820 /* This can happen when a paradoxical subreg is an operand to the
5821 muldi3 pattern. */
5822 /* ??? This shouldn't be necessary after instruction scheduling is
5823 enabled, because paradoxical subregs are not accepted by
5824 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5825 stop the paradoxical subreg stupidity in the *_operand functions
5826 in recog.c. */
5827 if (GET_CODE (x) == MEM
5828 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5829 || GET_MODE (x) == QImode))
5830 return GR_REGS;
5831
5832 /* This can happen because of the ior/and/etc patterns that accept FP
5833 registers as operands. If the third operand is a constant, then it
5834      needs to be reloaded into an FP register.  */
5835 if (GET_CODE (x) == CONST_INT)
5836 return GR_REGS;
5837
5838 /* This can happen because of register elimination in a muldi3 insn.
5839 E.g. `26107 * (unsigned long)&u'. */
5840 if (GET_CODE (x) == PLUS)
5841 return GR_REGS;
5842 break;
5843
5844 case PR_REGS:
5845 /* ??? This happens if we cse/gcse a BImode value across a call,
5846 and the function has a nonlocal goto. This is because global
5847 does not allocate call crossing pseudos to hard registers when
5848 crtl->has_nonlocal_goto is true. This is relatively
5849 common for C++ programs that use exceptions. To reproduce,
5850 return NO_REGS and compile libstdc++. */
5851 if (GET_CODE (x) == MEM)
5852 return GR_REGS;
5853
5854 /* This can happen when we take a BImode subreg of a DImode value,
5855 and that DImode value winds up in some non-GR register. */
5856 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5857 return GR_REGS;
5858 break;
5859
5860 default:
5861 break;
5862 }
5863
5864 return NO_REGS;
5865 }
5866
5867 \f
5868 /* Implement targetm.unspec_may_trap_p hook. */
5869 static int
5870 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5871 {
5872 switch (XINT (x, 1))
5873 {
5874 case UNSPEC_LDA:
5875 case UNSPEC_LDS:
5876 case UNSPEC_LDSA:
5877 case UNSPEC_LDCCLR:
5878 case UNSPEC_CHKACLR:
5879 case UNSPEC_CHKS:
5880 /* These unspecs are just wrappers. */
5881 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5882 }
5883
5884 return default_unspec_may_trap_p (x, flags);
5885 }
5886
5887 \f
5888 /* Parse the -mfixed-range= option string. */
5889
5890 static void
5891 fix_range (const char *const_str)
5892 {
5893 int i, first, last;
5894 char *str, *dash, *comma;
5895
5896   /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5897 REG2 are either register names or register numbers. The effect
5898 of this option is to mark the registers in the range from REG1 to
5899 REG2 as ``fixed'' so they won't be used by the compiler. This is
5900 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
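
  /* For example, "-mfixed-range=f32-f127" marks every register from f32
     through f127 as fixed and call-used so the compiler will not allocate
     them; several ranges may be given separated by commas, e.g.
     "-mfixed-range=f12-f15,f32-f127" (illustrative; see the parsing below).  */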
5901
5902 i = strlen (const_str);
5903 str = (char *) alloca (i + 1);
5904 memcpy (str, const_str, i + 1);
5905
5906 while (1)
5907 {
5908 dash = strchr (str, '-');
5909 if (!dash)
5910 {
5911 warning (0, "value of -mfixed-range must have form REG1-REG2");
5912 return;
5913 }
5914 *dash = '\0';
5915
5916 comma = strchr (dash + 1, ',');
5917 if (comma)
5918 *comma = '\0';
5919
5920 first = decode_reg_name (str);
5921 if (first < 0)
5922 {
5923 warning (0, "unknown register name: %s", str);
5924 return;
5925 }
5926
5927 last = decode_reg_name (dash + 1);
5928 if (last < 0)
5929 {
5930 warning (0, "unknown register name: %s", dash + 1);
5931 return;
5932 }
5933
5934 *dash = '-';
5935
5936 if (first > last)
5937 {
5938 warning (0, "%s-%s is an empty range", str, dash + 1);
5939 return;
5940 }
5941
5942 for (i = first; i <= last; ++i)
5943 fixed_regs[i] = call_used_regs[i] = 1;
5944
5945 if (!comma)
5946 break;
5947
5948 *comma = ',';
5949 str = comma + 1;
5950 }
5951 }
5952
5953 /* Implement TARGET_OPTION_OVERRIDE. */
5954
5955 static void
5956 ia64_option_override (void)
5957 {
5958 unsigned int i;
5959 cl_deferred_option *opt;
5960 vec<cl_deferred_option> *v
5961 = (vec<cl_deferred_option> *) ia64_deferred_options;
5962
5963 if (v)
5964 FOR_EACH_VEC_ELT (*v, i, opt)
5965 {
5966 switch (opt->opt_index)
5967 {
5968 case OPT_mfixed_range_:
5969 fix_range (opt->arg);
5970 break;
5971
5972 default:
5973 gcc_unreachable ();
5974 }
5975 }
5976
5977 if (TARGET_AUTO_PIC)
5978 target_flags |= MASK_CONST_GP;
5979
5980   /* Numerous experiments show that IRA-based loop pressure
5981      calculation works better for RTL loop invariant motion on targets
5982      with enough (>= 32) registers.  It is an expensive optimization,
5983      so it is enabled only when optimizing for peak performance.  */
5984 if (optimize >= 3)
5985 flag_ira_loop_pressure = 1;
5986
5987
5988 ia64_section_threshold = (global_options_set.x_g_switch_value
5989 ? g_switch_value
5990 : IA64_DEFAULT_GVALUE);
5991
5992 init_machine_status = ia64_init_machine_status;
5993
5994 if (align_functions <= 0)
5995 align_functions = 64;
5996 if (align_loops <= 0)
5997 align_loops = 32;
5998 if (TARGET_ABI_OPEN_VMS)
5999 flag_no_common = 1;
6000
6001 ia64_override_options_after_change();
6002 }
6003
6004 /* Implement targetm.override_options_after_change. */
6005
6006 static void
6007 ia64_override_options_after_change (void)
6008 {
6009 if (optimize >= 3
6010 && !global_options_set.x_flag_selective_scheduling
6011 && !global_options_set.x_flag_selective_scheduling2)
6012 {
6013 flag_selective_scheduling2 = 1;
6014 flag_sel_sched_pipelining = 1;
6015 }
6016 if (mflag_sched_control_spec == 2)
6017 {
6018 /* Control speculation is on by default for the selective scheduler,
6019 but not for the Haifa scheduler. */
6020 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
6021 }
6022 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
6023 {
6024       /* FIXME: remove this when we implement breaking autoinsns as
6025          a transformation.  */
6026 flag_auto_inc_dec = 0;
6027 }
6028 }
6029
6030 /* Initialize the record of emitted frame related registers. */
6031
6032 void ia64_init_expanders (void)
6033 {
6034 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
6035 }
6036
6037 static struct machine_function *
6038 ia64_init_machine_status (void)
6039 {
6040 return ggc_alloc_cleared_machine_function ();
6041 }
6042 \f
6043 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
6044 static enum attr_type ia64_safe_type (rtx);
6045
6046 static enum attr_itanium_class
6047 ia64_safe_itanium_class (rtx insn)
6048 {
6049 if (recog_memoized (insn) >= 0)
6050 return get_attr_itanium_class (insn);
6051 else if (DEBUG_INSN_P (insn))
6052 return ITANIUM_CLASS_IGNORE;
6053 else
6054 return ITANIUM_CLASS_UNKNOWN;
6055 }
6056
6057 static enum attr_type
6058 ia64_safe_type (rtx insn)
6059 {
6060 if (recog_memoized (insn) >= 0)
6061 return get_attr_type (insn);
6062 else
6063 return TYPE_UNKNOWN;
6064 }
6065 \f
6066 /* The following collection of routines emit instruction group stop bits as
6067 necessary to avoid dependencies. */
6068
6069 /* Need to track some additional registers as far as serialization is
6070 concerned so we can properly handle br.call and br.ret. We could
6071 make these registers visible to gcc, but since these registers are
6072 never explicitly used in gcc generated code, it seems wasteful to
6073 do so (plus it would make the call and return patterns needlessly
6074 complex). */
6075 #define REG_RP (BR_REG (0))
6076 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
6077 /* This is used for volatile asms which may require a stop bit immediately
6078 before and after them. */
6079 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
6080 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
6081 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
6082
6083 /* For each register, we keep track of how it has been written in the
6084 current instruction group.
6085
6086 If a register is written unconditionally (no qualifying predicate),
6087 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6088
6089 If a register is written if its qualifying predicate P is true, we
6090 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6091 may be written again by the complement of P (P^1) and when this happens,
6092 WRITE_COUNT gets set to 2.
6093
6094 The result of this is that whenever an insn attempts to write a register
6095 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6096
6097 If a predicate register is written by a floating-point insn, we set
6098 WRITTEN_BY_FP to true.
6099
6100 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6101 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
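
/* A short illustration of the scheme above: an unconditional "mov r14 = 1"
   immediately sets WRITE_COUNT for r14 to 2, so in the common case any later
   write of r14 in the same instruction group needs a stop bit first; a
   predicated "(p6) mov r14 = 1" only sets WRITE_COUNT to 1 and records p6 in
   FIRST_PRED, and a later write under the complementary predicate raises the
   count to 2.  */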
6102
6103 #if GCC_VERSION >= 4000
6104 #define RWS_FIELD_TYPE __extension__ unsigned short
6105 #else
6106 #define RWS_FIELD_TYPE unsigned int
6107 #endif
6108 struct reg_write_state
6109 {
6110 RWS_FIELD_TYPE write_count : 2;
6111 RWS_FIELD_TYPE first_pred : 10;
6112 RWS_FIELD_TYPE written_by_fp : 1;
6113 RWS_FIELD_TYPE written_by_and : 1;
6114 RWS_FIELD_TYPE written_by_or : 1;
6115 };
6116
6117 /* Cumulative info for the current instruction group. */
6118 struct reg_write_state rws_sum[NUM_REGS];
6119 #ifdef ENABLE_CHECKING
6120 /* Bitmap whether a register has been written in the current insn. */
6121 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
6122 / HOST_BITS_PER_WIDEST_FAST_INT];
6123
6124 static inline void
6125 rws_insn_set (int regno)
6126 {
6127 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
6128 SET_HARD_REG_BIT (rws_insn, regno);
6129 }
6130
6131 static inline int
6132 rws_insn_test (int regno)
6133 {
6134 return TEST_HARD_REG_BIT (rws_insn, regno);
6135 }
6136 #else
6137 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
6138 unsigned char rws_insn[2];
6139
6140 static inline void
6141 rws_insn_set (int regno)
6142 {
6143 if (regno == REG_AR_CFM)
6144 rws_insn[0] = 1;
6145 else if (regno == REG_VOLATILE)
6146 rws_insn[1] = 1;
6147 }
6148
6149 static inline int
6150 rws_insn_test (int regno)
6151 {
6152 if (regno == REG_AR_CFM)
6153 return rws_insn[0];
6154 if (regno == REG_VOLATILE)
6155 return rws_insn[1];
6156 return 0;
6157 }
6158 #endif
6159
6160 /* Indicates whether this is the first instruction after a stop bit,
6161 in which case we don't need another stop bit. Without this,
6162 ia64_variable_issue will die when scheduling an alloc. */
6163 static int first_instruction;
6164
6165 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
6166 RTL for one instruction. */
6167 struct reg_flags
6168 {
6169 unsigned int is_write : 1; /* Is register being written? */
6170 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
6171 unsigned int is_branch : 1; /* Is register used as part of a branch? */
6172 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
6173 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6174 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
6175 };
6176
6177 static void rws_update (int, struct reg_flags, int);
6178 static int rws_access_regno (int, struct reg_flags, int);
6179 static int rws_access_reg (rtx, struct reg_flags, int);
6180 static void update_set_flags (rtx, struct reg_flags *);
6181 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6182 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6183 static void init_insn_group_barriers (void);
6184 static int group_barrier_needed (rtx);
6185 static int safe_group_barrier_needed (rtx);
6186 static int in_safe_group_barrier;
6187
6188 /* Update *RWS for REGNO, which is being written by the current instruction,
6189 with predicate PRED, and associated register flags in FLAGS. */
6190
6191 static void
6192 rws_update (int regno, struct reg_flags flags, int pred)
6193 {
6194 if (pred)
6195 rws_sum[regno].write_count++;
6196 else
6197 rws_sum[regno].write_count = 2;
6198 rws_sum[regno].written_by_fp |= flags.is_fp;
6199 /* ??? Not tracking and/or across differing predicates. */
6200 rws_sum[regno].written_by_and = flags.is_and;
6201 rws_sum[regno].written_by_or = flags.is_or;
6202 rws_sum[regno].first_pred = pred;
6203 }
6204
6205 /* Handle an access to register REGNO of type FLAGS using predicate register
6206 PRED. Update rws_sum array. Return 1 if this access creates
6207 a dependency with an earlier instruction in the same group. */
6208
6209 static int
6210 rws_access_regno (int regno, struct reg_flags flags, int pred)
6211 {
6212 int need_barrier = 0;
6213
6214 gcc_assert (regno < NUM_REGS);
6215
6216 if (! PR_REGNO_P (regno))
6217 flags.is_and = flags.is_or = 0;
6218
6219 if (flags.is_write)
6220 {
6221 int write_count;
6222
6223 rws_insn_set (regno);
6224 write_count = rws_sum[regno].write_count;
6225
6226 switch (write_count)
6227 {
6228 case 0:
6229 /* The register has not been written yet. */
6230 if (!in_safe_group_barrier)
6231 rws_update (regno, flags, pred);
6232 break;
6233
6234 case 1:
6235 	  /* The register has been written via a predicate.  Treat
6236 	     it like an unconditional write and do not try to check
6237 	     for a complementary predicate register in the earlier write.  */
6238 if (flags.is_and && rws_sum[regno].written_by_and)
6239 ;
6240 else if (flags.is_or && rws_sum[regno].written_by_or)
6241 ;
6242 else
6243 need_barrier = 1;
6244 if (!in_safe_group_barrier)
6245 rws_update (regno, flags, pred);
6246 break;
6247
6248 case 2:
6249 /* The register has been unconditionally written already. We
6250 need a barrier. */
6251 if (flags.is_and && rws_sum[regno].written_by_and)
6252 ;
6253 else if (flags.is_or && rws_sum[regno].written_by_or)
6254 ;
6255 else
6256 need_barrier = 1;
6257 if (!in_safe_group_barrier)
6258 {
6259 rws_sum[regno].written_by_and = flags.is_and;
6260 rws_sum[regno].written_by_or = flags.is_or;
6261 }
6262 break;
6263
6264 default:
6265 gcc_unreachable ();
6266 }
6267 }
6268 else
6269 {
6270 if (flags.is_branch)
6271 {
6272 	  /* Branches have several RAW exceptions that allow us to avoid
6273 	     barriers.  */
6274
6275 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6276 /* RAW dependencies on branch regs are permissible as long
6277 as the writer is a non-branch instruction. Since we
6278 never generate code that uses a branch register written
6279 by a branch instruction, handling this case is
6280 easy. */
6281 return 0;
6282
6283 if (REGNO_REG_CLASS (regno) == PR_REGS
6284 && ! rws_sum[regno].written_by_fp)
6285 /* The predicates of a branch are available within the
6286 same insn group as long as the predicate was written by
6287 something other than a floating-point instruction. */
6288 return 0;
6289 }
6290
6291 if (flags.is_and && rws_sum[regno].written_by_and)
6292 return 0;
6293 if (flags.is_or && rws_sum[regno].written_by_or)
6294 return 0;
6295
6296 switch (rws_sum[regno].write_count)
6297 {
6298 case 0:
6299 /* The register has not been written yet. */
6300 break;
6301
6302 case 1:
6303 	  /* The register has been written via a predicate; assume we
6304 	     need a barrier (don't check for complementary regs).  */
6305 need_barrier = 1;
6306 break;
6307
6308 case 2:
6309 /* The register has been unconditionally written already. We
6310 need a barrier. */
6311 need_barrier = 1;
6312 break;
6313
6314 default:
6315 gcc_unreachable ();
6316 }
6317 }
6318
6319 return need_barrier;
6320 }
6321
6322 static int
6323 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6324 {
6325 int regno = REGNO (reg);
6326 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6327
6328 if (n == 1)
6329 return rws_access_regno (regno, flags, pred);
6330 else
6331 {
6332 int need_barrier = 0;
6333 while (--n >= 0)
6334 need_barrier |= rws_access_regno (regno + n, flags, pred);
6335 return need_barrier;
6336 }
6337 }
6338
6339 /* Examine X, which is a SET rtx, and update the register flags
6340    stored in *PFLAGS.  */
6341
6342 static void
6343 update_set_flags (rtx x, struct reg_flags *pflags)
6344 {
6345 rtx src = SET_SRC (x);
6346
6347 switch (GET_CODE (src))
6348 {
6349 case CALL:
6350 return;
6351
6352 case IF_THEN_ELSE:
6353 /* There are four cases here:
6354 (1) The destination is (pc), in which case this is a branch,
6355 nothing here applies.
6356 (2) The destination is ar.lc, in which case this is a
6357 doloop_end_internal,
6358 (3) The destination is an fp register, in which case this is
6359 an fselect instruction.
6360 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6361 this is a check load.
6362 In all cases, nothing we do in this function applies. */
6363 return;
6364
6365 default:
6366 if (COMPARISON_P (src)
6367 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6368 /* Set pflags->is_fp to 1 so that we know we're dealing
6369 with a floating point comparison when processing the
6370 destination of the SET. */
6371 pflags->is_fp = 1;
6372
6373 /* Discover if this is a parallel comparison. We only handle
6374 and.orcm and or.andcm at present, since we must retain a
6375 strict inverse on the predicate pair. */
6376 else if (GET_CODE (src) == AND)
6377 pflags->is_and = 1;
6378 else if (GET_CODE (src) == IOR)
6379 pflags->is_or = 1;
6380
6381 break;
6382 }
6383 }
6384
6385 /* Subroutine of rtx_needs_barrier; this function determines whether the
6386 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6387    are as in rtx_needs_barrier.  */
6389
6390 static int
6391 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6392 {
6393 int need_barrier = 0;
6394 rtx dst;
6395 rtx src = SET_SRC (x);
6396
6397 if (GET_CODE (src) == CALL)
6398 /* We don't need to worry about the result registers that
6399        get written by a subroutine call.  */
6400 return rtx_needs_barrier (src, flags, pred);
6401 else if (SET_DEST (x) == pc_rtx)
6402 {
6403 /* X is a conditional branch. */
6404 /* ??? This seems redundant, as the caller sets this bit for
6405 all JUMP_INSNs. */
6406 if (!ia64_spec_check_src_p (src))
6407 flags.is_branch = 1;
6408 return rtx_needs_barrier (src, flags, pred);
6409 }
6410
6411 if (ia64_spec_check_src_p (src))
6412 /* Avoid checking one register twice (in condition
6413 and in 'then' section) for ldc pattern. */
6414 {
6415 gcc_assert (REG_P (XEXP (src, 2)));
6416 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6417
6418 /* We process MEM below. */
6419 src = XEXP (src, 1);
6420 }
6421
6422 need_barrier |= rtx_needs_barrier (src, flags, pred);
6423
6424 dst = SET_DEST (x);
6425 if (GET_CODE (dst) == ZERO_EXTRACT)
6426 {
6427 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6428 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6429 }
6430 return need_barrier;
6431 }
6432
6433 /* Handle an access to rtx X of type FLAGS using predicate register
6434 PRED. Return 1 if this access creates a dependency with an earlier
6435 instruction in the same group. */
6436
6437 static int
6438 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6439 {
6440 int i, j;
6441 int is_complemented = 0;
6442 int need_barrier = 0;
6443 const char *format_ptr;
6444 struct reg_flags new_flags;
6445 rtx cond;
6446
6447 if (! x)
6448 return 0;
6449
6450 new_flags = flags;
6451
6452 switch (GET_CODE (x))
6453 {
6454 case SET:
6455 update_set_flags (x, &new_flags);
6456 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6457 if (GET_CODE (SET_SRC (x)) != CALL)
6458 {
6459 new_flags.is_write = 1;
6460 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6461 }
6462 break;
6463
6464 case CALL:
6465 new_flags.is_write = 0;
6466 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6467
6468 /* Avoid multiple register writes, in case this is a pattern with
6469 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6470 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6471 {
6472 new_flags.is_write = 1;
6473 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6474 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6475 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6476 }
6477 break;
6478
6479 case COND_EXEC:
6480 /* X is a predicated instruction. */
6481
6482 cond = COND_EXEC_TEST (x);
6483 gcc_assert (!pred);
6484 need_barrier = rtx_needs_barrier (cond, flags, 0);
6485
6486 if (GET_CODE (cond) == EQ)
6487 is_complemented = 1;
6488 cond = XEXP (cond, 0);
6489 gcc_assert (GET_CODE (cond) == REG
6490 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6491 pred = REGNO (cond);
6492 if (is_complemented)
6493 ++pred;
6494
6495 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6496 return need_barrier;
6497
6498 case CLOBBER:
6499 case USE:
6500       /* Clobber & use are for earlier compiler phases only.  */
6501 break;
6502
6503 case ASM_OPERANDS:
6504 case ASM_INPUT:
6505 /* We always emit stop bits for traditional asms. We emit stop bits
6506 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6507 if (GET_CODE (x) != ASM_OPERANDS
6508 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6509 {
6510 /* Avoid writing the register multiple times if we have multiple
6511 asm outputs. This avoids a failure in rws_access_reg. */
6512 if (! rws_insn_test (REG_VOLATILE))
6513 {
6514 new_flags.is_write = 1;
6515 rws_access_regno (REG_VOLATILE, new_flags, pred);
6516 }
6517 return 1;
6518 }
6519
6520 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6521 We cannot just fall through here since then we would be confused
6522 	 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
6523 	 a traditional asm, unlike its normal usage.  */
6524
6525 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6526 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6527 need_barrier = 1;
6528 break;
6529
6530 case PARALLEL:
6531 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6532 {
6533 rtx pat = XVECEXP (x, 0, i);
6534 switch (GET_CODE (pat))
6535 {
6536 case SET:
6537 update_set_flags (pat, &new_flags);
6538 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6539 break;
6540
6541 case USE:
6542 case CALL:
6543 case ASM_OPERANDS:
6544 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6545 break;
6546
6547 case CLOBBER:
6548 if (REG_P (XEXP (pat, 0))
6549 && extract_asm_operands (x) != NULL_RTX
6550 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6551 {
6552 new_flags.is_write = 1;
6553 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6554 new_flags, pred);
6555 new_flags = flags;
6556 }
6557 break;
6558
6559 case RETURN:
6560 break;
6561
6562 default:
6563 gcc_unreachable ();
6564 }
6565 }
6566 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6567 {
6568 rtx pat = XVECEXP (x, 0, i);
6569 if (GET_CODE (pat) == SET)
6570 {
6571 if (GET_CODE (SET_SRC (pat)) != CALL)
6572 {
6573 new_flags.is_write = 1;
6574 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6575 pred);
6576 }
6577 }
6578 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6579 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6580 }
6581 break;
6582
6583 case SUBREG:
6584 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6585 break;
6586 case REG:
6587 if (REGNO (x) == AR_UNAT_REGNUM)
6588 {
6589 for (i = 0; i < 64; ++i)
6590 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6591 }
6592 else
6593 need_barrier = rws_access_reg (x, flags, pred);
6594 break;
6595
6596 case MEM:
6597 /* Find the regs used in memory address computation. */
6598 new_flags.is_write = 0;
6599 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6600 break;
6601
6602 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6603 case SYMBOL_REF: case LABEL_REF: case CONST:
6604 break;
6605
6606 /* Operators with side-effects. */
6607 case POST_INC: case POST_DEC:
6608 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6609
6610 new_flags.is_write = 0;
6611 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6612 new_flags.is_write = 1;
6613 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6614 break;
6615
6616 case POST_MODIFY:
6617 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6618
6619 new_flags.is_write = 0;
6620 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6621 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6622 new_flags.is_write = 1;
6623 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6624 break;
6625
6626 /* Handle common unary and binary ops for efficiency. */
6627 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6628 case MOD: case UDIV: case UMOD: case AND: case IOR:
6629 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6630 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6631 case NE: case EQ: case GE: case GT: case LE:
6632 case LT: case GEU: case GTU: case LEU: case LTU:
6633 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6634 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6635 break;
6636
6637 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6638 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6639 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6640 case SQRT: case FFS: case POPCOUNT:
6641 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6642 break;
6643
6644 case VEC_SELECT:
6645 /* VEC_SELECT's second argument is a PARALLEL with integers that
6646 describe the elements selected. On ia64, those integers are
6647 always constants. Avoid walking the PARALLEL so that we don't
6648 get confused with "normal" parallels and then die. */
6649 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6650 break;
6651
6652 case UNSPEC:
6653 switch (XINT (x, 1))
6654 {
6655 case UNSPEC_LTOFF_DTPMOD:
6656 case UNSPEC_LTOFF_DTPREL:
6657 case UNSPEC_DTPREL:
6658 case UNSPEC_LTOFF_TPREL:
6659 case UNSPEC_TPREL:
6660 case UNSPEC_PRED_REL_MUTEX:
6661 case UNSPEC_PIC_CALL:
6662 case UNSPEC_MF:
6663 case UNSPEC_FETCHADD_ACQ:
6664 case UNSPEC_FETCHADD_REL:
6665 case UNSPEC_BSP_VALUE:
6666 case UNSPEC_FLUSHRS:
6667 case UNSPEC_BUNDLE_SELECTOR:
6668 break;
6669
6670 case UNSPEC_GR_SPILL:
6671 case UNSPEC_GR_RESTORE:
6672 {
6673 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6674 HOST_WIDE_INT bit = (offset >> 3) & 63;
6675
6676 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6677 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6678 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6679 new_flags, pred);
6680 break;
6681 }
6682
6683 case UNSPEC_FR_SPILL:
6684 case UNSPEC_FR_RESTORE:
6685 case UNSPEC_GETF_EXP:
6686 case UNSPEC_SETF_EXP:
6687 case UNSPEC_ADDP4:
6688 case UNSPEC_FR_SQRT_RECIP_APPROX:
6689 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6690 case UNSPEC_LDA:
6691 case UNSPEC_LDS:
6692 case UNSPEC_LDS_A:
6693 case UNSPEC_LDSA:
6694 case UNSPEC_CHKACLR:
6695 case UNSPEC_CHKS:
6696 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6697 break;
6698
6699 case UNSPEC_FR_RECIP_APPROX:
6700 case UNSPEC_SHRP:
6701 case UNSPEC_COPYSIGN:
6702 case UNSPEC_FR_RECIP_APPROX_RES:
6703 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6704 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6705 break;
6706
6707 case UNSPEC_CMPXCHG_ACQ:
6708 case UNSPEC_CMPXCHG_REL:
6709 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6710 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6711 break;
6712
6713 default:
6714 gcc_unreachable ();
6715 }
6716 break;
6717
6718 case UNSPEC_VOLATILE:
6719 switch (XINT (x, 1))
6720 {
6721 case UNSPECV_ALLOC:
6722 /* Alloc must always be the first instruction of a group.
6723 We force this by always returning true. */
6724 /* ??? We might get better scheduling if we explicitly check for
6725 input/local/output register dependencies, and modify the
6726 scheduler so that alloc is always reordered to the start of
6727 the current group. We could then eliminate all of the
6728 first_instruction code. */
6729 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6730
6731 new_flags.is_write = 1;
6732 rws_access_regno (REG_AR_CFM, new_flags, pred);
6733 return 1;
6734
6735 case UNSPECV_SET_BSP:
6736 case UNSPECV_PROBE_STACK_RANGE:
6737 need_barrier = 1;
6738 break;
6739
6740 case UNSPECV_BLOCKAGE:
6741 case UNSPECV_INSN_GROUP_BARRIER:
6742 case UNSPECV_BREAK:
6743 case UNSPECV_PSAC_ALL:
6744 case UNSPECV_PSAC_NORMAL:
6745 return 0;
6746
6747 case UNSPECV_PROBE_STACK_ADDRESS:
6748 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6749 break;
6750
6751 default:
6752 gcc_unreachable ();
6753 }
6754 break;
6755
6756 case RETURN:
6757 new_flags.is_write = 0;
6758 need_barrier = rws_access_regno (REG_RP, flags, pred);
6759 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6760
6761 new_flags.is_write = 1;
6762 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6763 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6764 break;
6765
6766 default:
6767 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6768 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6769 switch (format_ptr[i])
6770 {
6771 case '0': /* unused field */
6772 case 'i': /* integer */
6773 case 'n': /* note */
6774 case 'w': /* wide integer */
6775 case 's': /* pointer to string */
6776 case 'S': /* optional pointer to string */
6777 break;
6778
6779 case 'e':
6780 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6781 need_barrier = 1;
6782 break;
6783
6784 case 'E':
6785 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6786 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6787 need_barrier = 1;
6788 break;
6789
6790 default:
6791 gcc_unreachable ();
6792 }
6793 break;
6794 }
6795 return need_barrier;
6796 }
6797
6798 /* Clear out the state for group_barrier_needed at the start of a
6799 sequence of insns. */
6800
6801 static void
6802 init_insn_group_barriers (void)
6803 {
6804 memset (rws_sum, 0, sizeof (rws_sum));
6805 first_instruction = 1;
6806 }
6807
6808 /* Given the current state, determine whether a group barrier (a stop bit) is
6809 necessary before INSN. Return nonzero if so. This modifies the state to
6810 include the effects of INSN as a side-effect. */
6811
6812 static int
6813 group_barrier_needed (rtx insn)
6814 {
6815 rtx pat;
6816 int need_barrier = 0;
6817 struct reg_flags flags;
6818
6819 memset (&flags, 0, sizeof (flags));
6820 switch (GET_CODE (insn))
6821 {
6822 case NOTE:
6823 case DEBUG_INSN:
6824 break;
6825
6826 case BARRIER:
6827 /* A barrier doesn't imply an instruction group boundary. */
6828 break;
6829
6830 case CODE_LABEL:
6831 memset (rws_insn, 0, sizeof (rws_insn));
6832 return 1;
6833
6834 case CALL_INSN:
6835 flags.is_branch = 1;
6836 flags.is_sibcall = SIBLING_CALL_P (insn);
6837 memset (rws_insn, 0, sizeof (rws_insn));
6838
6839 /* Don't bundle a call following another call. */
6840 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6841 {
6842 need_barrier = 1;
6843 break;
6844 }
6845
6846 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6847 break;
6848
6849 case JUMP_INSN:
6850 if (!ia64_spec_check_p (insn))
6851 flags.is_branch = 1;
6852
6853 /* Don't bundle a jump following a call. */
6854 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6855 {
6856 need_barrier = 1;
6857 break;
6858 }
6859 /* FALLTHRU */
6860
6861 case INSN:
6862 if (GET_CODE (PATTERN (insn)) == USE
6863 || GET_CODE (PATTERN (insn)) == CLOBBER)
6864 /* Don't care about USE and CLOBBER "insns"---those are used to
6865 indicate to the optimizer that it shouldn't get rid of
6866 certain operations. */
6867 break;
6868
6869 pat = PATTERN (insn);
6870
6871 /* Ug. Hack hacks hacked elsewhere. */
6872 switch (recog_memoized (insn))
6873 {
6874 /* We play dependency tricks with the epilogue in order
6875 to get proper schedules. Undo this for dv analysis. */
6876 case CODE_FOR_epilogue_deallocate_stack:
6877 case CODE_FOR_prologue_allocate_stack:
6878 pat = XVECEXP (pat, 0, 0);
6879 break;
6880
6881 /* The pattern we use for br.cloop confuses the code above.
6882 The second element of the vector is representative. */
6883 case CODE_FOR_doloop_end_internal:
6884 pat = XVECEXP (pat, 0, 1);
6885 break;
6886
6887 /* Doesn't generate code. */
6888 case CODE_FOR_pred_rel_mutex:
6889 case CODE_FOR_prologue_use:
6890 return 0;
6891
6892 default:
6893 break;
6894 }
6895
6896 memset (rws_insn, 0, sizeof (rws_insn));
6897 need_barrier = rtx_needs_barrier (pat, flags, 0);
6898
6899 /* Check to see if the previous instruction was a volatile
6900 asm. */
6901 if (! need_barrier)
6902 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6903
6904 break;
6905
6906 default:
6907 gcc_unreachable ();
6908 }
6909
6910 if (first_instruction && important_for_bundling_p (insn))
6911 {
6912 need_barrier = 0;
6913 first_instruction = 0;
6914 }
6915
6916 return need_barrier;
6917 }
6918
6919 /* Like group_barrier_needed, but do not clobber the current state. */
6920
6921 static int
6922 safe_group_barrier_needed (rtx insn)
6923 {
6924 int saved_first_instruction;
6925 int t;
6926
6927 saved_first_instruction = first_instruction;
6928 in_safe_group_barrier = 1;
6929
6930 t = group_barrier_needed (insn);
6931
6932 first_instruction = saved_first_instruction;
6933 in_safe_group_barrier = 0;
6934
6935 return t;
6936 }
6937
6938 /* Scan the current function and insert stop bits as necessary to
6939 eliminate dependencies. This function assumes that a final
6940 instruction scheduling pass has been run which has already
6941 inserted most of the necessary stop bits. This function only
6942 inserts new ones at basic block boundaries, since these are
6943 invisible to the scheduler. */
6944
6945 static void
6946 emit_insn_group_barriers (FILE *dump)
6947 {
6948 rtx insn;
6949 rtx last_label = 0;
6950 int insns_since_last_label = 0;
6951
6952 init_insn_group_barriers ();
6953
6954 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6955 {
6956 if (LABEL_P (insn))
6957 {
6958 if (insns_since_last_label)
6959 last_label = insn;
6960 insns_since_last_label = 0;
6961 }
6962 else if (NOTE_P (insn)
6963 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6964 {
6965 if (insns_since_last_label)
6966 last_label = insn;
6967 insns_since_last_label = 0;
6968 }
6969 else if (NONJUMP_INSN_P (insn)
6970 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6971 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6972 {
6973 init_insn_group_barriers ();
6974 last_label = 0;
6975 }
6976 else if (NONDEBUG_INSN_P (insn))
6977 {
6978 insns_since_last_label = 1;
6979
6980 if (group_barrier_needed (insn))
6981 {
6982 if (last_label)
6983 {
6984 if (dump)
6985 fprintf (dump, "Emitting stop before label %d\n",
6986 INSN_UID (last_label));
6987 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6988 insn = last_label;
6989
6990 init_insn_group_barriers ();
6991 last_label = 0;
6992 }
6993 }
6994 }
6995 }
6996 }
6997
6998 /* Like emit_insn_group_barriers, but used when no final scheduling pass
6999    has been run.  This function has to emit all necessary group barriers.  */
7000
7001 static void
7002 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7003 {
7004 rtx insn;
7005
7006 init_insn_group_barriers ();
7007
7008 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7009 {
7010 if (BARRIER_P (insn))
7011 {
7012 rtx last = prev_active_insn (insn);
7013
7014 if (! last)
7015 continue;
7016 if (JUMP_TABLE_DATA_P (last))
7017 last = prev_active_insn (last);
7018 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7019 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7020
7021 init_insn_group_barriers ();
7022 }
7023 else if (NONDEBUG_INSN_P (insn))
7024 {
7025 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7026 init_insn_group_barriers ();
7027 else if (group_barrier_needed (insn))
7028 {
7029 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
7030 init_insn_group_barriers ();
7031 group_barrier_needed (insn);
7032 }
7033 }
7034 }
7035 }
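
/* Informal note: in both barrier-insertion passes above,
   gen_insn_group_barrier (GEN_INT (3)) generates the pattern that is
   ultimately printed as the ";;" stop bit in the assembly output (the
   template itself lives in the machine description).  */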
7036
7037 \f
7038
7039 /* Instruction scheduling support. */
7040
7041 #define NR_BUNDLES 10
7042
7043 /* A list of names of all available bundles. */
7044
7045 static const char *bundle_name [NR_BUNDLES] =
7046 {
7047 ".mii",
7048 ".mmi",
7049 ".mfi",
7050 ".mmf",
7051 #if NR_BUNDLES == 10
7052 ".bbb",
7053 ".mbb",
7054 #endif
7055 ".mib",
7056 ".mmb",
7057 ".mfb",
7058 ".mlx"
7059 };
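
/* Informal summary: each bundle template names its three instruction slots,
   e.g. ".mfi" holds a memory op, a floating-point op and an integer op,
   while ".mlx" pairs an M slot with a long-immediate (X) operation that
   takes the remaining two slots.  The authoritative mapping is in the
   machine description.  */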
7060
7061 /* Nonzero if we should insert stop bits into the schedule. */
7062
7063 int ia64_final_schedule = 0;
7064
7065 /* Codes of the corresponding queried units: */
7066
7067 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
7068 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
7069
7070 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
7071 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
7072
7073 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
7074
7075 /* The following variable value is an insn group barrier. */
7076
7077 static rtx dfa_stop_insn;
7078
7079 /* The following variable value is the last issued insn. */
7080
7081 static rtx last_scheduled_insn;
7082
7083 /* The following variable is a pointer to a DFA state used as a
7084    temporary.  */
7085
7086 static state_t temp_dfa_state = NULL;
7087
7088 /* The following variable is the DFA state after issuing the last
7089    insn.  */
7090
7091 static state_t prev_cycle_state = NULL;
7092
7093 /* The following array element values are TRUE if the corresponding
7094    insn requires stop bits to be added before it.  */
7095
7096 static char *stops_p = NULL;
7097
7098 /* The following variable is used to set up the above array.  */
7099
7100 static int stop_before_p = 0;
7101
7102 /* The following variable is the length of the arrays `clocks' and
7103 `add_cycles'. */
7104
7105 static int clocks_length;
7106
7107 /* The following variable is the number of data speculations in progress.  */
7108 static int pending_data_specs = 0;
7109
7110 /* Number of memory references on the current and the next three processor cycles.  */
7111 static char mem_ops_in_group[4];
7112
7113 /* Number of the current processor cycle (from the scheduler's point of view).  */
7114 static int current_cycle;
7115
7116 static rtx ia64_single_set (rtx);
7117 static void ia64_emit_insn_before (rtx, rtx);
7118
7119 /* Map a bundle number to its pseudo-op. */
7120
7121 const char *
7122 get_bundle_name (int b)
7123 {
7124 return bundle_name[b];
7125 }
7126
7127
7128 /* Return the maximum number of instructions a cpu can issue. */
7129
7130 static int
7131 ia64_issue_rate (void)
7132 {
7133 return 6;
7134 }
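
/* Six corresponds to two bundles of three instructions per cycle, the issue
   width usually quoted for Itanium and Itanium 2 (informal note).  */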
7135
7136 /* Helper function - like single_set, but look inside COND_EXEC. */
7137
7138 static rtx
7139 ia64_single_set (rtx insn)
7140 {
7141 rtx x = PATTERN (insn), ret;
7142 if (GET_CODE (x) == COND_EXEC)
7143 x = COND_EXEC_CODE (x);
7144 if (GET_CODE (x) == SET)
7145 return x;
7146
7147   /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
7148      Although they are not classical single sets, the second set is there just
7149      to protect it from moving past FP-relative stack accesses.  */
7150 switch (recog_memoized (insn))
7151 {
7152 case CODE_FOR_prologue_allocate_stack:
7153 case CODE_FOR_epilogue_deallocate_stack:
7154 ret = XVECEXP (x, 0, 0);
7155 break;
7156
7157 default:
7158 ret = single_set_2 (insn, x);
7159 break;
7160 }
7161
7162 return ret;
7163 }
7164
7165 /* Adjust the cost of a scheduling dependency.
7166    Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
7167 COST is the current cost, DW is dependency weakness. */
7168 static int
7169 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
7170 {
7171 enum reg_note dep_type = (enum reg_note) dep_type1;
7172 enum attr_itanium_class dep_class;
7173 enum attr_itanium_class insn_class;
7174
7175 insn_class = ia64_safe_itanium_class (insn);
7176 dep_class = ia64_safe_itanium_class (dep_insn);
7177
7178 /* Treat true memory dependencies separately. Ignore apparent true
7179 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
7180 if (dep_type == REG_DEP_TRUE
7181 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7182 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7183 return 0;
7184
7185 if (dw == MIN_DEP_WEAK)
7186 /* Store and load are likely to alias, use higher cost to avoid stall. */
7187 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7188 else if (dw > MIN_DEP_WEAK)
7189 {
7190 /* Store and load are less likely to alias. */
7191 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7192 /* Assume there will be no cache conflict for floating-point data.
7193 For integer data, the L1 conflict penalty is huge (17 cycles), so we
7194 never assume the accesses will not conflict. */
7195 return 0;
7196 else
7197 return cost;
7198 }
7199
7200 if (dep_type != REG_DEP_OUTPUT)
7201 return cost;
7202
7203 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7204 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7205 return 0;
7206
7207 return cost;
7208 }
7209
7210 /* Like emit_insn_before, but skip cycle_display notes.
7211 ??? When cycle display notes are implemented, update this. */
7212
7213 static void
7214 ia64_emit_insn_before (rtx insn, rtx before)
7215 {
7216 emit_insn_before (insn, before);
7217 }
7218
7219 /* The following function marks insns that produce addresses for load
7220 and store insns. Such insns will be placed into M slots because this
7221 decreases latency for Itanium 1 (see function
7222 `ia64_produce_address_p' and the DFA descriptions). */
7223
7224 static void
7225 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
7226 {
7227 rtx insn, next, next_tail;
7228
7229 /* Before reload, which_alternative is not set, which means that
7230 ia64_safe_itanium_class will produce wrong results for (at least)
7231 move instructions. */
7232 if (!reload_completed)
7233 return;
7234
7235 next_tail = NEXT_INSN (tail);
7236 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7237 if (INSN_P (insn))
7238 insn->call = 0;
7239 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7240 if (INSN_P (insn)
7241 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7242 {
7243 sd_iterator_def sd_it;
7244 dep_t dep;
7245 bool has_mem_op_consumer_p = false;
7246
7247 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7248 {
7249 enum attr_itanium_class c;
7250
7251 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7252 continue;
7253
7254 next = DEP_CON (dep);
7255 c = ia64_safe_itanium_class (next);
7256 if ((c == ITANIUM_CLASS_ST
7257 || c == ITANIUM_CLASS_STF)
7258 && ia64_st_address_bypass_p (insn, next))
7259 {
7260 has_mem_op_consumer_p = true;
7261 break;
7262 }
7263 else if ((c == ITANIUM_CLASS_LD
7264 || c == ITANIUM_CLASS_FLD
7265 || c == ITANIUM_CLASS_FLDP)
7266 && ia64_ld_address_bypass_p (insn, next))
7267 {
7268 has_mem_op_consumer_p = true;
7269 break;
7270 }
7271 }
7272
7273 insn->call = has_mem_op_consumer_p;
7274 }
7275 }
7276
7277 /* We're beginning a new block. Initialize data structures as necessary. */
7278
7279 static void
7280 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7281 int sched_verbose ATTRIBUTE_UNUSED,
7282 int max_ready ATTRIBUTE_UNUSED)
7283 {
7284 #ifdef ENABLE_CHECKING
7285 rtx insn;
7286
7287 if (!sel_sched_p () && reload_completed)
7288 for (insn = NEXT_INSN (current_sched_info->prev_head);
7289 insn != current_sched_info->next_tail;
7290 insn = NEXT_INSN (insn))
7291 gcc_assert (!SCHED_GROUP_P (insn));
7292 #endif
7293 last_scheduled_insn = NULL_RTX;
7294 init_insn_group_barriers ();
7295
7296 current_cycle = 0;
7297 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7298 }
7299
7300 /* We're beginning a scheduling pass. Check assertion. */
7301
7302 static void
7303 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7304 int sched_verbose ATTRIBUTE_UNUSED,
7305 int max_ready ATTRIBUTE_UNUSED)
7306 {
7307 gcc_assert (pending_data_specs == 0);
7308 }
7309
7310 /* Scheduling pass is now finished. Free/reset static variable. */
7311 static void
7312 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7313 int sched_verbose ATTRIBUTE_UNUSED)
7314 {
7315 gcc_assert (pending_data_specs == 0);
7316 }
7317
7318 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7319 speculation check), FALSE otherwise. */
7320 static bool
7321 is_load_p (rtx insn)
7322 {
7323 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7324
7325 return
7326 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7327 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7328 }
7329
7330 /* If INSN is a memory reference, record it in the MEM_OPS_IN_GROUP global
7331 array (taking into account the 3-cycle cache reference postponing for
7332 stores: see the Intel Itanium 2 Reference Manual for Software Development
7333 and Optimization, section 6.7.3.1). */
7334 static void
7335 record_memory_reference (rtx insn)
7336 {
7337 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7338
7339 switch (insn_class) {
7340 case ITANIUM_CLASS_FLD:
7341 case ITANIUM_CLASS_LD:
7342 mem_ops_in_group[current_cycle % 4]++;
7343 break;
7344 case ITANIUM_CLASS_STF:
7345 case ITANIUM_CLASS_ST:
7346 mem_ops_in_group[(current_cycle + 3) % 4]++;
7347 break;
7348 default:;
7349 }
7350 }
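
/* Illustrative sketch (not part of the original source): the four-entry
   rotating window maintained by record_memory_reference and consulted by
   the lookahead guard can be modelled by the standalone code below.  The
   helper names are hypothetical; loads are charged to the issuing cycle,
   stores three cycles later, matching the cache reference postponing
   described above.  */
#if 0
static char window[4];

static void
note_mem_op (int cycle, int is_store)
{
  window[(cycle + (is_store ? 3 : 0)) % 4]++;
}

static int
over_mem_limit (int cycle, int limit)
{
  /* Mirrors the comparison against ia64_max_memory_insns.  */
  return window[cycle % 4] >= limit;
}
#endif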
7351
7352 /* We are about to begin issuing insns for this clock cycle.
7353 Override the default sort algorithm to better slot instructions. */
7354
7355 static int
7356 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7357 int *pn_ready, int clock_var,
7358 int reorder_type)
7359 {
7360 int n_asms;
7361 int n_ready = *pn_ready;
7362 rtx *e_ready = ready + n_ready;
7363 rtx *insnp;
7364
7365 if (sched_verbose)
7366 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7367
7368 if (reorder_type == 0)
7369 {
7370 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7371 n_asms = 0;
7372 for (insnp = ready; insnp < e_ready; insnp++)
7373 if (insnp < e_ready)
7374 {
7375 rtx insn = *insnp;
7376 enum attr_type t = ia64_safe_type (insn);
7377 if (t == TYPE_UNKNOWN)
7378 {
7379 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7380 || asm_noperands (PATTERN (insn)) >= 0)
7381 {
7382 rtx lowest = ready[n_asms];
7383 ready[n_asms] = insn;
7384 *insnp = lowest;
7385 n_asms++;
7386 }
7387 else
7388 {
7389 rtx highest = ready[n_ready - 1];
7390 ready[n_ready - 1] = insn;
7391 *insnp = highest;
7392 return 1;
7393 }
7394 }
7395 }
7396
7397 if (n_asms < n_ready)
7398 {
7399 /* Some normal insns to process. Skip the asms. */
7400 ready += n_asms;
7401 n_ready -= n_asms;
7402 }
7403 else if (n_ready > 0)
7404 return 1;
7405 }
7406
7407 if (ia64_final_schedule)
7408 {
7409 int deleted = 0;
7410 int nr_need_stop = 0;
7411
7412 for (insnp = ready; insnp < e_ready; insnp++)
7413 if (safe_group_barrier_needed (*insnp))
7414 nr_need_stop++;
7415
7416 if (reorder_type == 1 && n_ready == nr_need_stop)
7417 return 0;
7418 if (reorder_type == 0)
7419 return 1;
7420 insnp = e_ready;
7421 /* Move down everything that needs a stop bit, preserving
7422 relative order. */
7423 while (insnp-- > ready + deleted)
7424 while (insnp >= ready + deleted)
7425 {
7426 rtx insn = *insnp;
7427 if (! safe_group_barrier_needed (insn))
7428 break;
7429 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7430 *ready = insn;
7431 deleted++;
7432 }
7433 n_ready -= deleted;
7434 ready += deleted;
7435 }
7436
7437 current_cycle = clock_var;
7438 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7439 {
7440 int moved = 0;
7441
7442 insnp = e_ready;
7443 /* Move down loads/stores, preserving relative order. */
7444 while (insnp-- > ready + moved)
7445 while (insnp >= ready + moved)
7446 {
7447 rtx insn = *insnp;
7448 if (! is_load_p (insn))
7449 break;
7450 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7451 *ready = insn;
7452 moved++;
7453 }
7454 n_ready -= moved;
7455 ready += moved;
7456 }
7457
7458 return 1;
7459 }
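
/* Illustrative sketch (not part of the original source): both loops above
   use the same "demote matching insns to the front of READY, preserving
   relative order" idiom.  The scheduler keeps the most desirable candidate
   at the end of the READY array, so moving an insn towards ready[0] makes
   it less likely to be issued first.  PRED stands for a predicate such as
   safe_group_barrier_needed or is_load_p.  */
#if 0
static int
demote_matching (rtx *ready, int n_ready, int (*pred) (rtx))
{
  int moved = 0;
  rtx *insnp = ready + n_ready;

  while (insnp-- > ready + moved)
    while (insnp >= ready + moved)
      {
        rtx insn = *insnp;

        if (!pred (insn))
          break;
        memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
        *ready = insn;
        moved++;
      }
  return moved;
}
#endif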
7460
7461 /* We are about to begin issuing insns for this clock cycle. Override
7462 the default sort algorithm to better slot instructions. */
7463
7464 static int
7465 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7466 int clock_var)
7467 {
7468 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7469 pn_ready, clock_var, 0);
7470 }
7471
7472 /* Like ia64_sched_reorder, but called after issuing each insn.
7473 Override the default sort algorithm to better slot instructions. */
7474
7475 static int
7476 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7477 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7478 int *pn_ready, int clock_var)
7479 {
7480 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7481 clock_var, 1);
7482 }
7483
7484 /* We are about to issue INSN. Return the number of insns left on the
7485 ready queue that can be issued this cycle. */
7486
7487 static int
7488 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7489 int sched_verbose ATTRIBUTE_UNUSED,
7490 rtx insn ATTRIBUTE_UNUSED,
7491 int can_issue_more ATTRIBUTE_UNUSED)
7492 {
7493 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7494 /* Modulo scheduling does not extend h_i_d when emitting
7495 new instructions. Don't use h_i_d if we don't have to. */
7496 {
7497 if (DONE_SPEC (insn) & BEGIN_DATA)
7498 pending_data_specs++;
7499 if (CHECK_SPEC (insn) & BEGIN_DATA)
7500 pending_data_specs--;
7501 }
7502
7503 if (DEBUG_INSN_P (insn))
7504 return 1;
7505
7506 last_scheduled_insn = insn;
7507 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7508 if (reload_completed)
7509 {
7510 int needed = group_barrier_needed (insn);
7511
7512 gcc_assert (!needed);
7513 if (CALL_P (insn))
7514 init_insn_group_barriers ();
7515 stops_p [INSN_UID (insn)] = stop_before_p;
7516 stop_before_p = 0;
7517
7518 record_memory_reference (insn);
7519 }
7520 return 1;
7521 }
7522
7523 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7524 can be chosen. */
7525
7526 static int
7527 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7528 {
7529 gcc_assert (insn && INSN_P (insn));
7530 return ((!reload_completed
7531 || !safe_group_barrier_needed (insn))
7532 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7533 && (!mflag_sched_mem_insns_hard_limit
7534 || !is_load_p (insn)
7535 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7536 }
7537
7538 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7539 can be chosen. */
7540
7541 static bool
7542 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7543 {
7544 gcc_assert (insn && INSN_P (insn));
7545 /* The size of the ALAT is 32 entries. Since we perform conservative data
7546 speculation, we keep the ALAT half-empty. */
7547 return (pending_data_specs < 16
7548 || !(TODO_SPEC (insn) & BEGIN_DATA));
7549 }
7550
7551 /* The following variable value is a pseudo-insn used by the DFA insn
7552 scheduler to change the DFA state when the simulated clock is
7553 increased. */
7554
7555 static rtx dfa_pre_cycle_insn;
7556
7557 /* Returns 1 when a meaningful insn was scheduled between the last group
7558 barrier and LAST. */
7559 static int
7560 scheduled_good_insn (rtx last)
7561 {
7562 if (last && recog_memoized (last) >= 0)
7563 return 1;
7564
7565 for ( ;
7566 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7567 && !stops_p[INSN_UID (last)];
7568 last = PREV_INSN (last))
7569 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7570 the ebb we're scheduling. */
7571 if (INSN_P (last) && recog_memoized (last) >= 0)
7572 return 1;
7573
7574 return 0;
7575 }
7576
7577 /* We are about to begin issuing INSN. Return nonzero if we cannot
7578 issue it on given cycle CLOCK and return zero if we should not sort
7579 the ready queue on the next clock start. */
7580
7581 static int
7582 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7583 int clock, int *sort_p)
7584 {
7585 gcc_assert (insn && INSN_P (insn));
7586
7587 if (DEBUG_INSN_P (insn))
7588 return 0;
7589
7590 /* When a group barrier is needed for insn, last_scheduled_insn
7591 should be set. */
7592 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7593 || last_scheduled_insn);
7594
7595 if ((reload_completed
7596 && (safe_group_barrier_needed (insn)
7597 || (mflag_sched_stop_bits_after_every_cycle
7598 && last_clock != clock
7599 && last_scheduled_insn
7600 && scheduled_good_insn (last_scheduled_insn))))
7601 || (last_scheduled_insn
7602 && (CALL_P (last_scheduled_insn)
7603 || unknown_for_bundling_p (last_scheduled_insn))))
7604 {
7605 init_insn_group_barriers ();
7606
7607 if (verbose && dump)
7608 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7609 last_clock == clock ? " + cycle advance" : "");
7610
7611 stop_before_p = 1;
7612 current_cycle = clock;
7613 mem_ops_in_group[current_cycle % 4] = 0;
7614
7615 if (last_clock == clock)
7616 {
7617 state_transition (curr_state, dfa_stop_insn);
7618 if (TARGET_EARLY_STOP_BITS)
7619 *sort_p = (last_scheduled_insn == NULL_RTX
7620 || ! CALL_P (last_scheduled_insn));
7621 else
7622 *sort_p = 0;
7623 return 1;
7624 }
7625
7626 if (last_scheduled_insn)
7627 {
7628 if (unknown_for_bundling_p (last_scheduled_insn))
7629 state_reset (curr_state);
7630 else
7631 {
7632 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7633 state_transition (curr_state, dfa_stop_insn);
7634 state_transition (curr_state, dfa_pre_cycle_insn);
7635 state_transition (curr_state, NULL);
7636 }
7637 }
7638 }
7639 return 0;
7640 }
7641
7642 /* Implement targetm.sched.h_i_d_extended hook.
7643 Extend internal data structures. */
7644 static void
7645 ia64_h_i_d_extended (void)
7646 {
7647 if (stops_p != NULL)
7648 {
7649 int new_clocks_length = get_max_uid () * 3 / 2;
7650 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7651 clocks_length = new_clocks_length;
7652 }
7653 }
7654 \f
7655
7656 /* This structure describes the data used by the backend to guide scheduling.
7657 When the current scheduling point is switched, this data should be saved
7658 and restored later, if the scheduler returns to this point. */
7659 struct _ia64_sched_context
7660 {
7661 state_t prev_cycle_state;
7662 rtx last_scheduled_insn;
7663 struct reg_write_state rws_sum[NUM_REGS];
7664 struct reg_write_state rws_insn[NUM_REGS];
7665 int first_instruction;
7666 int pending_data_specs;
7667 int current_cycle;
7668 char mem_ops_in_group[4];
7669 };
7670 typedef struct _ia64_sched_context *ia64_sched_context_t;
7671
7672 /* Allocates a scheduling context. */
7673 static void *
7674 ia64_alloc_sched_context (void)
7675 {
7676 return xmalloc (sizeof (struct _ia64_sched_context));
7677 }
7678
7679 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7680 the global context otherwise. */
7681 static void
7682 ia64_init_sched_context (void *_sc, bool clean_p)
7683 {
7684 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7685
7686 sc->prev_cycle_state = xmalloc (dfa_state_size);
7687 if (clean_p)
7688 {
7689 state_reset (sc->prev_cycle_state);
7690 sc->last_scheduled_insn = NULL_RTX;
7691 memset (sc->rws_sum, 0, sizeof (rws_sum));
7692 memset (sc->rws_insn, 0, sizeof (rws_insn));
7693 sc->first_instruction = 1;
7694 sc->pending_data_specs = 0;
7695 sc->current_cycle = 0;
7696 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7697 }
7698 else
7699 {
7700 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7701 sc->last_scheduled_insn = last_scheduled_insn;
7702 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7703 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7704 sc->first_instruction = first_instruction;
7705 sc->pending_data_specs = pending_data_specs;
7706 sc->current_cycle = current_cycle;
7707 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7708 }
7709 }
7710
7711 /* Sets the global scheduling context to the one pointed to by _SC. */
7712 static void
7713 ia64_set_sched_context (void *_sc)
7714 {
7715 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7716
7717 gcc_assert (sc != NULL);
7718
7719 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7720 last_scheduled_insn = sc->last_scheduled_insn;
7721 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7722 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7723 first_instruction = sc->first_instruction;
7724 pending_data_specs = sc->pending_data_specs;
7725 current_cycle = sc->current_cycle;
7726 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7727 }
7728
7729 /* Clears the data in the _SC scheduling context. */
7730 static void
7731 ia64_clear_sched_context (void *_sc)
7732 {
7733 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7734
7735 free (sc->prev_cycle_state);
7736 sc->prev_cycle_state = NULL;
7737 }
7738
7739 /* Frees the _SC scheduling context. */
7740 static void
7741 ia64_free_sched_context (void *_sc)
7742 {
7743 gcc_assert (_sc != NULL);
7744
7745 free (_sc);
7746 }
7747
7748 typedef rtx (* gen_func_t) (rtx, rtx);
7749
7750 /* Return a function that will generate a load of mode MODE_NO
7751 with speculation types TS. */
7752 static gen_func_t
7753 get_spec_load_gen_function (ds_t ts, int mode_no)
7754 {
7755 static gen_func_t gen_ld_[] = {
7756 gen_movbi,
7757 gen_movqi_internal,
7758 gen_movhi_internal,
7759 gen_movsi_internal,
7760 gen_movdi_internal,
7761 gen_movsf_internal,
7762 gen_movdf_internal,
7763 gen_movxf_internal,
7764 gen_movti_internal,
7765 gen_zero_extendqidi2,
7766 gen_zero_extendhidi2,
7767 gen_zero_extendsidi2,
7768 };
7769
7770 static gen_func_t gen_ld_a[] = {
7771 gen_movbi_advanced,
7772 gen_movqi_advanced,
7773 gen_movhi_advanced,
7774 gen_movsi_advanced,
7775 gen_movdi_advanced,
7776 gen_movsf_advanced,
7777 gen_movdf_advanced,
7778 gen_movxf_advanced,
7779 gen_movti_advanced,
7780 gen_zero_extendqidi2_advanced,
7781 gen_zero_extendhidi2_advanced,
7782 gen_zero_extendsidi2_advanced,
7783 };
7784 static gen_func_t gen_ld_s[] = {
7785 gen_movbi_speculative,
7786 gen_movqi_speculative,
7787 gen_movhi_speculative,
7788 gen_movsi_speculative,
7789 gen_movdi_speculative,
7790 gen_movsf_speculative,
7791 gen_movdf_speculative,
7792 gen_movxf_speculative,
7793 gen_movti_speculative,
7794 gen_zero_extendqidi2_speculative,
7795 gen_zero_extendhidi2_speculative,
7796 gen_zero_extendsidi2_speculative,
7797 };
7798 static gen_func_t gen_ld_sa[] = {
7799 gen_movbi_speculative_advanced,
7800 gen_movqi_speculative_advanced,
7801 gen_movhi_speculative_advanced,
7802 gen_movsi_speculative_advanced,
7803 gen_movdi_speculative_advanced,
7804 gen_movsf_speculative_advanced,
7805 gen_movdf_speculative_advanced,
7806 gen_movxf_speculative_advanced,
7807 gen_movti_speculative_advanced,
7808 gen_zero_extendqidi2_speculative_advanced,
7809 gen_zero_extendhidi2_speculative_advanced,
7810 gen_zero_extendsidi2_speculative_advanced,
7811 };
7812 static gen_func_t gen_ld_s_a[] = {
7813 gen_movbi_speculative_a,
7814 gen_movqi_speculative_a,
7815 gen_movhi_speculative_a,
7816 gen_movsi_speculative_a,
7817 gen_movdi_speculative_a,
7818 gen_movsf_speculative_a,
7819 gen_movdf_speculative_a,
7820 gen_movxf_speculative_a,
7821 gen_movti_speculative_a,
7822 gen_zero_extendqidi2_speculative_a,
7823 gen_zero_extendhidi2_speculative_a,
7824 gen_zero_extendsidi2_speculative_a,
7825 };
7826
7827 gen_func_t *gen_ld;
7828
7829 if (ts & BEGIN_DATA)
7830 {
7831 if (ts & BEGIN_CONTROL)
7832 gen_ld = gen_ld_sa;
7833 else
7834 gen_ld = gen_ld_a;
7835 }
7836 else if (ts & BEGIN_CONTROL)
7837 {
7838 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7839 || ia64_needs_block_p (ts))
7840 gen_ld = gen_ld_s;
7841 else
7842 gen_ld = gen_ld_s_a;
7843 }
7844 else if (ts == 0)
7845 gen_ld = gen_ld_;
7846 else
7847 gcc_unreachable ();
7848
7849 return gen_ld[mode_no];
7850 }
7851
7852 /* Constants that help map 'enum machine_mode' to int. */
7853 enum SPEC_MODES
7854 {
7855 SPEC_MODE_INVALID = -1,
7856 SPEC_MODE_FIRST = 0,
7857 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7858 SPEC_MODE_FOR_EXTEND_LAST = 3,
7859 SPEC_MODE_LAST = 8
7860 };
7861
7862 enum
7863 {
7864 /* Offset to reach ZERO_EXTEND patterns. */
7865 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7866 };
7867
7868 /* Return the index of MODE. */
7869 static int
7870 ia64_mode_to_int (enum machine_mode mode)
7871 {
7872 switch (mode)
7873 {
7874 case BImode: return 0; /* SPEC_MODE_FIRST */
7875 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7876 case HImode: return 2;
7877 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7878 case DImode: return 4;
7879 case SFmode: return 5;
7880 case DFmode: return 6;
7881 case XFmode: return 7;
7882 case TImode:
7883 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7884 mentioned in itanium[12].md. Predicate fp_register_operand also
7885 needs to be defined. Bottom line: better disable for now. */
7886 return SPEC_MODE_INVALID;
7887 default: return SPEC_MODE_INVALID;
7888 }
7889 }
7890
7891 /* Provide information about speculation capabilities. */
7892 static void
7893 ia64_set_sched_flags (spec_info_t spec_info)
7894 {
7895 unsigned int *flags = &(current_sched_info->flags);
7896
7897 if (*flags & SCHED_RGN
7898 || *flags & SCHED_EBB
7899 || *flags & SEL_SCHED)
7900 {
7901 int mask = 0;
7902
7903 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7904 || (mflag_sched_ar_data_spec && reload_completed))
7905 {
7906 mask |= BEGIN_DATA;
7907
7908 if (!sel_sched_p ()
7909 && ((mflag_sched_br_in_data_spec && !reload_completed)
7910 || (mflag_sched_ar_in_data_spec && reload_completed)))
7911 mask |= BE_IN_DATA;
7912 }
7913
7914 if (mflag_sched_control_spec
7915 && (!sel_sched_p ()
7916 || reload_completed))
7917 {
7918 mask |= BEGIN_CONTROL;
7919
7920 if (!sel_sched_p () && mflag_sched_in_control_spec)
7921 mask |= BE_IN_CONTROL;
7922 }
7923
7924 spec_info->mask = mask;
7925
7926 if (mask)
7927 {
7928 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7929
7930 if (mask & BE_IN_SPEC)
7931 *flags |= NEW_BBS;
7932
7933 spec_info->flags = 0;
7934
7935 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7936 spec_info->flags |= PREFER_NON_DATA_SPEC;
7937
7938 if (mask & CONTROL_SPEC)
7939 {
7940 if (mflag_sched_prefer_non_control_spec_insns)
7941 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7942
7943 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7944 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7945 }
7946
7947 if (sched_verbose >= 1)
7948 spec_info->dump = sched_dump;
7949 else
7950 spec_info->dump = 0;
7951
7952 if (mflag_sched_count_spec_in_critical_path)
7953 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7954 }
7955 }
7956 else
7957 spec_info->mask = 0;
7958 }
7959
7960 /* If INSN is an appropriate load, return its mode index.
7961 Return -1 otherwise. */
7962 static int
7963 get_mode_no_for_insn (rtx insn)
7964 {
7965 rtx reg, mem, mode_rtx;
7966 int mode_no;
7967 bool extend_p;
7968
7969 extract_insn_cached (insn);
7970
7971 /* We use WHICH_ALTERNATIVE only after reload. This will
7972 guarantee that reload won't touch a speculative insn. */
7973
7974 if (recog_data.n_operands != 2)
7975 return -1;
7976
7977 reg = recog_data.operand[0];
7978 mem = recog_data.operand[1];
7979
7980 /* We should use MEM's mode since REG's mode in the presence of
7981 ZERO_EXTEND will always be DImode. */
7982 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7983 /* Process non-speculative ld. */
7984 {
7985 if (!reload_completed)
7986 {
7987 /* Do not speculate into regs like ar.lc. */
7988 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7989 return -1;
7990
7991 if (!MEM_P (mem))
7992 return -1;
7993
7994 {
7995 rtx mem_reg = XEXP (mem, 0);
7996
7997 if (!REG_P (mem_reg))
7998 return -1;
7999 }
8000
8001 mode_rtx = mem;
8002 }
8003 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
8004 {
8005 gcc_assert (REG_P (reg) && MEM_P (mem));
8006 mode_rtx = mem;
8007 }
8008 else
8009 return -1;
8010 }
8011 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
8012 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
8013 || get_attr_check_load (insn) == CHECK_LOAD_YES)
8014 /* Process speculative ld or ld.c. */
8015 {
8016 gcc_assert (REG_P (reg) && MEM_P (mem));
8017 mode_rtx = mem;
8018 }
8019 else
8020 {
8021 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
8022
8023 if (attr_class == ITANIUM_CLASS_CHK_A
8024 || attr_class == ITANIUM_CLASS_CHK_S_I
8025 || attr_class == ITANIUM_CLASS_CHK_S_F)
8026 /* Process chk. */
8027 mode_rtx = reg;
8028 else
8029 return -1;
8030 }
8031
8032 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
8033
8034 if (mode_no == SPEC_MODE_INVALID)
8035 return -1;
8036
8037 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
8038
8039 if (extend_p)
8040 {
8041 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
8042 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
8043 return -1;
8044
8045 mode_no += SPEC_GEN_EXTEND_OFFSET;
8046 }
8047
8048 return mode_no;
8049 }
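
/* Illustrative sketch (not part of the original source): the offset
   arithmetic above selects the zero-extend entries of the generator tables.
   With the enum values defined earlier (SPEC_MODE_FOR_EXTEND_FIRST == 1,
   SPEC_MODE_LAST == 8, hence SPEC_GEN_EXTEND_OFFSET == 8), a QImode
   extending load maps to index 9, HImode to 10 and SImode to 11, i.e. the
   gen_zero_extend*di2 variants.  */
#if 0
static int
spec_table_index (int mode_no, int extend_p)
{
  if (!extend_p)
    return mode_no;                        /* 0 .. 8: plain move patterns.  */
  if (mode_no < SPEC_MODE_FOR_EXTEND_FIRST
      || mode_no > SPEC_MODE_FOR_EXTEND_LAST)
    return SPEC_MODE_INVALID;              /* Only QI/HI/SI can be extended.  */
  return mode_no + SPEC_GEN_EXTEND_OFFSET; /* 9 .. 11: zero-extend variants.  */
}
#endif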
8050
8051 /* If X is an unspec part of a speculative load, return its code.
8052 Return -1 otherwise. */
8053 static int
8054 get_spec_unspec_code (const_rtx x)
8055 {
8056 if (GET_CODE (x) != UNSPEC)
8057 return -1;
8058
8059 {
8060 int code;
8061
8062 code = XINT (x, 1);
8063
8064 switch (code)
8065 {
8066 case UNSPEC_LDA:
8067 case UNSPEC_LDS:
8068 case UNSPEC_LDS_A:
8069 case UNSPEC_LDSA:
8070 return code;
8071
8072 default:
8073 return -1;
8074 }
8075 }
8076 }
8077
8078 /* Implement skip_rtx_p hook. */
8079 static bool
8080 ia64_skip_rtx_p (const_rtx x)
8081 {
8082 return get_spec_unspec_code (x) != -1;
8083 }
8084
8085 /* If INSN is a speculative load, return its UNSPEC code.
8086 Return -1 otherwise. */
8087 static int
8088 get_insn_spec_code (const_rtx insn)
8089 {
8090 rtx pat, reg, mem;
8091
8092 pat = PATTERN (insn);
8093
8094 if (GET_CODE (pat) == COND_EXEC)
8095 pat = COND_EXEC_CODE (pat);
8096
8097 if (GET_CODE (pat) != SET)
8098 return -1;
8099
8100 reg = SET_DEST (pat);
8101 if (!REG_P (reg))
8102 return -1;
8103
8104 mem = SET_SRC (pat);
8105 if (GET_CODE (mem) == ZERO_EXTEND)
8106 mem = XEXP (mem, 0);
8107
8108 return get_spec_unspec_code (mem);
8109 }
8110
8111 /* If INSN is a speculative load, return a ds with the speculation types.
8112 Otherwise [if INSN is a normal instruction] return 0. */
8113 static ds_t
8114 ia64_get_insn_spec_ds (rtx insn)
8115 {
8116 int code = get_insn_spec_code (insn);
8117
8118 switch (code)
8119 {
8120 case UNSPEC_LDA:
8121 return BEGIN_DATA;
8122
8123 case UNSPEC_LDS:
8124 case UNSPEC_LDS_A:
8125 return BEGIN_CONTROL;
8126
8127 case UNSPEC_LDSA:
8128 return BEGIN_DATA | BEGIN_CONTROL;
8129
8130 default:
8131 return 0;
8132 }
8133 }
8134
8135 /* If INSN is a speculative load return a ds with the speculation types that
8136 will be checked.
8137 Otherwise [if INSN is a normal instruction] return 0. */
8138 static ds_t
8139 ia64_get_insn_checked_ds (rtx insn)
8140 {
8141 int code = get_insn_spec_code (insn);
8142
8143 switch (code)
8144 {
8145 case UNSPEC_LDA:
8146 return BEGIN_DATA | BEGIN_CONTROL;
8147
8148 case UNSPEC_LDS:
8149 return BEGIN_CONTROL;
8150
8151 case UNSPEC_LDS_A:
8152 case UNSPEC_LDSA:
8153 return BEGIN_DATA | BEGIN_CONTROL;
8154
8155 default:
8156 return 0;
8157 }
8158 }
8159
8160 /* Return a speculative pattern for INSN with speculation types TS and
8161 machine mode index MODE_NO. The ZERO_EXTEND variant is selected
8162 through MODE_NO itself (see SPEC_GEN_EXTEND_OFFSET above), so the
8163 caller does not pass a separate extension flag. */
8164 static rtx
8165 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
8166 {
8167 rtx pat, new_pat;
8168 gen_func_t gen_load;
8169
8170 gen_load = get_spec_load_gen_function (ts, mode_no);
8171
8172 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8173 copy_rtx (recog_data.operand[1]));
8174
8175 pat = PATTERN (insn);
8176 if (GET_CODE (pat) == COND_EXEC)
8177 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8178 new_pat);
8179
8180 return new_pat;
8181 }
8182
8183 static bool
8184 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8185 ds_t ds ATTRIBUTE_UNUSED)
8186 {
8187 return false;
8188 }
8189
8190 /* Implement targetm.sched.speculate_insn hook.
8191 Check whether INSN can be made TS-speculative.
8192 If not, return -1.
8193 If it can, generate the speculative pattern in *NEW_PAT and return 1.
8194 If the current pattern of INSN already provides TS speculation,
8195 return 0. */
8196 static int
8197 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
8198 {
8199 int mode_no;
8200 int res;
8201
8202 gcc_assert (!(ts & ~SPECULATIVE));
8203
8204 if (ia64_spec_check_p (insn))
8205 return -1;
8206
8207 if ((ts & BE_IN_SPEC)
8208 && !insn_can_be_in_speculative_p (insn, ts))
8209 return -1;
8210
8211 mode_no = get_mode_no_for_insn (insn);
8212
8213 if (mode_no != SPEC_MODE_INVALID)
8214 {
8215 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8216 res = 0;
8217 else
8218 {
8219 res = 1;
8220 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8221 }
8222 }
8223 else
8224 res = -1;
8225
8226 return res;
8227 }
8228
8229 /* Return a function that will generate a check for speculation TS with mode
8230 MODE_NO.
8231 If a simple check is needed, pass true for SIMPLE_CHECK_P.
8232 If a clearing check is needed, pass true for CLEARING_CHECK_P. */
8233 static gen_func_t
8234 get_spec_check_gen_function (ds_t ts, int mode_no,
8235 bool simple_check_p, bool clearing_check_p)
8236 {
8237 static gen_func_t gen_ld_c_clr[] = {
8238 gen_movbi_clr,
8239 gen_movqi_clr,
8240 gen_movhi_clr,
8241 gen_movsi_clr,
8242 gen_movdi_clr,
8243 gen_movsf_clr,
8244 gen_movdf_clr,
8245 gen_movxf_clr,
8246 gen_movti_clr,
8247 gen_zero_extendqidi2_clr,
8248 gen_zero_extendhidi2_clr,
8249 gen_zero_extendsidi2_clr,
8250 };
8251 static gen_func_t gen_ld_c_nc[] = {
8252 gen_movbi_nc,
8253 gen_movqi_nc,
8254 gen_movhi_nc,
8255 gen_movsi_nc,
8256 gen_movdi_nc,
8257 gen_movsf_nc,
8258 gen_movdf_nc,
8259 gen_movxf_nc,
8260 gen_movti_nc,
8261 gen_zero_extendqidi2_nc,
8262 gen_zero_extendhidi2_nc,
8263 gen_zero_extendsidi2_nc,
8264 };
8265 static gen_func_t gen_chk_a_clr[] = {
8266 gen_advanced_load_check_clr_bi,
8267 gen_advanced_load_check_clr_qi,
8268 gen_advanced_load_check_clr_hi,
8269 gen_advanced_load_check_clr_si,
8270 gen_advanced_load_check_clr_di,
8271 gen_advanced_load_check_clr_sf,
8272 gen_advanced_load_check_clr_df,
8273 gen_advanced_load_check_clr_xf,
8274 gen_advanced_load_check_clr_ti,
8275 gen_advanced_load_check_clr_di,
8276 gen_advanced_load_check_clr_di,
8277 gen_advanced_load_check_clr_di,
8278 };
8279 static gen_func_t gen_chk_a_nc[] = {
8280 gen_advanced_load_check_nc_bi,
8281 gen_advanced_load_check_nc_qi,
8282 gen_advanced_load_check_nc_hi,
8283 gen_advanced_load_check_nc_si,
8284 gen_advanced_load_check_nc_di,
8285 gen_advanced_load_check_nc_sf,
8286 gen_advanced_load_check_nc_df,
8287 gen_advanced_load_check_nc_xf,
8288 gen_advanced_load_check_nc_ti,
8289 gen_advanced_load_check_nc_di,
8290 gen_advanced_load_check_nc_di,
8291 gen_advanced_load_check_nc_di,
8292 };
8293 static gen_func_t gen_chk_s[] = {
8294 gen_speculation_check_bi,
8295 gen_speculation_check_qi,
8296 gen_speculation_check_hi,
8297 gen_speculation_check_si,
8298 gen_speculation_check_di,
8299 gen_speculation_check_sf,
8300 gen_speculation_check_df,
8301 gen_speculation_check_xf,
8302 gen_speculation_check_ti,
8303 gen_speculation_check_di,
8304 gen_speculation_check_di,
8305 gen_speculation_check_di,
8306 };
8307
8308 gen_func_t *gen_check;
8309
8310 if (ts & BEGIN_DATA)
8311 {
8312 /* We don't need recovery because even if this is ld.sa, the
8313 ALAT entry will be allocated only if the NAT bit is set to zero.
8314 So it is enough to use ld.c here. */
8315
8316 if (simple_check_p)
8317 {
8318 gcc_assert (mflag_sched_spec_ldc);
8319
8320 if (clearing_check_p)
8321 gen_check = gen_ld_c_clr;
8322 else
8323 gen_check = gen_ld_c_nc;
8324 }
8325 else
8326 {
8327 if (clearing_check_p)
8328 gen_check = gen_chk_a_clr;
8329 else
8330 gen_check = gen_chk_a_nc;
8331 }
8332 }
8333 else if (ts & BEGIN_CONTROL)
8334 {
8335 if (simple_check_p)
8336 /* We might want to use ld.sa -> ld.c instead of
8337 ld.s -> chk.s. */
8338 {
8339 gcc_assert (!ia64_needs_block_p (ts));
8340
8341 if (clearing_check_p)
8342 gen_check = gen_ld_c_clr;
8343 else
8344 gen_check = gen_ld_c_nc;
8345 }
8346 else
8347 {
8348 gen_check = gen_chk_s;
8349 }
8350 }
8351 else
8352 gcc_unreachable ();
8353
8354 gcc_assert (mode_no >= 0);
8355 return gen_check[mode_no];
8356 }
8357
8358 /* Return nonzero if speculation TS needs a branchy recovery check. */
8359 static bool
8360 ia64_needs_block_p (ds_t ts)
8361 {
8362 if (ts & BEGIN_DATA)
8363 return !mflag_sched_spec_ldc;
8364
8365 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8366
8367 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8368 }
8369
8370 /* Generate (or regenerate) a recovery check for INSN. */
8371 static rtx
8372 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8373 {
8374 rtx op1, pat, check_pat;
8375 gen_func_t gen_check;
8376 int mode_no;
8377
8378 mode_no = get_mode_no_for_insn (insn);
8379 gcc_assert (mode_no >= 0);
8380
8381 if (label)
8382 op1 = label;
8383 else
8384 {
8385 gcc_assert (!ia64_needs_block_p (ds));
8386 op1 = copy_rtx (recog_data.operand[1]);
8387 }
8388
8389 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8390 true);
8391
8392 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8393
8394 pat = PATTERN (insn);
8395 if (GET_CODE (pat) == COND_EXEC)
8396 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8397 check_pat);
8398
8399 return check_pat;
8400 }
8401
8402 /* Return nonzero if X is a branchy recovery check. */
8403 static int
8404 ia64_spec_check_p (rtx x)
8405 {
8406 x = PATTERN (x);
8407 if (GET_CODE (x) == COND_EXEC)
8408 x = COND_EXEC_CODE (x);
8409 if (GET_CODE (x) == SET)
8410 return ia64_spec_check_src_p (SET_SRC (x));
8411 return 0;
8412 }
8413
8414 /* Return nonzero if SRC belongs to a recovery check. */
8415 static int
8416 ia64_spec_check_src_p (rtx src)
8417 {
8418 if (GET_CODE (src) == IF_THEN_ELSE)
8419 {
8420 rtx t;
8421
8422 t = XEXP (src, 0);
8423 if (GET_CODE (t) == NE)
8424 {
8425 t = XEXP (t, 0);
8426
8427 if (GET_CODE (t) == UNSPEC)
8428 {
8429 int code;
8430
8431 code = XINT (t, 1);
8432
8433 if (code == UNSPEC_LDCCLR
8434 || code == UNSPEC_LDCNC
8435 || code == UNSPEC_CHKACLR
8436 || code == UNSPEC_CHKANC
8437 || code == UNSPEC_CHKS)
8438 {
8439 gcc_assert (code != 0);
8440 return code;
8441 }
8442 }
8443 }
8444 }
8445 return 0;
8446 }
8447 \f
8448
8449 /* The following page contains abstract data `bundle states' which are
8450 used for bundling insns (inserting nops and template generation). */
8451
8452 /* The following describes the state of insn bundling. */
8453
8454 struct bundle_state
8455 {
8456 /* Unique bundle state number to identify them in the debugging
8457 output */
8458 int unique_num;
8459 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8460 /* number of nops before and after the insn */
8461 short before_nops_num, after_nops_num;
8462 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8463 insn) */
8464 int cost; /* cost of the state in cycles */
8465 int accumulated_insns_num; /* number of all previous insns including
8466 nops; an L insn is counted as 2 insns */
8467 int branch_deviation; /* deviation of previous branches from 3rd slots */
8468 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8469 struct bundle_state *next; /* next state with the same insn_num */
8470 struct bundle_state *originator; /* originator (previous insn state) */
8471 /* All bundle states are in the following chain. */
8472 struct bundle_state *allocated_states_chain;
8473 /* The DFA State after issuing the insn and the nops. */
8474 state_t dfa_state;
8475 };
8476
8477 /* The following array maps an insn number to the corresponding bundle state. */
8478
8479 static struct bundle_state **index_to_bundle_states;
8480
8481 /* The unique number of the next bundle state. */
8482
8483 static int bundle_states_num;
8484
8485 /* All allocated bundle states are in the following chain. */
8486
8487 static struct bundle_state *allocated_bundle_states_chain;
8488
8489 /* All allocated but not used bundle states are in the following
8490 chain. */
8491
8492 static struct bundle_state *free_bundle_state_chain;
8493
8494
8495 /* The following function returns a free bundle state. */
8496
8497 static struct bundle_state *
8498 get_free_bundle_state (void)
8499 {
8500 struct bundle_state *result;
8501
8502 if (free_bundle_state_chain != NULL)
8503 {
8504 result = free_bundle_state_chain;
8505 free_bundle_state_chain = result->next;
8506 }
8507 else
8508 {
8509 result = XNEW (struct bundle_state);
8510 result->dfa_state = xmalloc (dfa_state_size);
8511 result->allocated_states_chain = allocated_bundle_states_chain;
8512 allocated_bundle_states_chain = result;
8513 }
8514 result->unique_num = bundle_states_num++;
8515 return result;
8516
8517 }
8518
8519 /* The following function frees the given bundle state. */
8520
8521 static void
8522 free_bundle_state (struct bundle_state *state)
8523 {
8524 state->next = free_bundle_state_chain;
8525 free_bundle_state_chain = state;
8526 }
8527
8528 /* Start work with abstract data `bundle states'. */
8529
8530 static void
8531 initiate_bundle_states (void)
8532 {
8533 bundle_states_num = 0;
8534 free_bundle_state_chain = NULL;
8535 allocated_bundle_states_chain = NULL;
8536 }
8537
8538 /* Finish work with abstract data `bundle states'. */
8539
8540 static void
8541 finish_bundle_states (void)
8542 {
8543 struct bundle_state *curr_state, *next_state;
8544
8545 for (curr_state = allocated_bundle_states_chain;
8546 curr_state != NULL;
8547 curr_state = next_state)
8548 {
8549 next_state = curr_state->allocated_states_chain;
8550 free (curr_state->dfa_state);
8551 free (curr_state);
8552 }
8553 }
8554
8555 /* Hashtable helpers. */
8556
8557 struct bundle_state_hasher : typed_noop_remove <bundle_state>
8558 {
8559 typedef bundle_state value_type;
8560 typedef bundle_state compare_type;
8561 static inline hashval_t hash (const value_type *);
8562 static inline bool equal (const value_type *, const compare_type *);
8563 };
8564
8565 /* The function returns the hash of bundle state STATE. */
8566
8567 inline hashval_t
8568 bundle_state_hasher::hash (const value_type *state)
8569 {
8570 unsigned result, i;
8571
8572 for (result = i = 0; i < dfa_state_size; i++)
8573 result += (((unsigned char *) state->dfa_state) [i]
8574 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8575 return result + state->insn_num;
8576 }
8577
8578 /* The function returns nonzero if the bundle state keys are equal. */
8579
8580 inline bool
8581 bundle_state_hasher::equal (const value_type *state1,
8582 const compare_type *state2)
8583 {
8584 return (state1->insn_num == state2->insn_num
8585 && memcmp (state1->dfa_state, state2->dfa_state,
8586 dfa_state_size) == 0);
8587 }
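
/* Illustrative sketch (not part of the original source): the hash table
   key is the pair (insn_num, DFA state bytes); two bundle states are
   merged only if both parts match, which is what `hash' and `equal'
   implement above.  A standalone version of the same hash would be:  */
#if 0
static unsigned
bundle_key_hash (const unsigned char *dfa, size_t size, int insn_num)
{
  unsigned h = 0;
  size_t i;

  for (i = 0; i < size; i++)
    h += dfa[i] << ((i % CHAR_BIT) * 3 + CHAR_BIT);
  return h + insn_num;
}
#endif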
8588
8589 /* Hash table of the bundle states. The key is dfa_state and insn_num
8590 of the bundle states. */
8591
8592 static hash_table <bundle_state_hasher> bundle_state_table;
8593
8594 /* The function inserts BUNDLE_STATE into the hash table. The
8595 function returns nonzero if the bundle state has been inserted into
8596 the table. The table contains the best bundle state for each key. */
8597
8598 static int
8599 insert_bundle_state (struct bundle_state *bundle_state)
8600 {
8601 struct bundle_state **entry_ptr;
8602
8603 entry_ptr = bundle_state_table.find_slot (bundle_state, INSERT);
8604 if (*entry_ptr == NULL)
8605 {
8606 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8607 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8608 *entry_ptr = bundle_state;
8609 return TRUE;
8610 }
8611 else if (bundle_state->cost < (*entry_ptr)->cost
8612 || (bundle_state->cost == (*entry_ptr)->cost
8613 && ((*entry_ptr)->accumulated_insns_num
8614 > bundle_state->accumulated_insns_num
8615 || ((*entry_ptr)->accumulated_insns_num
8616 == bundle_state->accumulated_insns_num
8617 && ((*entry_ptr)->branch_deviation
8618 > bundle_state->branch_deviation
8619 || ((*entry_ptr)->branch_deviation
8620 == bundle_state->branch_deviation
8621 && (*entry_ptr)->middle_bundle_stops
8622 > bundle_state->middle_bundle_stops))))))
8623
8624 {
8625 struct bundle_state temp;
8626
8627 temp = **entry_ptr;
8628 **entry_ptr = *bundle_state;
8629 (*entry_ptr)->next = temp.next;
8630 *bundle_state = temp;
8631 }
8632 return FALSE;
8633 }
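
/* Illustrative sketch (not part of the original source): the condition
   above keeps, for each (dfa_state, insn_num) key, the state that is
   lexicographically best on (cost, accumulated_insns_num,
   branch_deviation, middle_bundle_stops); the same ordering is used when
   the best final state is picked in bundling ().  */
#if 0
static int
bundle_state_better_p (const struct bundle_state *a,
                       const struct bundle_state *b)
{
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->accumulated_insns_num != b->accumulated_insns_num)
    return a->accumulated_insns_num < b->accumulated_insns_num;
  if (a->branch_deviation != b->branch_deviation)
    return a->branch_deviation < b->branch_deviation;
  return a->middle_bundle_stops < b->middle_bundle_stops;
}
#endif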
8634
8635 /* Start work with the hash table. */
8636
8637 static void
8638 initiate_bundle_state_table (void)
8639 {
8640 bundle_state_table.create (50);
8641 }
8642
8643 /* Finish work with the hash table. */
8644
8645 static void
8646 finish_bundle_state_table (void)
8647 {
8648 bundle_state_table.dispose ();
8649 }
8650
8651 \f
8652
8653 /* The following variable is an insn `nop' used to check bundle states
8654 with different numbers of inserted nops. */
8655
8656 static rtx ia64_nop;
8657
8658 /* The following function tries to issue NOPS_NUM nops for the current
8659 state without advancing the processor cycle. If it fails, the
8660 function returns FALSE and frees the current state. */
8661
8662 static int
8663 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8664 {
8665 int i;
8666
8667 for (i = 0; i < nops_num; i++)
8668 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8669 {
8670 free_bundle_state (curr_state);
8671 return FALSE;
8672 }
8673 return TRUE;
8674 }
8675
8676 /* The following function tries to issue INSN for the current
8677 state without advancing the processor cycle. If it fails, the
8678 function returns FALSE and frees the current state. */
8679
8680 static int
8681 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8682 {
8683 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8684 {
8685 free_bundle_state (curr_state);
8686 return FALSE;
8687 }
8688 return TRUE;
8689 }
8690
8691 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8692 starting with ORIGINATOR without advancing the processor cycle. If
8693 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8694 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8695 If it is successful, the function creates a new bundle state and
8696 inserts it into the hash table and into `index_to_bundle_states'. */
8697
8698 static void
8699 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8700 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8701 {
8702 struct bundle_state *curr_state;
8703
8704 curr_state = get_free_bundle_state ();
8705 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8706 curr_state->insn = insn;
8707 curr_state->insn_num = originator->insn_num + 1;
8708 curr_state->cost = originator->cost;
8709 curr_state->originator = originator;
8710 curr_state->before_nops_num = before_nops_num;
8711 curr_state->after_nops_num = 0;
8712 curr_state->accumulated_insns_num
8713 = originator->accumulated_insns_num + before_nops_num;
8714 curr_state->branch_deviation = originator->branch_deviation;
8715 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8716 gcc_assert (insn);
8717 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8718 {
8719 gcc_assert (GET_MODE (insn) != TImode);
8720 if (!try_issue_nops (curr_state, before_nops_num))
8721 return;
8722 if (!try_issue_insn (curr_state, insn))
8723 return;
8724 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8725 if (curr_state->accumulated_insns_num % 3 != 0)
8726 curr_state->middle_bundle_stops++;
8727 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8728 && curr_state->accumulated_insns_num % 3 != 0)
8729 {
8730 free_bundle_state (curr_state);
8731 return;
8732 }
8733 }
8734 else if (GET_MODE (insn) != TImode)
8735 {
8736 if (!try_issue_nops (curr_state, before_nops_num))
8737 return;
8738 if (!try_issue_insn (curr_state, insn))
8739 return;
8740 curr_state->accumulated_insns_num++;
8741 gcc_assert (!unknown_for_bundling_p (insn));
8742
8743 if (ia64_safe_type (insn) == TYPE_L)
8744 curr_state->accumulated_insns_num++;
8745 }
8746 else
8747 {
8748 /* If this is an insn that must be first in a group, then don't allow
8749 nops to be emitted before it. Currently, alloc is the only such
8750 supported instruction. */
8751 /* ??? The bundling automatons should handle this for us, but they do
8752 not yet have support for the first_insn attribute. */
8753 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8754 {
8755 free_bundle_state (curr_state);
8756 return;
8757 }
8758
8759 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8760 state_transition (curr_state->dfa_state, NULL);
8761 curr_state->cost++;
8762 if (!try_issue_nops (curr_state, before_nops_num))
8763 return;
8764 if (!try_issue_insn (curr_state, insn))
8765 return;
8766 curr_state->accumulated_insns_num++;
8767 if (unknown_for_bundling_p (insn))
8768 {
8769 /* Finish bundle containing asm insn. */
8770 curr_state->after_nops_num
8771 = 3 - curr_state->accumulated_insns_num % 3;
8772 curr_state->accumulated_insns_num
8773 += 3 - curr_state->accumulated_insns_num % 3;
8774 }
8775 else if (ia64_safe_type (insn) == TYPE_L)
8776 curr_state->accumulated_insns_num++;
8777 }
8778 if (ia64_safe_type (insn) == TYPE_B)
8779 curr_state->branch_deviation
8780 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8781 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8782 {
8783 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8784 {
8785 state_t dfa_state;
8786 struct bundle_state *curr_state1;
8787 struct bundle_state *allocated_states_chain;
8788
8789 curr_state1 = get_free_bundle_state ();
8790 dfa_state = curr_state1->dfa_state;
8791 allocated_states_chain = curr_state1->allocated_states_chain;
8792 *curr_state1 = *curr_state;
8793 curr_state1->dfa_state = dfa_state;
8794 curr_state1->allocated_states_chain = allocated_states_chain;
8795 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8796 dfa_state_size);
8797 curr_state = curr_state1;
8798 }
8799 if (!try_issue_nops (curr_state,
8800 3 - curr_state->accumulated_insns_num % 3))
8801 return;
8802 curr_state->after_nops_num
8803 = 3 - curr_state->accumulated_insns_num % 3;
8804 curr_state->accumulated_insns_num
8805 += 3 - curr_state->accumulated_insns_num % 3;
8806 }
8807 if (!insert_bundle_state (curr_state))
8808 free_bundle_state (curr_state);
8809 return;
8810 }
8811
8812 /* The following function returns the position in the two-bundle window
8813 for the given STATE. */
8814
8815 static int
8816 get_max_pos (state_t state)
8817 {
8818 if (cpu_unit_reservation_p (state, pos_6))
8819 return 6;
8820 else if (cpu_unit_reservation_p (state, pos_5))
8821 return 5;
8822 else if (cpu_unit_reservation_p (state, pos_4))
8823 return 4;
8824 else if (cpu_unit_reservation_p (state, pos_3))
8825 return 3;
8826 else if (cpu_unit_reservation_p (state, pos_2))
8827 return 2;
8828 else if (cpu_unit_reservation_p (state, pos_1))
8829 return 1;
8830 else
8831 return 0;
8832 }
8833
8834 /* The function returns the code of a possible template for the given
8835 position and state. It should be called only with position values of
8836 3 or 6. We avoid generating F NOPs by putting templates containing F
8837 insns at the end of the template search, because of an undocumented
8838 anomaly in McKinley-derived cores which can cause stalls if an F-unit
8839 insn (including a NOP) is issued within a six-cycle window after
8840 reading certain application registers (such as ar.bsp). Furthermore,
8841 power considerations also argue against the use of F-unit
8842 instructions unless they're really needed. */
8843
8844 static int
8845 get_template (state_t state, int pos)
8846 {
8847 switch (pos)
8848 {
8849 case 3:
8850 if (cpu_unit_reservation_p (state, _0mmi_))
8851 return 1;
8852 else if (cpu_unit_reservation_p (state, _0mii_))
8853 return 0;
8854 else if (cpu_unit_reservation_p (state, _0mmb_))
8855 return 7;
8856 else if (cpu_unit_reservation_p (state, _0mib_))
8857 return 6;
8858 else if (cpu_unit_reservation_p (state, _0mbb_))
8859 return 5;
8860 else if (cpu_unit_reservation_p (state, _0bbb_))
8861 return 4;
8862 else if (cpu_unit_reservation_p (state, _0mmf_))
8863 return 3;
8864 else if (cpu_unit_reservation_p (state, _0mfi_))
8865 return 2;
8866 else if (cpu_unit_reservation_p (state, _0mfb_))
8867 return 8;
8868 else if (cpu_unit_reservation_p (state, _0mlx_))
8869 return 9;
8870 else
8871 gcc_unreachable ();
8872 case 6:
8873 if (cpu_unit_reservation_p (state, _1mmi_))
8874 return 1;
8875 else if (cpu_unit_reservation_p (state, _1mii_))
8876 return 0;
8877 else if (cpu_unit_reservation_p (state, _1mmb_))
8878 return 7;
8879 else if (cpu_unit_reservation_p (state, _1mib_))
8880 return 6;
8881 else if (cpu_unit_reservation_p (state, _1mbb_))
8882 return 5;
8883 else if (cpu_unit_reservation_p (state, _1bbb_))
8884 return 4;
8885 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8886 return 3;
8887 else if (cpu_unit_reservation_p (state, _1mfi_))
8888 return 2;
8889 else if (cpu_unit_reservation_p (state, _1mfb_))
8890 return 8;
8891 else if (cpu_unit_reservation_p (state, _1mlx_))
8892 return 9;
8893 else
8894 gcc_unreachable ();
8895 default:
8896 gcc_unreachable ();
8897 }
8898 }
8899
8900 /* True when INSN is important for bundling. */
8901
8902 static bool
8903 important_for_bundling_p (rtx insn)
8904 {
8905 return (INSN_P (insn)
8906 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8907 && GET_CODE (PATTERN (insn)) != USE
8908 && GET_CODE (PATTERN (insn)) != CLOBBER);
8909 }
8910
8911 /* The following function returns the next insn important for insn
8912 bundling, starting at INSN and not reaching TAIL. */
8913
8914 static rtx
8915 get_next_important_insn (rtx insn, rtx tail)
8916 {
8917 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8918 if (important_for_bundling_p (insn))
8919 return insn;
8920 return NULL_RTX;
8921 }
8922
8923 /* True when INSN is unknown, but important, for bundling. */
8924
8925 static bool
8926 unknown_for_bundling_p (rtx insn)
8927 {
8928 return (INSN_P (insn)
8929 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
8930 && GET_CODE (PATTERN (insn)) != USE
8931 && GET_CODE (PATTERN (insn)) != CLOBBER);
8932 }
8933
8934 /* Add a bundle selector TEMPLATE0 before INSN. */
8935
8936 static void
8937 ia64_add_bundle_selector_before (int template0, rtx insn)
8938 {
8939 rtx b = gen_bundle_selector (GEN_INT (template0));
8940
8941 ia64_emit_insn_before (b, insn);
8942 #if NR_BUNDLES == 10
8943 if ((template0 == 4 || template0 == 5)
8944 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8945 {
8946 int i;
8947 rtx note = NULL_RTX;
8948
8949 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
8950 first or second slot. If it is and has a REG_EH_REGION note, copy the
8951 note to the following nops, as br.call sets rp to the address of the
8952 following bundle and therefore an EH region end must be on a bundle
8953 boundary. */
8954 insn = PREV_INSN (insn);
8955 for (i = 0; i < 3; i++)
8956 {
8957 do
8958 insn = next_active_insn (insn);
8959 while (NONJUMP_INSN_P (insn)
8960 && get_attr_empty (insn) == EMPTY_YES);
8961 if (CALL_P (insn))
8962 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8963 else if (note)
8964 {
8965 int code;
8966
8967 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8968 || code == CODE_FOR_nop_b);
8969 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8970 note = NULL_RTX;
8971 else
8972 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8973 }
8974 }
8975 }
8976 #endif
8977 }
8978
8979 /* The following function does insn bundling. Bundling means
8980 inserting templates and nop insns to fit insn groups into permitted
8981 templates. Instruction scheduling uses an NDFA (non-deterministic
8982 finite automaton) encoding information about the templates and the
8983 inserted nops. Nondeterminism of the automaton permits following
8984 all possible insn sequences very fast.
8985
8986 Unfortunately it is not possible to get information about inserting
8987 nop insns and used templates from the automaton states. The
8988 automaton only says that we can issue an insn, possibly inserting
8989 some nops before it and using some template. Therefore insn
8990 bundling in this function is implemented by using a DFA
8991 (deterministic finite automaton). We follow all possible insn
8992 sequences by inserting 0-2 nops (that is what the NDFA describes for
8993 insn scheduling) before/after each insn being bundled. We know the
8994 start of a simulated processor cycle from insn scheduling (an insn
8995 starting a new cycle has TImode).
8996
8997 A simple implementation of insn bundling would create an enormous
8998 number of possible insn sequences satisfying the information about new
8999 cycle ticks taken from the insn scheduling. To make the algorithm
9000 practical we use dynamic programming. Each decision (about
9001 inserting nops and implicitly about previous decisions) is described
9002 by the structure bundle_state (see above). If we generate the same
9003 bundle state (the key is the automaton state after issuing the insns
9004 and nops for it), we reuse the already generated one. As a consequence
9005 we reject some decisions which cannot improve the solution and
9006 reduce the memory needed by the algorithm.
9007
9008 When we reach the end of EBB (extended basic block), we choose the
9009 best sequence and then, moving back in EBB, insert templates for
9010 the best alternative. The templates are taken from querying
9011 automaton state for each insn in chosen bundle states.
9012
9013 So the algorithm makes two (forward and backward) passes through
9014 EBB. */
9015
9016 static void
9017 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
9018 {
9019 struct bundle_state *curr_state, *next_state, *best_state;
9020 rtx insn, next_insn;
9021 int insn_num;
9022 int i, bundle_end_p, only_bundle_end_p, asm_p;
9023 int pos = 0, max_pos, template0, template1;
9024 rtx b;
9025 rtx nop;
9026 enum attr_type type;
9027
9028 insn_num = 0;
9029 /* Count insns in the EBB. */
9030 for (insn = NEXT_INSN (prev_head_insn);
9031 insn && insn != tail;
9032 insn = NEXT_INSN (insn))
9033 if (INSN_P (insn))
9034 insn_num++;
9035 if (insn_num == 0)
9036 return;
9037 bundling_p = 1;
9038 dfa_clean_insn_cache ();
9039 initiate_bundle_state_table ();
9040 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
9041 /* First (forward) pass -- generation of bundle states. */
9042 curr_state = get_free_bundle_state ();
9043 curr_state->insn = NULL;
9044 curr_state->before_nops_num = 0;
9045 curr_state->after_nops_num = 0;
9046 curr_state->insn_num = 0;
9047 curr_state->cost = 0;
9048 curr_state->accumulated_insns_num = 0;
9049 curr_state->branch_deviation = 0;
9050 curr_state->middle_bundle_stops = 0;
9051 curr_state->next = NULL;
9052 curr_state->originator = NULL;
9053 state_reset (curr_state->dfa_state);
9054 index_to_bundle_states [0] = curr_state;
9055 insn_num = 0;
9056 /* Shift the cycle mark if it is put on an insn which could be ignored. */
9057 for (insn = NEXT_INSN (prev_head_insn);
9058 insn != tail;
9059 insn = NEXT_INSN (insn))
9060 if (INSN_P (insn)
9061 && !important_for_bundling_p (insn)
9062 && GET_MODE (insn) == TImode)
9063 {
9064 PUT_MODE (insn, VOIDmode);
9065 for (next_insn = NEXT_INSN (insn);
9066 next_insn != tail;
9067 next_insn = NEXT_INSN (next_insn))
9068 if (important_for_bundling_p (next_insn)
9069 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
9070 {
9071 PUT_MODE (next_insn, TImode);
9072 break;
9073 }
9074 }
9075 /* Forward pass: generation of bundle states. */
9076 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
9077 insn != NULL_RTX;
9078 insn = next_insn)
9079 {
9080 gcc_assert (important_for_bundling_p (insn));
9081 type = ia64_safe_type (insn);
9082 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
9083 insn_num++;
9084 index_to_bundle_states [insn_num] = NULL;
9085 for (curr_state = index_to_bundle_states [insn_num - 1];
9086 curr_state != NULL;
9087 curr_state = next_state)
9088 {
9089 pos = curr_state->accumulated_insns_num % 3;
9090 next_state = curr_state->next;
9091 /* We must fill up the current bundle in order to start a
9092 subsequent asm insn in a new bundle. An asm insn is always
9093 placed in a separate bundle. */
9094 only_bundle_end_p
9095 = (next_insn != NULL_RTX
9096 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
9097 && unknown_for_bundling_p (next_insn));
9098 /* We may fill up the current bundle if it is the cycle end
9099 without a group barrier. */
9100 bundle_end_p
9101 = (only_bundle_end_p || next_insn == NULL_RTX
9102 || (GET_MODE (next_insn) == TImode
9103 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
9104 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
9105 || type == TYPE_S)
9106 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
9107 only_bundle_end_p);
9108 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
9109 only_bundle_end_p);
9110 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
9111 only_bundle_end_p);
9112 }
9113 gcc_assert (index_to_bundle_states [insn_num]);
9114 for (curr_state = index_to_bundle_states [insn_num];
9115 curr_state != NULL;
9116 curr_state = curr_state->next)
9117 if (verbose >= 2 && dump)
9118 {
9119 /* This structure is taken from the generated code of the
9120 pipeline hazard recognizer (see file insn-attrtab.c).
9121 Please don't forget to change the structure if a new
9122 automaton is added to the .md file. */
9123 struct DFA_chip
9124 {
9125 unsigned short one_automaton_state;
9126 unsigned short oneb_automaton_state;
9127 unsigned short two_automaton_state;
9128 unsigned short twob_automaton_state;
9129 };
9130
9131 fprintf
9132 (dump,
9133 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
9134 curr_state->unique_num,
9135 (curr_state->originator == NULL
9136 ? -1 : curr_state->originator->unique_num),
9137 curr_state->cost,
9138 curr_state->before_nops_num, curr_state->after_nops_num,
9139 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9140 curr_state->middle_bundle_stops,
9141 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9142 INSN_UID (insn));
9143 }
9144 }
9145
9146 /* We should find a solution because the second insn scheduling
9147 pass has found one. */
9148 gcc_assert (index_to_bundle_states [insn_num]);
9149 /* Find a state corresponding to the best insn sequence. */
9150 best_state = NULL;
9151 for (curr_state = index_to_bundle_states [insn_num];
9152 curr_state != NULL;
9153 curr_state = curr_state->next)
9154 /* We only consider states whose last bundle is completely
9155 filled. Among those we prefer insn sequences with minimal
9156 cost, then with the fewest inserted nops, and finally with
9157 branch insns placed in 3rd slots. */
9158 if (curr_state->accumulated_insns_num % 3 == 0
9159 && (best_state == NULL || best_state->cost > curr_state->cost
9160 || (best_state->cost == curr_state->cost
9161 && (curr_state->accumulated_insns_num
9162 < best_state->accumulated_insns_num
9163 || (curr_state->accumulated_insns_num
9164 == best_state->accumulated_insns_num
9165 && (curr_state->branch_deviation
9166 < best_state->branch_deviation
9167 || (curr_state->branch_deviation
9168 == best_state->branch_deviation
9169 && curr_state->middle_bundle_stops
9170 < best_state->middle_bundle_stops)))))))
9171 best_state = curr_state;
9172 /* Second (backward) pass: adding nops and templates. */
9173 gcc_assert (best_state);
9174 insn_num = best_state->before_nops_num;
9175 template0 = template1 = -1;
9176 for (curr_state = best_state;
9177 curr_state->originator != NULL;
9178 curr_state = curr_state->originator)
9179 {
9180 insn = curr_state->insn;
9181 asm_p = unknown_for_bundling_p (insn);
9182 insn_num++;
9183 if (verbose >= 2 && dump)
9184 {
9185 struct DFA_chip
9186 {
9187 unsigned short one_automaton_state;
9188 unsigned short oneb_automaton_state;
9189 unsigned short two_automaton_state;
9190 unsigned short twob_automaton_state;
9191 };
9192
9193 fprintf
9194 (dump,
9195 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9196 curr_state->unique_num,
9197 (curr_state->originator == NULL
9198 ? -1 : curr_state->originator->unique_num),
9199 curr_state->cost,
9200 curr_state->before_nops_num, curr_state->after_nops_num,
9201 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9202 curr_state->middle_bundle_stops,
9203 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9204 INSN_UID (insn));
9205 }
9206 /* Find the position in the current bundle window. The window can
9207 contain at most two bundles. A two-bundle window means that
9208 the processor will make two bundle rotations. */
9209 max_pos = get_max_pos (curr_state->dfa_state);
9210 if (max_pos == 6
9211 /* The following (negative template number) means that the
9212 processor did one bundle rotation. */
9213 || (max_pos == 3 && template0 < 0))
9214 {
9215 /* We are at the end of the window -- find template(s) for
9216 its bundle(s). */
9217 pos = max_pos;
9218 if (max_pos == 3)
9219 template0 = get_template (curr_state->dfa_state, 3);
9220 else
9221 {
9222 template1 = get_template (curr_state->dfa_state, 3);
9223 template0 = get_template (curr_state->dfa_state, 6);
9224 }
9225 }
9226 if (max_pos > 3 && template1 < 0)
9227 /* This may happen when there is a stop inside a bundle. */
9228 {
9229 gcc_assert (pos <= 3);
9230 template1 = get_template (curr_state->dfa_state, 3);
9231 pos += 3;
9232 }
9233 if (!asm_p)
9234 /* Emit nops after the current insn. */
9235 for (i = 0; i < curr_state->after_nops_num; i++)
9236 {
9237 nop = gen_nop ();
9238 emit_insn_after (nop, insn);
9239 pos--;
9240 gcc_assert (pos >= 0);
9241 if (pos % 3 == 0)
9242 {
9243 /* We are at the start of a bundle: emit the template
9244 (it should be defined). */
9245 gcc_assert (template0 >= 0);
9246 ia64_add_bundle_selector_before (template0, nop);
9247 /* If we have a two-bundle window, we make one bundle
9248 rotation. Otherwise template0 will be undefined
9249 (a negative value). */
9250 template0 = template1;
9251 template1 = -1;
9252 }
9253 }
9254 /* Move the position backward in the window. A group barrier has
9255 no slot. An asm insn takes a whole bundle. */
9256 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9257 && !unknown_for_bundling_p (insn))
9258 pos--;
9259 /* Long insn takes 2 slots. */
9260 if (ia64_safe_type (insn) == TYPE_L)
9261 pos--;
9262 gcc_assert (pos >= 0);
9263 if (pos % 3 == 0
9264 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9265 && !unknown_for_bundling_p (insn))
9266 {
9267 /* The current insn is at the bundle start: emit the
9268 template. */
9269 gcc_assert (template0 >= 0);
9270 ia64_add_bundle_selector_before (template0, insn);
9271 b = PREV_INSN (insn);
9272 insn = b;
9273 /* See comment above in analogous place for emitting nops
9274 after the insn. */
9275 template0 = template1;
9276 template1 = -1;
9277 }
9278 /* Emit nops before the current insn. */
9279 for (i = 0; i < curr_state->before_nops_num; i++)
9280 {
9281 nop = gen_nop ();
9282 ia64_emit_insn_before (nop, insn);
9283 nop = PREV_INSN (insn);
9284 insn = nop;
9285 pos--;
9286 gcc_assert (pos >= 0);
9287 if (pos % 3 == 0)
9288 {
9289 /* See comment above in analogous place for emitting nops
9290 after the insn. */
9291 gcc_assert (template0 >= 0);
9292 ia64_add_bundle_selector_before (template0, insn);
9293 b = PREV_INSN (insn);
9294 insn = b;
9295 template0 = template1;
9296 template1 = -1;
9297 }
9298 }
9299 }
9300
9301 #ifdef ENABLE_CHECKING
9302 {
9303 /* Assert right calculation of middle_bundle_stops. */
9304 int num = best_state->middle_bundle_stops;
9305 bool start_bundle = true, end_bundle = false;
9306
9307 for (insn = NEXT_INSN (prev_head_insn);
9308 insn && insn != tail;
9309 insn = NEXT_INSN (insn))
9310 {
9311 if (!INSN_P (insn))
9312 continue;
9313 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9314 start_bundle = true;
9315 else
9316 {
9317 rtx next_insn;
9318
9319 for (next_insn = NEXT_INSN (insn);
9320 next_insn && next_insn != tail;
9321 next_insn = NEXT_INSN (next_insn))
9322 if (INSN_P (next_insn)
9323 && (ia64_safe_itanium_class (next_insn)
9324 != ITANIUM_CLASS_IGNORE
9325 || recog_memoized (next_insn)
9326 == CODE_FOR_bundle_selector)
9327 && GET_CODE (PATTERN (next_insn)) != USE
9328 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9329 break;
9330
9331 end_bundle = next_insn == NULL_RTX
9332 || next_insn == tail
9333 || (INSN_P (next_insn)
9334 && recog_memoized (next_insn)
9335 == CODE_FOR_bundle_selector);
9336 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9337 && !start_bundle && !end_bundle
9338 && next_insn
9339 && !unknown_for_bundling_p (next_insn))
9340 num--;
9341
9342 start_bundle = false;
9343 }
9344 }
9345
9346 gcc_assert (num == 0);
9347 }
9348 #endif
9349
9350 free (index_to_bundle_states);
9351 finish_bundle_state_table ();
9352 bundling_p = 0;
9353 dfa_clean_insn_cache ();
9354 }
9355
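/* The selection of best_state above packs a lexicographic preference
   into one nested conditional: among the states whose last bundle is
   completely filled, lower cost wins, then fewer accumulated insns
   (i.e. fewer inserted nops), then smaller branch deviation, then
   fewer middle-of-bundle stops.  The stand-alone sketch below merely
   restates that ordering on a reduced structure; the names are
   illustrative only and nothing in the port uses them.  */

struct bundle_choice_sketch
{
  int cost;
  int accumulated_insns_num;
  int branch_deviation;
  int middle_bundle_stops;
};

/* Return nonzero if A is preferable to B under the ordering used when
   choosing best_state in bundling ().  Both A and B are assumed to end
   on a bundle boundary (accumulated_insns_num % 3 == 0).  */
static ATTRIBUTE_UNUSED int
bundle_choice_sketch_better_p (const struct bundle_choice_sketch *a,
                               const struct bundle_choice_sketch *b)
{
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->accumulated_insns_num != b->accumulated_insns_num)
    return a->accumulated_insns_num < b->accumulated_insns_num;
  if (a->branch_deviation != b->branch_deviation)
    return a->branch_deviation < b->branch_deviation;
  return a->middle_bundle_stops < b->middle_bundle_stops;
}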
9356 /* The following function is called at the end of scheduling BB or
9357 EBB. After reload, it inserts stop bits and does insn bundling. */
9358
9359 static void
9360 ia64_sched_finish (FILE *dump, int sched_verbose)
9361 {
9362 if (sched_verbose)
9363 fprintf (dump, "// Finishing schedule.\n");
9364 if (!reload_completed)
9365 return;
9366 if (reload_completed)
9367 {
9368 final_emit_insn_group_barriers (dump);
9369 bundling (dump, sched_verbose, current_sched_info->prev_head,
9370 current_sched_info->next_tail);
9371 if (sched_verbose && dump)
9372 fprintf (dump, "// finishing %d-%d\n",
9373 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9374 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9375
9376 return;
9377 }
9378 }
9379
9380 /* The following function inserts stop bits in scheduled BB or EBB. */
9381
9382 static void
9383 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9384 {
9385 rtx insn;
9386 int need_barrier_p = 0;
9387 int seen_good_insn = 0;
9388
9389 init_insn_group_barriers ();
9390
9391 for (insn = NEXT_INSN (current_sched_info->prev_head);
9392 insn != current_sched_info->next_tail;
9393 insn = NEXT_INSN (insn))
9394 {
9395 if (BARRIER_P (insn))
9396 {
9397 rtx last = prev_active_insn (insn);
9398
9399 if (! last)
9400 continue;
9401 if (JUMP_TABLE_DATA_P (last))
9402 last = prev_active_insn (last);
9403 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9404 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9405
9406 init_insn_group_barriers ();
9407 seen_good_insn = 0;
9408 need_barrier_p = 0;
9409 }
9410 else if (NONDEBUG_INSN_P (insn))
9411 {
9412 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9413 {
9414 init_insn_group_barriers ();
9415 seen_good_insn = 0;
9416 need_barrier_p = 0;
9417 }
9418 else if (need_barrier_p || group_barrier_needed (insn)
9419 || (mflag_sched_stop_bits_after_every_cycle
9420 && GET_MODE (insn) == TImode
9421 && seen_good_insn))
9422 {
9423 if (TARGET_EARLY_STOP_BITS)
9424 {
9425 rtx last;
9426
9427 for (last = insn;
9428 last != current_sched_info->prev_head;
9429 last = PREV_INSN (last))
9430 if (INSN_P (last) && GET_MODE (last) == TImode
9431 && stops_p [INSN_UID (last)])
9432 break;
9433 if (last == current_sched_info->prev_head)
9434 last = insn;
9435 last = prev_active_insn (last);
9436 if (last
9437 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9438 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9439 last);
9440 init_insn_group_barriers ();
9441 for (last = NEXT_INSN (last);
9442 last != insn;
9443 last = NEXT_INSN (last))
9444 if (INSN_P (last))
9445 {
9446 group_barrier_needed (last);
9447 if (recog_memoized (last) >= 0
9448 && important_for_bundling_p (last))
9449 seen_good_insn = 1;
9450 }
9451 }
9452 else
9453 {
9454 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9455 insn);
9456 init_insn_group_barriers ();
9457 seen_good_insn = 0;
9458 }
9459 group_barrier_needed (insn);
9460 if (recog_memoized (insn) >= 0
9461 && important_for_bundling_p (insn))
9462 seen_good_insn = 1;
9463 }
9464 else if (recog_memoized (insn) >= 0
9465 && important_for_bundling_p (insn))
9466 seen_good_insn = 1;
9467 need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
9468 }
9469 }
9470 }
9471
9472 \f
9473
9474 /* The following function returns the depth of lookahead used by the
9475 first cycle multipass DFA scheduling of ready insns. */
9476
9477 static int
9478 ia64_first_cycle_multipass_dfa_lookahead (void)
9479 {
9480 return (reload_completed ? 6 : 4);
9481 }
9482
9483 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
9484
9485 static void
9486 ia64_init_dfa_pre_cycle_insn (void)
9487 {
9488 if (temp_dfa_state == NULL)
9489 {
9490 dfa_state_size = state_size ();
9491 temp_dfa_state = xmalloc (dfa_state_size);
9492 prev_cycle_state = xmalloc (dfa_state_size);
9493 }
9494 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9495 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9496 recog_memoized (dfa_pre_cycle_insn);
9497 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9498 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9499 recog_memoized (dfa_stop_insn);
9500 }
9501
9502 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9503 used by the DFA insn scheduler. */
9504
9505 static rtx
9506 ia64_dfa_pre_cycle_insn (void)
9507 {
9508 return dfa_pre_cycle_insn;
9509 }
9510
9511 /* The following function returns TRUE if PRODUCER (of type ilog or
9512 ld) produces the address for CONSUMER (of type st or stf). */
9513
9514 int
9515 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9516 {
9517 rtx dest, reg, mem;
9518
9519 gcc_assert (producer && consumer);
9520 dest = ia64_single_set (producer);
9521 gcc_assert (dest);
9522 reg = SET_DEST (dest);
9523 gcc_assert (reg);
9524 if (GET_CODE (reg) == SUBREG)
9525 reg = SUBREG_REG (reg);
9526 gcc_assert (GET_CODE (reg) == REG);
9527
9528 dest = ia64_single_set (consumer);
9529 gcc_assert (dest);
9530 mem = SET_DEST (dest);
9531 gcc_assert (mem && GET_CODE (mem) == MEM);
9532 return reg_mentioned_p (reg, mem);
9533 }
9534
9535 /* The following function returns TRUE if PRODUCER (of type ilog or
9536 ld) produces the address for CONSUMER (of type ld or fld). */
9537
9538 int
9539 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9540 {
9541 rtx dest, src, reg, mem;
9542
9543 gcc_assert (producer && consumer);
9544 dest = ia64_single_set (producer);
9545 gcc_assert (dest);
9546 reg = SET_DEST (dest);
9547 gcc_assert (reg);
9548 if (GET_CODE (reg) == SUBREG)
9549 reg = SUBREG_REG (reg);
9550 gcc_assert (GET_CODE (reg) == REG);
9551
9552 src = ia64_single_set (consumer);
9553 gcc_assert (src);
9554 mem = SET_SRC (src);
9555 gcc_assert (mem);
9556
9557 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9558 mem = XVECEXP (mem, 0, 0);
9559 else if (GET_CODE (mem) == IF_THEN_ELSE)
9560 /* ??? Is this bypass necessary for ld.c? */
9561 {
9562 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9563 mem = XEXP (mem, 1);
9564 }
9565
9566 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9567 mem = XEXP (mem, 0);
9568
9569 if (GET_CODE (mem) == UNSPEC)
9570 {
9571 int c = XINT (mem, 1);
9572
9573 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9574 || c == UNSPEC_LDSA);
9575 mem = XVECEXP (mem, 0, 0);
9576 }
9577
9578 /* Note that LO_SUM is used for GOT loads. */
9579 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9580
9581 return reg_mentioned_p (reg, mem);
9582 }
9583
9584 /* The following function returns TRUE if INSN produces an address
9585 for a load/store insn. We will place such insns into an M slot
9586 because that decreases their latency. */
9587
9588 int
9589 ia64_produce_address_p (rtx insn)
9590 {
9591 return insn->call;
9592 }
9593
9594 \f
9595 /* Emit pseudo-ops for the assembler to describe predicate relations.
9596 At present this assumes that we only consider predicate pairs to
9597 be mutex, and that the assembler can deduce proper values from
9598 straight-line code. */
9599
9600 static void
9601 emit_predicate_relation_info (void)
9602 {
9603 basic_block bb;
9604
9605 FOR_EACH_BB_REVERSE (bb)
9606 {
9607 int r;
9608 rtx head = BB_HEAD (bb);
9609
9610 /* We only need such notes at code labels. */
9611 if (! LABEL_P (head))
9612 continue;
9613 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9614 head = NEXT_INSN (head);
9615
9616 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9617 grabbing the entire block of predicate registers. */
9618 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9619 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9620 {
9621 rtx p = gen_rtx_REG (BImode, r);
9622 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9623 if (head == BB_END (bb))
9624 BB_END (bb) = n;
9625 head = n;
9626 }
9627 }
9628
9629 /* Look for conditional calls that do not return, and protect predicate
9630 relations around them. Otherwise the assembler will assume the call
9631 returns, and complain about uses of call-clobbered predicates after
9632 the call. */
9633 FOR_EACH_BB_REVERSE (bb)
9634 {
9635 rtx insn = BB_HEAD (bb);
9636
9637 while (1)
9638 {
9639 if (CALL_P (insn)
9640 && GET_CODE (PATTERN (insn)) == COND_EXEC
9641 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9642 {
9643 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9644 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9645 if (BB_HEAD (bb) == insn)
9646 BB_HEAD (bb) = b;
9647 if (BB_END (bb) == insn)
9648 BB_END (bb) = a;
9649 }
9650
9651 if (insn == BB_END (bb))
9652 break;
9653 insn = NEXT_INSN (insn);
9654 }
9655 }
9656 }
9657
9658 /* Perform machine dependent operations on the rtl chain INSNS. */
9659
9660 static void
9661 ia64_reorg (void)
9662 {
9663 /* We are freeing block_for_insn in the toplev to keep compatibility
9664 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9665 compute_bb_for_insn ();
9666
9667 /* If optimizing, we'll have split before scheduling. */
9668 if (optimize == 0)
9669 split_all_insns ();
9670
9671 if (optimize && flag_schedule_insns_after_reload
9672 && dbg_cnt (ia64_sched2))
9673 {
9674 basic_block bb;
9675 timevar_push (TV_SCHED2);
9676 ia64_final_schedule = 1;
9677
9678 /* We can't let modulo-sched prevent us from scheduling any bbs,
9679 since we need the final schedule to produce bundle information. */
9680 FOR_EACH_BB (bb)
9681 bb->flags &= ~BB_DISABLE_SCHEDULE;
9682
9683 initiate_bundle_states ();
9684 ia64_nop = make_insn_raw (gen_nop ());
9685 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9686 recog_memoized (ia64_nop);
9687 clocks_length = get_max_uid () + 1;
9688 stops_p = XCNEWVEC (char, clocks_length);
9689
9690 if (ia64_tune == PROCESSOR_ITANIUM2)
9691 {
9692 pos_1 = get_cpu_unit_code ("2_1");
9693 pos_2 = get_cpu_unit_code ("2_2");
9694 pos_3 = get_cpu_unit_code ("2_3");
9695 pos_4 = get_cpu_unit_code ("2_4");
9696 pos_5 = get_cpu_unit_code ("2_5");
9697 pos_6 = get_cpu_unit_code ("2_6");
9698 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9699 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9700 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9701 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9702 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9703 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9704 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9705 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9706 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9707 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9708 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9709 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9710 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9711 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9712 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9713 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9714 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9715 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9716 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9717 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9718 }
9719 else
9720 {
9721 pos_1 = get_cpu_unit_code ("1_1");
9722 pos_2 = get_cpu_unit_code ("1_2");
9723 pos_3 = get_cpu_unit_code ("1_3");
9724 pos_4 = get_cpu_unit_code ("1_4");
9725 pos_5 = get_cpu_unit_code ("1_5");
9726 pos_6 = get_cpu_unit_code ("1_6");
9727 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9728 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9729 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9730 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9731 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9732 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9733 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9734 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9735 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9736 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9737 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9738 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9739 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9740 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9741 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9742 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9743 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9744 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9745 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9746 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9747 }
9748
9749 if (flag_selective_scheduling2
9750 && !maybe_skip_selective_scheduling ())
9751 run_selective_scheduling ();
9752 else
9753 schedule_ebbs ();
9754
9755 /* Redo the alignment computation, as it might have gone wrong. */
9756 compute_alignments ();
9757
9758 /* We cannot reuse this one because it has been corrupted by the
9759 evil glat. */
9760 finish_bundle_states ();
9761 free (stops_p);
9762 stops_p = NULL;
9763 emit_insn_group_barriers (dump_file);
9764
9765 ia64_final_schedule = 0;
9766 timevar_pop (TV_SCHED2);
9767 }
9768 else
9769 emit_all_insn_group_barriers (dump_file);
9770
9771 df_analyze ();
9772
9773 /* A call must not be the last instruction in a function, so that the
9774 return address is still within the function and unwinding works
9775 properly. Note that IA-64 differs from dwarf2 on this point. */
9776 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9777 {
9778 rtx insn;
9779 int saw_stop = 0;
9780
9781 insn = get_last_insn ();
9782 if (! INSN_P (insn))
9783 insn = prev_active_insn (insn);
9784 if (insn)
9785 {
9786 /* Skip over insns that expand to nothing. */
9787 while (NONJUMP_INSN_P (insn)
9788 && get_attr_empty (insn) == EMPTY_YES)
9789 {
9790 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9791 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9792 saw_stop = 1;
9793 insn = prev_active_insn (insn);
9794 }
9795 if (CALL_P (insn))
9796 {
9797 if (! saw_stop)
9798 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9799 emit_insn (gen_break_f ());
9800 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9801 }
9802 }
9803 }
9804
9805 emit_predicate_relation_info ();
9806
9807 if (flag_var_tracking)
9808 {
9809 timevar_push (TV_VAR_TRACKING);
9810 variable_tracking_main ();
9811 timevar_pop (TV_VAR_TRACKING);
9812 }
9813 df_finish_pass (false);
9814 }
9815 \f
9816 /* Return true if REGNO is used by the epilogue. */
9817
9818 int
9819 ia64_epilogue_uses (int regno)
9820 {
9821 switch (regno)
9822 {
9823 case R_GR (1):
9824 /* With a call to a function in another module, we will write a new
9825 value to "gp". After returning from such a call, we need to make
9826 sure the function restores the original gp-value, even if the
9827 function itself does not use the gp anymore. */
9828 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9829
9830 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9831 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9832 /* For functions defined with the syscall_linkage attribute, all
9833 input registers are marked as live at all function exits. This
9834 prevents the register allocator from using the input registers,
9835 which in turn makes it possible to restart a system call after
9836 an interrupt without having to save/restore the input registers.
9837 This also prevents kernel data from leaking to application code. */
9838 return lookup_attribute ("syscall_linkage",
9839 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9840
9841 case R_BR (0):
9842 /* Conditional return patterns can't represent the use of `b0' as
9843 the return address, so we force the value live this way. */
9844 return 1;
9845
9846 case AR_PFS_REGNUM:
9847 /* Likewise for ar.pfs, which is used by br.ret. */
9848 return 1;
9849
9850 default:
9851 return 0;
9852 }
9853 }
9854
9855 /* Return true if REGNO is used by the frame unwinder. */
9856
9857 int
9858 ia64_eh_uses (int regno)
9859 {
9860 unsigned int r;
9861
9862 if (! reload_completed)
9863 return 0;
9864
9865 if (regno == 0)
9866 return 0;
9867
9868 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9869 if (regno == current_frame_info.r[r]
9870 || regno == emitted_frame_related_regs[r])
9871 return 1;
9872
9873 return 0;
9874 }
9875 \f
9876 /* Return true if this goes in small data/bss. */
9877
9878 /* ??? We could also support our own long data here, generating
9879 movl/add/ld8 instead of addl,ld8/ld8. This makes the code bigger,
9880 but should make it faster because there is one less load. This
9881 also includes incomplete types which can't go in sdata/sbss. */
9882
9883 static bool
9884 ia64_in_small_data_p (const_tree exp)
9885 {
9886 if (TARGET_NO_SDATA)
9887 return false;
9888
9889 /* We want to merge strings, so we never consider them small data. */
9890 if (TREE_CODE (exp) == STRING_CST)
9891 return false;
9892
9893 /* Functions are never small data. */
9894 if (TREE_CODE (exp) == FUNCTION_DECL)
9895 return false;
9896
9897 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9898 {
9899 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9900
9901 if (strcmp (section, ".sdata") == 0
9902 || strncmp (section, ".sdata.", 7) == 0
9903 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9904 || strcmp (section, ".sbss") == 0
9905 || strncmp (section, ".sbss.", 6) == 0
9906 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9907 return true;
9908 }
9909 else
9910 {
9911 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9912
9913 /* If this is an incomplete type with size 0, then we can't put it
9914 in sdata because it might be too big when completed. */
9915 if (size > 0 && size <= ia64_section_threshold)
9916 return true;
9917 }
9918
9919 return false;
9920 }
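/* A condensed restatement of the decision made by ia64_in_small_data_p:
   an explicit small-data/small-bss section name always qualifies, and
   otherwise a known, nonzero size no larger than the small-data
   threshold does.  The parameters below stand in for DECL_SECTION_NAME,
   int_size_in_bytes and ia64_section_threshold; this is an illustrative
   sketch only and is not used by the port.  */
static ATTRIBUTE_UNUSED bool
small_data_decision_sketch (const char *section, HOST_WIDE_INT size,
                            HOST_WIDE_INT threshold)
{
  if (section)
    return (strcmp (section, ".sdata") == 0
            || strncmp (section, ".sdata.", 7) == 0
            || strncmp (section, ".gnu.linkonce.s.", 16) == 0
            || strcmp (section, ".sbss") == 0
            || strncmp (section, ".sbss.", 6) == 0
            || strncmp (section, ".gnu.linkonce.sb.", 17) == 0);

  /* A size of zero usually means an incomplete type, which might grow
     once completed, so it is rejected.  */
  return size > 0 && size <= threshold;
}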
9921 \f
9922 /* Output assembly directives for prologue regions. */
9923
9924 /* True if the block currently being processed is the last block of the function. */
9925
9926 static bool last_block;
9927
9928 /* True if we need a copy_state command at the start of the next block. */
9929
9930 static bool need_copy_state;
9931
9932 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9933 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9934 #endif
9935
9936 /* The function emits unwind directives for the start of an epilogue. */
9937
9938 static void
9939 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9940 bool unwind, bool frame ATTRIBUTE_UNUSED)
9941 {
9942 /* If this isn't the last block of the function, then we need to label the
9943 current state, and copy it back in at the start of the next block. */
9944
9945 if (!last_block)
9946 {
9947 if (unwind)
9948 fprintf (asm_out_file, "\t.label_state %d\n",
9949 ++cfun->machine->state_num);
9950 need_copy_state = true;
9951 }
9952
9953 if (unwind)
9954 fprintf (asm_out_file, "\t.restore sp\n");
9955 }
9956
9957 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9958
9959 static void
9960 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9961 bool unwind, bool frame)
9962 {
9963 rtx dest = SET_DEST (pat);
9964 rtx src = SET_SRC (pat);
9965
9966 if (dest == stack_pointer_rtx)
9967 {
9968 if (GET_CODE (src) == PLUS)
9969 {
9970 rtx op0 = XEXP (src, 0);
9971 rtx op1 = XEXP (src, 1);
9972
9973 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9974
9975 if (INTVAL (op1) < 0)
9976 {
9977 gcc_assert (!frame_pointer_needed);
9978 if (unwind)
9979 fprintf (asm_out_file,
9980 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9981 -INTVAL (op1));
9982 }
9983 else
9984 process_epilogue (asm_out_file, insn, unwind, frame);
9985 }
9986 else
9987 {
9988 gcc_assert (src == hard_frame_pointer_rtx);
9989 process_epilogue (asm_out_file, insn, unwind, frame);
9990 }
9991 }
9992 else if (dest == hard_frame_pointer_rtx)
9993 {
9994 gcc_assert (src == stack_pointer_rtx);
9995 gcc_assert (frame_pointer_needed);
9996
9997 if (unwind)
9998 fprintf (asm_out_file, "\t.vframe r%d\n",
9999 ia64_dbx_register_number (REGNO (dest)));
10000 }
10001 else
10002 gcc_unreachable ();
10003 }
10004
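/* In brief, the mapping implemented by process_cfa_adjust_cfa is:
   "sp = sp - N" at the start of a frame becomes ".fframe N";
   "sp = sp + N" or "sp = fp" means the frame is being torn down and is
   handed to process_epilogue (which emits ".restore sp"); and "fp = sp"
   becomes ".vframe <fp>".  The sketch below restates this as a plain
   decision table; it is illustrative only and is not used by the port.  */
static ATTRIBUTE_UNUSED const char *
cfa_adjust_directive_sketch (bool dest_is_fp, bool src_is_fp,
                             HOST_WIDE_INT sp_adjust)
{
  if (dest_is_fp)
    return ".vframe";      /* fp = sp: frame pointer established.  */
  if (src_is_fp || sp_adjust >= 0)
    return ".restore sp";  /* Epilogue: the frame is released.  */
  return ".fframe";        /* sp = sp - N: the frame is allocated.  */
}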
10005 /* This function processes a SET pattern for REG_CFA_REGISTER. */
10006
10007 static void
10008 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
10009 {
10010 rtx dest = SET_DEST (pat);
10011 rtx src = SET_SRC (pat);
10012 int dest_regno = REGNO (dest);
10013 int src_regno;
10014
10015 if (src == pc_rtx)
10016 {
10017 /* Saving return address pointer. */
10018 if (unwind)
10019 fprintf (asm_out_file, "\t.save rp, r%d\n",
10020 ia64_dbx_register_number (dest_regno));
10021 return;
10022 }
10023
10024 src_regno = REGNO (src);
10025
10026 switch (src_regno)
10027 {
10028 case PR_REG (0):
10029 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
10030 if (unwind)
10031 fprintf (asm_out_file, "\t.save pr, r%d\n",
10032 ia64_dbx_register_number (dest_regno));
10033 break;
10034
10035 case AR_UNAT_REGNUM:
10036 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
10037 if (unwind)
10038 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
10039 ia64_dbx_register_number (dest_regno));
10040 break;
10041
10042 case AR_LC_REGNUM:
10043 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
10044 if (unwind)
10045 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
10046 ia64_dbx_register_number (dest_regno));
10047 break;
10048
10049 default:
10050 /* Everything else should indicate being stored to memory. */
10051 gcc_unreachable ();
10052 }
10053 }
10054
10055 /* This function processes a SET pattern for REG_CFA_OFFSET. */
10056
10057 static void
10058 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
10059 {
10060 rtx dest = SET_DEST (pat);
10061 rtx src = SET_SRC (pat);
10062 int src_regno = REGNO (src);
10063 const char *saveop;
10064 HOST_WIDE_INT off;
10065 rtx base;
10066
10067 gcc_assert (MEM_P (dest));
10068 if (GET_CODE (XEXP (dest, 0)) == REG)
10069 {
10070 base = XEXP (dest, 0);
10071 off = 0;
10072 }
10073 else
10074 {
10075 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
10076 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
10077 base = XEXP (XEXP (dest, 0), 0);
10078 off = INTVAL (XEXP (XEXP (dest, 0), 1));
10079 }
10080
10081 if (base == hard_frame_pointer_rtx)
10082 {
10083 saveop = ".savepsp";
10084 off = - off;
10085 }
10086 else
10087 {
10088 gcc_assert (base == stack_pointer_rtx);
10089 saveop = ".savesp";
10090 }
10091
10092 src_regno = REGNO (src);
10093 switch (src_regno)
10094 {
10095 case BR_REG (0):
10096 gcc_assert (!current_frame_info.r[reg_save_b0]);
10097 if (unwind)
10098 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
10099 saveop, off);
10100 break;
10101
10102 case PR_REG (0):
10103 gcc_assert (!current_frame_info.r[reg_save_pr]);
10104 if (unwind)
10105 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
10106 saveop, off);
10107 break;
10108
10109 case AR_LC_REGNUM:
10110 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
10111 if (unwind)
10112 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
10113 saveop, off);
10114 break;
10115
10116 case AR_PFS_REGNUM:
10117 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
10118 if (unwind)
10119 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
10120 saveop, off);
10121 break;
10122
10123 case AR_UNAT_REGNUM:
10124 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
10125 if (unwind)
10126 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
10127 saveop, off);
10128 break;
10129
10130 case GR_REG (4):
10131 case GR_REG (5):
10132 case GR_REG (6):
10133 case GR_REG (7):
10134 if (unwind)
10135 fprintf (asm_out_file, "\t.save.g 0x%x\n",
10136 1 << (src_regno - GR_REG (4)));
10137 break;
10138
10139 case BR_REG (1):
10140 case BR_REG (2):
10141 case BR_REG (3):
10142 case BR_REG (4):
10143 case BR_REG (5):
10144 if (unwind)
10145 fprintf (asm_out_file, "\t.save.b 0x%x\n",
10146 1 << (src_regno - BR_REG (1)));
10147 break;
10148
10149 case FR_REG (2):
10150 case FR_REG (3):
10151 case FR_REG (4):
10152 case FR_REG (5):
10153 if (unwind)
10154 fprintf (asm_out_file, "\t.save.f 0x%x\n",
10155 1 << (src_regno - FR_REG (2)));
10156 break;
10157
10158 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
10159 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10160 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10161 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10162 if (unwind)
10163 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10164 1 << (src_regno - FR_REG (12)));
10165 break;
10166
10167 default:
10168 /* ??? For some reason we mark other general registers, even those
10169 we can't represent in the unwind info. Ignore them. */
10170 break;
10171 }
10172 }
10173
10174 /* This function looks at a single insn and emits any directives
10175 required to unwind this insn. */
10176
10177 static void
10178 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
10179 {
10180 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10181 bool frame = dwarf2out_do_frame ();
10182 rtx note, pat;
10183 bool handled_one;
10184
10185 if (!unwind && !frame)
10186 return;
10187
10188 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10189 {
10190 last_block = NOTE_BASIC_BLOCK (insn)->next_bb
10191 == EXIT_BLOCK_PTR_FOR_FN (cfun);
10192
10193 /* Restore unwind state from immediately before the epilogue. */
10194 if (need_copy_state)
10195 {
10196 if (unwind)
10197 {
10198 fprintf (asm_out_file, "\t.body\n");
10199 fprintf (asm_out_file, "\t.copy_state %d\n",
10200 cfun->machine->state_num);
10201 }
10202 need_copy_state = false;
10203 }
10204 }
10205
10206 if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10207 return;
10208
10209 /* Look for the ALLOC insn. */
10210 if (INSN_CODE (insn) == CODE_FOR_alloc)
10211 {
10212 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10213 int dest_regno = REGNO (dest);
10214
10215 /* If this is the final destination for ar.pfs, then this must
10216 be the alloc in the prologue. */
10217 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10218 {
10219 if (unwind)
10220 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10221 ia64_dbx_register_number (dest_regno));
10222 }
10223 else
10224 {
10225 /* This must be an alloc before a sibcall. We must drop the
10226 old frame info. The easiest way to drop the old frame
10227 info is to ensure we had a ".restore sp" directive
10228 followed by a new prologue. If the procedure doesn't
10229 have a memory-stack frame, we'll issue a dummy ".restore
10230 sp" now. */
10231 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10232 /* If we haven't done process_epilogue () yet, do it now. */
10233 process_epilogue (asm_out_file, insn, unwind, frame);
10234 if (unwind)
10235 fprintf (asm_out_file, "\t.prologue\n");
10236 }
10237 return;
10238 }
10239
10240 handled_one = false;
10241 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10242 switch (REG_NOTE_KIND (note))
10243 {
10244 case REG_CFA_ADJUST_CFA:
10245 pat = XEXP (note, 0);
10246 if (pat == NULL)
10247 pat = PATTERN (insn);
10248 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10249 handled_one = true;
10250 break;
10251
10252 case REG_CFA_OFFSET:
10253 pat = XEXP (note, 0);
10254 if (pat == NULL)
10255 pat = PATTERN (insn);
10256 process_cfa_offset (asm_out_file, pat, unwind);
10257 handled_one = true;
10258 break;
10259
10260 case REG_CFA_REGISTER:
10261 pat = XEXP (note, 0);
10262 if (pat == NULL)
10263 pat = PATTERN (insn);
10264 process_cfa_register (asm_out_file, pat, unwind);
10265 handled_one = true;
10266 break;
10267
10268 case REG_FRAME_RELATED_EXPR:
10269 case REG_CFA_DEF_CFA:
10270 case REG_CFA_EXPRESSION:
10271 case REG_CFA_RESTORE:
10272 case REG_CFA_SET_VDRAP:
10273 /* Not used in the ia64 port. */
10274 gcc_unreachable ();
10275
10276 default:
10277 /* Not a frame-related note. */
10278 break;
10279 }
10280
10281 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10282 explicit action to take. No guessing required. */
10283 gcc_assert (handled_one);
10284 }
10285
10286 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10287
10288 static void
10289 ia64_asm_emit_except_personality (rtx personality)
10290 {
10291 fputs ("\t.personality\t", asm_out_file);
10292 output_addr_const (asm_out_file, personality);
10293 fputc ('\n', asm_out_file);
10294 }
10295
10296 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10297
10298 static void
10299 ia64_asm_init_sections (void)
10300 {
10301 exception_section = get_unnamed_section (0, output_section_asm_op,
10302 "\t.handlerdata");
10303 }
10304
10305 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10306
10307 static enum unwind_info_type
10308 ia64_debug_unwind_info (void)
10309 {
10310 return UI_TARGET;
10311 }
10312 \f
10313 enum ia64_builtins
10314 {
10315 IA64_BUILTIN_BSP,
10316 IA64_BUILTIN_COPYSIGNQ,
10317 IA64_BUILTIN_FABSQ,
10318 IA64_BUILTIN_FLUSHRS,
10319 IA64_BUILTIN_INFQ,
10320 IA64_BUILTIN_HUGE_VALQ,
10321 IA64_BUILTIN_max
10322 };
10323
10324 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10325
10326 void
10327 ia64_init_builtins (void)
10328 {
10329 tree fpreg_type;
10330 tree float80_type;
10331 tree decl;
10332
10333 /* The __fpreg type. */
10334 fpreg_type = make_node (REAL_TYPE);
10335 TYPE_PRECISION (fpreg_type) = 82;
10336 layout_type (fpreg_type);
10337 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10338
10339 /* The __float80 type. */
10340 float80_type = make_node (REAL_TYPE);
10341 TYPE_PRECISION (float80_type) = 80;
10342 layout_type (float80_type);
10343 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10344
10345 /* The __float128 type. */
10346 if (!TARGET_HPUX)
10347 {
10348 tree ftype;
10349 tree float128_type = make_node (REAL_TYPE);
10350
10351 TYPE_PRECISION (float128_type) = 128;
10352 layout_type (float128_type);
10353 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10354
10355 /* TFmode support builtins. */
10356 ftype = build_function_type_list (float128_type, NULL_TREE);
10357 decl = add_builtin_function ("__builtin_infq", ftype,
10358 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10359 NULL, NULL_TREE);
10360 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10361
10362 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10363 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10364 NULL, NULL_TREE);
10365 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10366
10367 ftype = build_function_type_list (float128_type,
10368 float128_type,
10369 NULL_TREE);
10370 decl = add_builtin_function ("__builtin_fabsq", ftype,
10371 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10372 "__fabstf2", NULL_TREE);
10373 TREE_READONLY (decl) = 1;
10374 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10375
10376 ftype = build_function_type_list (float128_type,
10377 float128_type,
10378 float128_type,
10379 NULL_TREE);
10380 decl = add_builtin_function ("__builtin_copysignq", ftype,
10381 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10382 "__copysigntf3", NULL_TREE);
10383 TREE_READONLY (decl) = 1;
10384 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10385 }
10386 else
10387 /* Under HPUX, this is a synonym for "long double". */
10388 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10389 "__float128");
10390
10391 /* Fwrite on VMS is non-standard. */
10392 #if TARGET_ABI_OPEN_VMS
10393 vms_patch_builtins ();
10394 #endif
10395
10396 #define def_builtin(name, type, code) \
10397 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10398 NULL, NULL_TREE)
10399
10400 decl = def_builtin ("__builtin_ia64_bsp",
10401 build_function_type_list (ptr_type_node, NULL_TREE),
10402 IA64_BUILTIN_BSP);
10403 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10404
10405 decl = def_builtin ("__builtin_ia64_flushrs",
10406 build_function_type_list (void_type_node, NULL_TREE),
10407 IA64_BUILTIN_FLUSHRS);
10408 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10409
10410 #undef def_builtin
10411
10412 if (TARGET_HPUX)
10413 {
10414 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10415 set_user_assembler_name (decl, "_Isfinite");
10416 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10417 set_user_assembler_name (decl, "_Isfinitef");
10418 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10419 set_user_assembler_name (decl, "_Isfinitef128");
10420 }
10421 }
10422
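/* For reference, the two unconditional builtins registered above would
   be used from ia64-targeted user code roughly as follows.  The fragment
   is target-side user code rather than part of this (host-compiled)
   file, so it is kept out of the build; it is an illustrative sketch
   only and the function name is hypothetical.  */
#if 0
void
dump_register_backing_store_sketch (void)
{
  void *bsp;

  /* Force the dirty stacked registers out to the backing store, then
     read the current backing store pointer.  */
  __builtin_ia64_flushrs ();
  bsp = __builtin_ia64_bsp ();

  /* ... walk the memory below BSP here ... */
  (void) bsp;
}
#endif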
10423 rtx
10424 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10425 enum machine_mode mode ATTRIBUTE_UNUSED,
10426 int ignore ATTRIBUTE_UNUSED)
10427 {
10428 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10429 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10430
10431 switch (fcode)
10432 {
10433 case IA64_BUILTIN_BSP:
10434 if (! target || ! register_operand (target, DImode))
10435 target = gen_reg_rtx (DImode);
10436 emit_insn (gen_bsp_value (target));
10437 #ifdef POINTERS_EXTEND_UNSIGNED
10438 target = convert_memory_address (ptr_mode, target);
10439 #endif
10440 return target;
10441
10442 case IA64_BUILTIN_FLUSHRS:
10443 emit_insn (gen_flushrs ());
10444 return const0_rtx;
10445
10446 case IA64_BUILTIN_INFQ:
10447 case IA64_BUILTIN_HUGE_VALQ:
10448 {
10449 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10450 REAL_VALUE_TYPE inf;
10451 rtx tmp;
10452
10453 real_inf (&inf);
10454 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10455
10456 tmp = validize_mem (force_const_mem (target_mode, tmp));
10457
10458 if (target == 0)
10459 target = gen_reg_rtx (target_mode);
10460
10461 emit_move_insn (target, tmp);
10462 return target;
10463 }
10464
10465 case IA64_BUILTIN_FABSQ:
10466 case IA64_BUILTIN_COPYSIGNQ:
10467 return expand_call (exp, target, ignore);
10468
10469 default:
10470 gcc_unreachable ();
10471 }
10472
10473 return NULL_RTX;
10474 }
10475
10476 /* Return the ia64 builtin for CODE. */
10477
10478 static tree
10479 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10480 {
10481 if (code >= IA64_BUILTIN_max)
10482 return error_mark_node;
10483
10484 return ia64_builtins[code];
10485 }
10486
10487 /* On HP-UX IA64, aggregate parameters are passed in the most
10488 significant bits of the stack slot. */
10489
10490 enum direction
10491 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10492 {
10493 /* Exception to normal case for structures/unions/etc. */
10494
10495 if (type && AGGREGATE_TYPE_P (type)
10496 && int_size_in_bytes (type) < UNITS_PER_WORD)
10497 return upward;
10498
10499 /* Fall back to the default. */
10500 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10501 }
10502
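/* Padding an aggregate "upward" on (big-endian) HP-UX means the data
   occupies the most significant bytes of its slot, i.e. the
   low-addressed end, with the pad bytes following it.  The sketch below
   computes the number of pad bytes for a small aggregate, assuming
   UNITS_PER_WORD is 8 as on ia64; it is illustrative only and is not
   used by the port.  */
static ATTRIBUTE_UNUSED int
hpux_small_aggregate_padding_sketch (int size_in_bytes)
{
  if (size_in_bytes <= 0 || size_in_bytes >= UNITS_PER_WORD)
    return 0;  /* Unknown size or a full slot: nothing special to pad.  */

  /* With upward padding the value occupies bytes 0 .. size-1 of the
     slot and the remaining bytes are padding.  */
  return UNITS_PER_WORD - size_in_bytes;
}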
10503 /* Emit text to declare externally defined variables and functions, because
10504 the Intel assembler does not support undefined externals. */
10505
10506 void
10507 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10508 {
10509 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10510 set in order to avoid putting out names that are never really
10511 used. */
10512 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10513 {
10514 /* maybe_assemble_visibility will return 1 if the assembler
10515 visibility directive is output. */
10516 int need_visibility = ((*targetm.binds_local_p) (decl)
10517 && maybe_assemble_visibility (decl));
10518
10519 /* GNU as does not need anything here, but the HP linker does
10520 need something for external functions. */
10521 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10522 && TREE_CODE (decl) == FUNCTION_DECL)
10523 (*targetm.asm_out.globalize_decl_name) (file, decl);
10524 else if (need_visibility && !TARGET_GNU_AS)
10525 (*targetm.asm_out.globalize_label) (file, name);
10526 }
10527 }
10528
10529 /* Set the SImode div/mod functions, since init_integral_libfuncs
10530 only initializes modes of word_mode and larger. Rename the TFmode
10531 libfuncs using the HPUX conventions. __divtf3 is used for XFmode;
10532 we need to keep it for backward compatibility. */
10533
10534 static void
10535 ia64_init_libfuncs (void)
10536 {
10537 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10538 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10539 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10540 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10541
10542 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10543 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10544 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10545 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10546 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10547
10548 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10549 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10550 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10551 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10552 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10553 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10554
10555 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10556 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10557 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10558 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10559 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10560
10561 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10562 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10563 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10564 /* HP-UX 11.23 libc does not have a function for unsigned
10565 SImode-to-TFmode conversion. */
10566 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10567 }
10568
10569 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10570
10571 static void
10572 ia64_hpux_init_libfuncs (void)
10573 {
10574 ia64_init_libfuncs ();
10575
10576 /* The HP SI millicode division and mod functions expect DI arguments.
10577 By turning them off completely we avoid using both libgcc and the
10578 non-standard millicode routines and use the HP DI millicode routines
10579 instead. */
10580
10581 set_optab_libfunc (sdiv_optab, SImode, 0);
10582 set_optab_libfunc (udiv_optab, SImode, 0);
10583 set_optab_libfunc (smod_optab, SImode, 0);
10584 set_optab_libfunc (umod_optab, SImode, 0);
10585
10586 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10587 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10588 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10589 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10590
10591 /* HP-UX libc has TF min/max/abs routines in it. */
10592 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10593 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10594 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10595
10596 /* ia64_expand_compare uses this. */
10597 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10598
10599 /* These should never be used. */
10600 set_optab_libfunc (eq_optab, TFmode, 0);
10601 set_optab_libfunc (ne_optab, TFmode, 0);
10602 set_optab_libfunc (gt_optab, TFmode, 0);
10603 set_optab_libfunc (ge_optab, TFmode, 0);
10604 set_optab_libfunc (lt_optab, TFmode, 0);
10605 set_optab_libfunc (le_optab, TFmode, 0);
10606 }
10607
10608 /* Rename the division and modulus functions in VMS. */
10609
10610 static void
10611 ia64_vms_init_libfuncs (void)
10612 {
10613 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10614 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10615 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10616 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10617 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10618 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10619 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10620 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10621 abort_libfunc = init_one_libfunc ("decc$abort");
10622 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10623 #ifdef MEM_LIBFUNCS_INIT
10624 MEM_LIBFUNCS_INIT;
10625 #endif
10626 }
10627
10628 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10629 the HPUX conventions. */
10630
10631 static void
10632 ia64_sysv4_init_libfuncs (void)
10633 {
10634 ia64_init_libfuncs ();
10635
10636 /* These functions are not part of the HPUX TFmode interface. We
10637 use them instead of _U_Qfcmp, which doesn't work the way we
10638 expect. */
10639 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10640 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10641 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10642 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10643 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10644 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10645
10646 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10647 glibc doesn't have them. */
10648 }
10649
10650 /* Use soft-fp. */
10651
10652 static void
10653 ia64_soft_fp_init_libfuncs (void)
10654 {
10655 }
10656
10657 static bool
10658 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10659 {
10660 return (mode == SImode || mode == DImode);
10661 }
10662 \f
10663 /* For HPUX, it is illegal to have relocations in shared segments. */
10664
10665 static int
10666 ia64_hpux_reloc_rw_mask (void)
10667 {
10668 return 3;
10669 }
10670
10671 /* For others, relax this so that relocations to local data go in
10672 read-only segments, but we still cannot allow global relocations
10673 in read-only segments. */
10674
10675 static int
10676 ia64_reloc_rw_mask (void)
10677 {
10678 return flag_pic ? 3 : 2;
10679 }
10680
10681 /* Return the section to use for X. The only special thing we do here
10682 is to honor small data. */
10683
10684 static section *
10685 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10686 unsigned HOST_WIDE_INT align)
10687 {
10688 if (GET_MODE_SIZE (mode) > 0
10689 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10690 && !TARGET_NO_SDATA)
10691 return sdata_section;
10692 else
10693 return default_elf_select_rtx_section (mode, x, align);
10694 }
10695
10696 static unsigned int
10697 ia64_section_type_flags (tree decl, const char *name, int reloc)
10698 {
10699 unsigned int flags = 0;
10700
10701 if (strcmp (name, ".sdata") == 0
10702 || strncmp (name, ".sdata.", 7) == 0
10703 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10704 || strncmp (name, ".sdata2.", 8) == 0
10705 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10706 || strcmp (name, ".sbss") == 0
10707 || strncmp (name, ".sbss.", 6) == 0
10708 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10709 flags = SECTION_SMALL;
10710
10711 flags |= default_section_type_flags (decl, name, reloc);
10712 return flags;
10713 }
10714
10715 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10716 structure type and that the address of that type should be passed
10717 in out0, rather than in r8. */
10718
10719 static bool
10720 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10721 {
10722 tree ret_type = TREE_TYPE (fntype);
10723
10724 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10725 as the structure return address parameter, if the return value
10726 type has a non-trivial copy constructor or destructor. It is not
10727 clear if this same convention should be used for other
10728 programming languages. Until G++ 3.4, we incorrectly used r8 for
10729 these return values. */
10730 return (abi_version_at_least (2)
10731 && ret_type
10732 && TYPE_MODE (ret_type) == BLKmode
10733 && TREE_ADDRESSABLE (ret_type)
10734 && strcmp (lang_hooks.name, "GNU C++") == 0);
10735 }
10736
10737 /* Output the assembler code for a thunk function. THUNK_DECL is the
10738 declaration for the thunk function itself, FUNCTION is the decl for
10739 the target function. DELTA is an immediate constant offset to be
10740 added to THIS. If VCALL_OFFSET is nonzero, the word at
10741 *(*this + vcall_offset) should be added to THIS. */
10742
10743 static void
10744 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10745 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10746 tree function)
10747 {
10748 rtx this_rtx, insn, funexp;
10749 unsigned int this_parmno;
10750 unsigned int this_regno;
10751 rtx delta_rtx;
10752
10753 reload_completed = 1;
10754 epilogue_completed = 1;
10755
10756 /* Set things up as ia64_expand_prologue might. */
10757 last_scratch_gr_reg = 15;
10758
10759 memset (&current_frame_info, 0, sizeof (current_frame_info));
10760 current_frame_info.spill_cfa_off = -16;
10761 current_frame_info.n_input_regs = 1;
10762 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10763
10764 /* Mark the end of the (empty) prologue. */
10765 emit_note (NOTE_INSN_PROLOGUE_END);
10766
10767 /* Figure out whether "this" will be the first parameter (the
10768 typical case) or the second parameter (as happens when the
10769 virtual function returns certain class objects). */
10770 this_parmno
10771 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10772 ? 1 : 0);
10773 this_regno = IN_REG (this_parmno);
10774 if (!TARGET_REG_NAMES)
10775 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10776
10777 this_rtx = gen_rtx_REG (Pmode, this_regno);
10778
10779 /* Apply the constant offset, if required. */
10780 delta_rtx = GEN_INT (delta);
10781 if (TARGET_ILP32)
10782 {
10783 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10784 REG_POINTER (tmp) = 1;
10785 if (delta && satisfies_constraint_I (delta_rtx))
10786 {
10787 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10788 delta = 0;
10789 }
10790 else
10791 emit_insn (gen_ptr_extend (this_rtx, tmp));
10792 }
10793 if (delta)
10794 {
10795 if (!satisfies_constraint_I (delta_rtx))
10796 {
10797 rtx tmp = gen_rtx_REG (Pmode, 2);
10798 emit_move_insn (tmp, delta_rtx);
10799 delta_rtx = tmp;
10800 }
10801 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10802 }
10803
10804 /* Apply the offset from the vtable, if required. */
10805 if (vcall_offset)
10806 {
10807 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10808 rtx tmp = gen_rtx_REG (Pmode, 2);
10809
10810 if (TARGET_ILP32)
10811 {
10812 rtx t = gen_rtx_REG (ptr_mode, 2);
10813 REG_POINTER (t) = 1;
10814 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10815 if (satisfies_constraint_I (vcall_offset_rtx))
10816 {
10817 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10818 vcall_offset = 0;
10819 }
10820 else
10821 emit_insn (gen_ptr_extend (tmp, t));
10822 }
10823 else
10824 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10825
10826 if (vcall_offset)
10827 {
10828 if (!satisfies_constraint_J (vcall_offset_rtx))
10829 {
10830 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10831 emit_move_insn (tmp2, vcall_offset_rtx);
10832 vcall_offset_rtx = tmp2;
10833 }
10834 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10835 }
10836
10837 if (TARGET_ILP32)
10838 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10839 else
10840 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10841
10842 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10843 }
10844
10845 /* Generate a tail call to the target function. */
10846 if (! TREE_USED (function))
10847 {
10848 assemble_external (function);
10849 TREE_USED (function) = 1;
10850 }
10851 funexp = XEXP (DECL_RTL (function), 0);
10852 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10853 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10854 insn = get_last_insn ();
10855 SIBLING_CALL_P (insn) = 1;
10856
10857 /* Code generation for calls relies on splitting. */
10858 reload_completed = 1;
10859 epilogue_completed = 1;
10860 try_split (PATTERN (insn), insn, 0);
10861
10862 emit_barrier ();
10863
10864 /* Run just enough of rest_of_compilation to get the insns emitted.
10865 There's not really enough bulk here to make other passes such as
10866 instruction scheduling worthwhile. Note that use_thunk calls
10867 assemble_start_function and assemble_end_function. */
10868
10869 emit_all_insn_group_barriers (NULL);
10870 insn = get_insns ();
10871 shorten_branches (insn);
10872 final_start_function (insn, file, 1);
10873 final (insn, file, 1);
10874 final_end_function ();
10875
10876 reload_completed = 0;
10877 epilogue_completed = 0;
10878 }
10879
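/* What the emitted thunk computes, expressed in plain C: the incoming
   "this" is first shifted by DELTA, and, when VCALL_OFFSET is nonzero,
   the adjustment found at *this + vcall_offset (a slot relative to the
   vtable pointer stored in the object) is added as well before the tail
   call to the real function.  The sketch below is illustrative only;
   the code above does the same work on rtl so that it copes with ILP32
   pointer extension and the immediate-operand constraints.  */
static ATTRIBUTE_UNUSED void *
thunk_this_adjust_sketch (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;

  if (vcall_offset != 0)
    {
      /* *p is the vtable pointer; the extra adjustment lives at the
         given offset from it.  */
      char *vtable = *(char **) p;
      p += *(long *) (vtable + vcall_offset);
    }

  return p;
}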
10880 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10881
10882 static rtx
10883 ia64_struct_value_rtx (tree fntype,
10884 int incoming ATTRIBUTE_UNUSED)
10885 {
10886 if (TARGET_ABI_OPEN_VMS ||
10887 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10888 return NULL_RTX;
10889 return gen_rtx_REG (Pmode, GR_REG (8));
10890 }
10891
10892 static bool
10893 ia64_scalar_mode_supported_p (enum machine_mode mode)
10894 {
10895 switch (mode)
10896 {
10897 case QImode:
10898 case HImode:
10899 case SImode:
10900 case DImode:
10901 case TImode:
10902 return true;
10903
10904 case SFmode:
10905 case DFmode:
10906 case XFmode:
10907 case RFmode:
10908 return true;
10909
10910 case TFmode:
10911 return true;
10912
10913 default:
10914 return false;
10915 }
10916 }
10917
10918 static bool
10919 ia64_vector_mode_supported_p (enum machine_mode mode)
10920 {
10921 switch (mode)
10922 {
10923 case V8QImode:
10924 case V4HImode:
10925 case V2SImode:
10926 return true;
10927
10928 case V2SFmode:
10929 return true;
10930
10931 default:
10932 return false;
10933 }
10934 }
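
/* In target C code the vector modes accepted above correspond to the
   8-byte generic vector types, for example

     typedef char  v8qi __attribute__ ((vector_size (8)));   // V8QImode
     typedef short v4hi __attribute__ ((vector_size (8)));   // V4HImode
     typedef int   v2si __attribute__ ((vector_size (8)));   // V2SImode
     typedef float v2sf __attribute__ ((vector_size (8)));   // V2SFmode

   while the scalar hook additionally admits __float80 (XFmode),
   __float128 (TFmode) and __fpreg (RFmode).  The typedef names are just
   examples.  */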
10935
10936 /* Implement the FUNCTION_PROFILER macro. */
10937
10938 void
10939 ia64_output_function_profiler (FILE *file, int labelno)
10940 {
10941 bool indirect_call;
10942
10943 /* If the function needs a static chain and the static chain
10944 register is r15, we use an indirect call so as to bypass
10945 the PLT stub in case the executable is dynamically linked,
10946 because the stub clobbers r15 as per 5.3.6 of the psABI.
10947 We don't need to do that in non-canonical PIC mode. */
10948
10949 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10950 {
10951 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10952 indirect_call = true;
10953 }
10954 else
10955 indirect_call = false;
10956
10957 if (TARGET_GNU_AS)
10958 fputs ("\t.prologue 4, r40\n", file);
10959 else
10960 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10961 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10962
10963 if (NO_PROFILE_COUNTERS)
10964 fputs ("\tmov out3 = r0\n", file);
10965 else
10966 {
10967 char buf[20];
10968 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10969
10970 if (TARGET_AUTO_PIC)
10971 fputs ("\tmovl out3 = @gprel(", file);
10972 else
10973 fputs ("\taddl out3 = @ltoff(", file);
10974 assemble_name (file, buf);
10975 if (TARGET_AUTO_PIC)
10976 fputs (")\n", file);
10977 else
10978 fputs ("), r1\n", file);
10979 }
10980
10981 if (indirect_call)
10982 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10983 fputs ("\t;;\n", file);
10984
10985 fputs ("\t.save rp, r42\n", file);
10986 fputs ("\tmov out2 = b0\n", file);
10987 if (indirect_call)
10988 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10989 fputs ("\t.body\n", file);
10990 fputs ("\tmov out1 = r1\n", file);
10991 if (indirect_call)
10992 {
10993 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10994 fputs ("\tmov b6 = r16\n", file);
10995 fputs ("\tld8 r1 = [r14]\n", file);
10996 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10997 }
10998 else
10999 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
11000 }
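
/* For the common case (direct call, GNU as, NO_PROFILE_COUNTERS) the
   fputs calls above add up to a sequence along these lines:

	.prologue 4, r40
	alloc out0 = ar.pfs, 8, 0, 4, 0
	mov out3 = r0
	;;
	.save rp, r42
	mov out2 = b0
	.body
	mov out1 = r1
	br.call.sptk.many b0 = _mcount
	;;

   The indirect-call variant instead loads the function descriptor of
   _mcount through r14 and calls via b6, bypassing the PLT stub that
   would clobber r15.  */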
11001
11002 static GTY(()) rtx mcount_func_rtx;
11003 static rtx
11004 gen_mcount_func_rtx (void)
11005 {
11006 if (!mcount_func_rtx)
11007 mcount_func_rtx = init_one_libfunc ("_mcount");
11008 return mcount_func_rtx;
11009 }
11010
11011 void
11012 ia64_profile_hook (int labelno)
11013 {
11014 rtx label, ip;
11015
11016 if (NO_PROFILE_COUNTERS)
11017 label = const0_rtx;
11018 else
11019 {
11020 char buf[30];
11021 const char *label_name;
11022 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11023 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
11024 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
11025 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
11026 }
11027 ip = gen_reg_rtx (Pmode);
11028 emit_insn (gen_ip_value (ip));
11029 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
11030 VOIDmode, 3,
11031 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
11032 ip, Pmode,
11033 label, Pmode);
11034 }
11035
11036 /* Return the mangling of TYPE if it is an extended fundamental type. */
11037
11038 static const char *
11039 ia64_mangle_type (const_tree type)
11040 {
11041 type = TYPE_MAIN_VARIANT (type);
11042
11043 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
11044 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
11045 return NULL;
11046
11047 /* On HP-UX, "long double" is TFmode and is mangled as "e", so __float128
11048 is mangled as "e" there too; only use "g" elsewhere. */
11049 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
11050 return "g";
11051 /* On HP-UX, "e" is not available as a mangling of __float80 so use
11052 an extended mangling. Elsewhere, "e" is available since long
11053 double is 80 bits. */
11054 if (TYPE_MODE (type) == XFmode)
11055 return TARGET_HPUX ? "u9__float80" : "e";
11056 if (TYPE_MODE (type) == RFmode)
11057 return "u7__fpreg";
11058 return NULL;
11059 }
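
/* Sketch of the resulting Itanium C++ manglings, assuming a plain
   one-parameter function at namespace scope:

     void f (__float80);    =>  _Z1fe            (Linux, XFmode is "e")
                                _Z1fu9__float80  (HP-UX)
     void g (__float128);   =>  _Z1gg            (Linux, TFmode is "g")
     void h (__fpreg);      =>  _Z1hu7__fpreg    (everywhere)

   On HP-UX __float128 is long double and keeps the default "e".  */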
11060
11061 /* Return the diagnostic message string if conversion from FROMTYPE to
11062 TOTYPE is not allowed, NULL otherwise. */
11063 static const char *
11064 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
11065 {
11066 /* Reject nontrivial conversion to or from __fpreg. */
11067 if (TYPE_MODE (fromtype) == RFmode
11068 && TYPE_MODE (totype) != RFmode
11069 && TYPE_MODE (totype) != VOIDmode)
11070 return N_("invalid conversion from %<__fpreg%>");
11071 if (TYPE_MODE (totype) == RFmode
11072 && TYPE_MODE (fromtype) != RFmode)
11073 return N_("invalid conversion to %<__fpreg%>");
11074 return NULL;
11075 }
11076
11077 /* Return the diagnostic message string if the unary operation OP is
11078 not permitted on TYPE, NULL otherwise. */
11079 static const char *
11080 ia64_invalid_unary_op (int op, const_tree type)
11081 {
11082 /* Reject operations on __fpreg other than unary + or &. */
11083 if (TYPE_MODE (type) == RFmode
11084 && op != CONVERT_EXPR
11085 && op != ADDR_EXPR)
11086 return N_("invalid operation on %<__fpreg%>");
11087 return NULL;
11088 }
11089
11090 /* Return the diagnostic message string if the binary operation OP is
11091 not permitted on TYPE1 and TYPE2, NULL otherwise. */
11092 static const char *
11093 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
11094 {
11095 /* Reject operations on __fpreg. */
11096 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
11097 return N_("invalid operation on %<__fpreg%>");
11098 return NULL;
11099 }
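
/* Sketch of the diagnostics the three hooks above produce for user code:

     __fpreg r, s;
     __fpreg *p = &r;     // OK: unary & is permitted
     s = +r;              // OK: unary + (CONVERT_EXPR) is permitted
     double d = r;        // error: invalid conversion from __fpreg
     s = r + r;           // error: invalid operation on __fpreg
*/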
11100
11101 /* HP-UX version_id attribute.
11102 For object foo, if the version_id is set to 1234, put out an alias
11103 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
11104 other than an alias statement because it is an illegal symbol name. */
11105
11106 static tree
11107 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
11108 tree name ATTRIBUTE_UNUSED,
11109 tree args,
11110 int flags ATTRIBUTE_UNUSED,
11111 bool *no_add_attrs)
11112 {
11113 tree arg = TREE_VALUE (args);
11114
11115 if (TREE_CODE (arg) != STRING_CST)
11116 {
11117 error ("version attribute is not a string");
11118 *no_add_attrs = true;
11119 return NULL_TREE;
11120 }
11121 return NULL_TREE;
11122 }
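
/* Example use of the attribute handled above, reusing the value from
   the comment:

     extern int foo (void) __attribute__ ((version_id ("1234")));

   which is emitted through an '.alias foo "foo{1234}"' directive, since
   foo{1234} is not usable as an ordinary symbol name.  */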
11123
11124 /* Target hook for c_mode_for_suffix. */
11125
11126 static enum machine_mode
11127 ia64_c_mode_for_suffix (char suffix)
11128 {
11129 if (suffix == 'q')
11130 return TFmode;
11131 if (suffix == 'w')
11132 return XFmode;
11133
11134 return VOIDmode;
11135 }
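
/* In target C code these suffixes give constants of the corresponding
   types, e.g.

     __float128 q = 1.0q;   // 'q' suffix -> TFmode constant
     __float80  w = 1.0w;   // 'w' suffix -> XFmode constant
*/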
11136
11137 static GTY(()) rtx ia64_dconst_0_5_rtx;
11138
11139 rtx
11140 ia64_dconst_0_5 (void)
11141 {
11142 if (! ia64_dconst_0_5_rtx)
11143 {
11144 REAL_VALUE_TYPE rv;
11145 real_from_string (&rv, "0.5");
11146 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11147 }
11148 return ia64_dconst_0_5_rtx;
11149 }
11150
11151 static GTY(()) rtx ia64_dconst_0_375_rtx;
11152
11153 rtx
11154 ia64_dconst_0_375 (void)
11155 {
11156 if (! ia64_dconst_0_375_rtx)
11157 {
11158 REAL_VALUE_TYPE rv;
11159 real_from_string (&rv, "0.375");
11160 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11161 }
11162 return ia64_dconst_0_375_rtx;
11163 }
11164
11165 static enum machine_mode
11166 ia64_get_reg_raw_mode (int regno)
11167 {
11168 if (FR_REGNO_P (regno))
11169 return XFmode;
11170 return default_get_reg_raw_mode (regno);
11171 }
11172
11173 /* Implement TARGET_MEMBER_TYPE_FORCES_BLK. ??? Might not be needed
11174 anymore. */
11175
11176 bool
11177 ia64_member_type_forces_blk (const_tree, enum machine_mode mode)
11178 {
11179 return TARGET_HPUX && mode == TFmode;
11180 }
11181
11182 /* Always default to .text section until HP-UX linker is fixed. */
11183
11184 ATTRIBUTE_UNUSED static section *
11185 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11186 enum node_frequency freq ATTRIBUTE_UNUSED,
11187 bool startup ATTRIBUTE_UNUSED,
11188 bool exit ATTRIBUTE_UNUSED)
11189 {
11190 return NULL;
11191 }
11192 \f
11193 /* Construct (set target (vec_select op0 (parallel perm))) and
11194 return true if that's a valid instruction in the active ISA. */
11195
11196 static bool
11197 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11198 {
11199 rtx rperm[MAX_VECT_LEN], x;
11200 unsigned i;
11201
11202 for (i = 0; i < nelt; ++i)
11203 rperm[i] = GEN_INT (perm[i]);
11204
11205 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11206 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11207 x = gen_rtx_SET (VOIDmode, target, x);
11208
11209 x = emit_insn (x);
11210 if (recog_memoized (x) < 0)
11211 {
11212 remove_insn (x);
11213 return false;
11214 }
11215 return true;
11216 }
11217
11218 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11219
11220 static bool
11221 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11222 const unsigned char *perm, unsigned nelt)
11223 {
11224 enum machine_mode v2mode;
11225 rtx x;
11226
11227 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
11228 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11229 return expand_vselect (target, x, perm, nelt);
11230 }
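
/* For example, with TARGET and OP0 in V4HImode and perm = {1, 0, 3, 2},
   expand_vselect emits

     (set (reg:V4HI target)
          (vec_select:V4HI (reg:V4HI op0)
                           (parallel [(const_int 1) (const_int 0)
                                      (const_int 3) (const_int 2)])))

   and keeps it only if recog accepts it.  expand_vselect_vconcat performs
   the same selection on (vec_concat:V8HI op0 op1), so indices
   nelt..2*nelt-1 address elements of OP1.  */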
11231
11232 /* Try to expand a no-op permutation. */
11233
11234 static bool
11235 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11236 {
11237 unsigned i, nelt = d->nelt;
11238
11239 for (i = 0; i < nelt; ++i)
11240 if (d->perm[i] != i)
11241 return false;
11242
11243 if (!d->testing_p)
11244 emit_move_insn (d->target, d->op0);
11245
11246 return true;
11247 }
11248
11249 /* Try to expand D via a shrp instruction. */
11250
11251 static bool
11252 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11253 {
11254 unsigned i, nelt = d->nelt, shift, mask;
11255 rtx tmp, hi, lo;
11256
11257 /* ??? Don't force V2SFmode into the integer registers. */
11258 if (d->vmode == V2SFmode)
11259 return false;
11260
11261 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11262
11263 shift = d->perm[0];
11264 if (BYTES_BIG_ENDIAN && shift > nelt)
11265 return false;
11266
11267 for (i = 1; i < nelt; ++i)
11268 if (d->perm[i] != ((shift + i) & mask))
11269 return false;
11270
11271 if (d->testing_p)
11272 return true;
11273
11274 hi = shift < nelt ? d->op1 : d->op0;
11275 lo = shift < nelt ? d->op0 : d->op1;
11276
11277 shift %= nelt;
11278
11279 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11280
11281 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11282 gcc_assert (IN_RANGE (shift, 1, 63));
11283
11284 /* Recall that big-endian elements are numbered starting at the top of
11285 the register. Ideally we'd have a shift-left-pair. But since we
11286 don't, convert to a shift the other direction. */
11287 if (BYTES_BIG_ENDIAN)
11288 shift = 64 - shift;
11289
11290 tmp = gen_reg_rtx (DImode);
11291 hi = gen_lowpart (DImode, hi);
11292 lo = gen_lowpart (DImode, lo);
11293 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11294
11295 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11296 return true;
11297 }
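
/* Worked example (little-endian, two operands, V8QImode): the
   permutation {1, 2, 3, 4, 5, 6, 7, 8} selects consecutive elements
   starting at index 1, so shift = 1, hi = op1, lo = op0, and the code
   emits roughly

     shrp tmp = hi, lo, 8     // funnel shift of the 128-bit pair by 8 bits

   giving {op0[1], ..., op0[7], op1[0]} as the result.  */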
11298
11299 /* Try to instantiate D in a single instruction. */
11300
11301 static bool
11302 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11303 {
11304 unsigned i, nelt = d->nelt;
11305 unsigned char perm2[MAX_VECT_LEN];
11306
11307 /* Try single-operand selections. */
11308 if (d->one_operand_p)
11309 {
11310 if (expand_vec_perm_identity (d))
11311 return true;
11312 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11313 return true;
11314 }
11315
11316 /* Try two operand selections. */
11317 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11318 return true;
11319
11320 /* Recognize interleave style patterns with reversed operands. */
11321 if (!d->one_operand_p)
11322 {
11323 for (i = 0; i < nelt; ++i)
11324 {
11325 unsigned e = d->perm[i];
11326 if (e >= nelt)
11327 e -= nelt;
11328 else
11329 e += nelt;
11330 perm2[i] = e;
11331 }
11332
11333 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11334 return true;
11335 }
11336
11337 if (expand_vec_perm_shrp (d))
11338 return true;
11339
11340 /* ??? Look for deposit-like permutations where most of the result
11341 comes from one vector unchanged and the rest comes from a
11342 sequential hunk of the other vector. */
11343
11344 return false;
11345 }
11346
11347 /* Pattern match broadcast permutations. */
11348
11349 static bool
11350 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11351 {
11352 unsigned i, elt, nelt = d->nelt;
11353 unsigned char perm2[2];
11354 rtx temp;
11355 bool ok;
11356
11357 if (!d->one_operand_p)
11358 return false;
11359
11360 elt = d->perm[0];
11361 for (i = 1; i < nelt; ++i)
11362 if (d->perm[i] != elt)
11363 return false;
11364
11365 switch (d->vmode)
11366 {
11367 case V2SImode:
11368 case V2SFmode:
11369 /* Implementable by interleave. */
11370 perm2[0] = elt;
11371 perm2[1] = elt + 2;
11372 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11373 gcc_assert (ok);
11374 break;
11375
11376 case V8QImode:
11377 /* Implementable by extract + broadcast. */
11378 if (BYTES_BIG_ENDIAN)
11379 elt = 7 - elt;
11380 elt *= BITS_PER_UNIT;
11381 temp = gen_reg_rtx (DImode);
11382 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11383 GEN_INT (8), GEN_INT (elt)));
11384 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11385 break;
11386
11387 case V4HImode:
11388 /* Should have been matched directly by vec_select. */
11389 default:
11390 gcc_unreachable ();
11391 }
11392
11393 return true;
11394 }
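
/* Worked example: broadcasting element 1 of a V2SImode vector takes the
   interleave path with perm2 = {1, 3}, i.e. a vec_select on
   (vec_concat op0 op0) that picks element 1 from each copy, producing
   {op0[1], op0[1]}.  For V8QImode the element is first pulled into a
   general register with extzv and then replicated with mux1's @brcst
   form.  */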
11395
11396 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11397 two vector permutation into a single vector permutation by using
11398 an interleave operation to merge the vectors. */
11399
11400 static bool
11401 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11402 {
11403 struct expand_vec_perm_d dremap, dfinal;
11404 unsigned char remap[2 * MAX_VECT_LEN];
11405 unsigned contents, i, nelt, nelt2;
11406 unsigned h0, h1, h2, h3;
11407 rtx seq;
11408 bool ok;
11409
11410 if (d->one_operand_p)
11411 return false;
11412
11413 nelt = d->nelt;
11414 nelt2 = nelt / 2;
11415
11416 /* Examine where the elements come from. */
11417 contents = 0;
11418 for (i = 0; i < nelt; ++i)
11419 contents |= 1u << d->perm[i];
11420
11421 memset (remap, 0xff, sizeof (remap));
11422 dremap = *d;
11423
11424 h0 = (1u << nelt2) - 1;
11425 h1 = h0 << nelt2;
11426 h2 = h0 << nelt;
11427 h3 = h0 << (nelt + nelt2);
11428
11429 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11430 {
11431 for (i = 0; i < nelt; ++i)
11432 {
11433 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11434 remap[which] = i;
11435 dremap.perm[i] = which;
11436 }
11437 }
11438 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11439 {
11440 for (i = 0; i < nelt; ++i)
11441 {
11442 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11443 remap[which] = i;
11444 dremap.perm[i] = which;
11445 }
11446 }
11447 else if ((contents & 0x5555) == contents) /* mix even elements */
11448 {
11449 for (i = 0; i < nelt; ++i)
11450 {
11451 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11452 remap[which] = i;
11453 dremap.perm[i] = which;
11454 }
11455 }
11456 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11457 {
11458 for (i = 0; i < nelt; ++i)
11459 {
11460 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11461 remap[which] = i;
11462 dremap.perm[i] = which;
11463 }
11464 }
11465 else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
11466 {
11467 unsigned shift = ctz_hwi (contents);
11468 for (i = 0; i < nelt; ++i)
11469 {
11470 unsigned which = (i + shift) & (2 * nelt - 1);
11471 remap[which] = i;
11472 dremap.perm[i] = which;
11473 }
11474 }
11475 else
11476 return false;
11477
11478 /* Use the remapping array set up above to move the elements from their
11479 swizzled locations into their final destinations. */
11480 dfinal = *d;
11481 for (i = 0; i < nelt; ++i)
11482 {
11483 unsigned e = remap[d->perm[i]];
11484 gcc_assert (e < nelt);
11485 dfinal.perm[i] = e;
11486 }
11487 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11488 dfinal.op1 = dfinal.op0;
11489 dfinal.one_operand_p = true;
11490 dremap.target = dfinal.op0;
11491
11492 /* Test if the final remap can be done with a single insn. For V4HImode
11493 this *will* succeed. For V8QImode or V2SImode it may not. */
11494 start_sequence ();
11495 ok = expand_vec_perm_1 (&dfinal);
11496 seq = get_insns ();
11497 end_sequence ();
11498 if (!ok)
11499 return false;
11500 if (d->testing_p)
11501 return true;
11502
11503 ok = expand_vec_perm_1 (&dremap);
11504 gcc_assert (ok);
11505
11506 emit_insn (seq);
11507 return true;
11508 }
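
/* Worked example (V4HImode): for perm = {4, 0, 5, 0} every index lies
   in the first half of op0 or of op1, so the "punpck even halves" case
   above fires.  DREMAP interleaves those halves into
   T = {op0[0], op1[0], op0[1], op1[1]} via dremap.perm = {0, 4, 1, 5},
   and DFINAL then applies the single-operand permutation {1, 0, 3, 0}
   to T, which yields the requested {op1[0], op0[0], op1[1], op0[0]}.  */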
11509
11510 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11511 constant permutation via two mux2 and a merge. */
11512
11513 static bool
11514 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11515 {
11516 unsigned char perm2[4];
11517 rtx rmask[4];
11518 unsigned i;
11519 rtx t0, t1, mask, x;
11520 bool ok;
11521
11522 if (d->vmode != V4HImode || d->one_operand_p)
11523 return false;
11524 if (d->testing_p)
11525 return true;
11526
11527 for (i = 0; i < 4; ++i)
11528 {
11529 perm2[i] = d->perm[i] & 3;
11530 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
11531 }
11532 mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
11533 mask = force_reg (V4HImode, mask);
11534
11535 t0 = gen_reg_rtx (V4HImode);
11536 t1 = gen_reg_rtx (V4HImode);
11537
11538 ok = expand_vselect (t0, d->op0, perm2, 4);
11539 gcc_assert (ok);
11540 ok = expand_vselect (t1, d->op1, perm2, 4);
11541 gcc_assert (ok);
11542
11543 x = gen_rtx_AND (V4HImode, mask, t0);
11544 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
11545
11546 x = gen_rtx_NOT (V4HImode, mask);
11547 x = gen_rtx_AND (V4HImode, x, t1);
11548 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
11549
11550 x = gen_rtx_IOR (V4HImode, t0, t1);
11551 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
11552
11553 return true;
11554 }
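
/* Worked example: for the two-operand V4HImode permutation {1, 4, 3, 6}
   the code builds perm2 = {1, 0, 3, 2} (the low two bits of each index)
   and mask = {-1, 0, -1, 0}.  The two selections give
   T0 = {op0[1], op0[0], op0[3], op0[2]} and
   T1 = {op1[1], op1[0], op1[3], op1[2]}, and the merge
   (t0 & mask) | (t1 & ~mask) yields {op0[1], op1[0], op0[3], op1[2]},
   which is the requested result.  */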
11555
11556 /* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
11557 With all of the interface bits taken care of, perform the expansion
11558 in D and return true on success. */
11559
11560 static bool
11561 ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
11562 {
11563 if (expand_vec_perm_1 (d))
11564 return true;
11565 if (expand_vec_perm_broadcast (d))
11566 return true;
11567 if (expand_vec_perm_interleave_2 (d))
11568 return true;
11569 if (expand_vec_perm_v4hi_5 (d))
11570 return true;
11571 return false;
11572 }
11573
11574 bool
11575 ia64_expand_vec_perm_const (rtx operands[4])
11576 {
11577 struct expand_vec_perm_d d;
11578 unsigned char perm[MAX_VECT_LEN];
11579 int i, nelt, which;
11580 rtx sel;
11581
11582 d.target = operands[0];
11583 d.op0 = operands[1];
11584 d.op1 = operands[2];
11585 sel = operands[3];
11586
11587 d.vmode = GET_MODE (d.target);
11588 gcc_assert (VECTOR_MODE_P (d.vmode));
11589 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11590 d.testing_p = false;
11591
11592 gcc_assert (GET_CODE (sel) == CONST_VECTOR);
11593 gcc_assert (XVECLEN (sel, 0) == nelt);
11594 gcc_checking_assert (sizeof (d.perm) == sizeof (perm));
11595
11596 for (i = which = 0; i < nelt; ++i)
11597 {
11598 rtx e = XVECEXP (sel, 0, i);
11599 int ei = INTVAL (e) & (2 * nelt - 1);
11600
11601 which |= (ei < nelt ? 1 : 2);
11602 d.perm[i] = ei;
11603 perm[i] = ei;
11604 }
11605
11606 switch (which)
11607 {
11608 default:
11609 gcc_unreachable ();
11610
11611 case 3:
11612 if (!rtx_equal_p (d.op0, d.op1))
11613 {
11614 d.one_operand_p = false;
11615 break;
11616 }
11617
11618 /* The elements of PERM do not suggest that only the first operand
11619 is used, but both operands are identical. Allow easier matching
11620 of the permutation by folding the permutation into the single
11621 input vector. */
11622 for (i = 0; i < nelt; ++i)
11623 if (d.perm[i] >= nelt)
11624 d.perm[i] -= nelt;
11625 /* FALLTHRU */
11626
11627 case 1:
11628 d.op1 = d.op0;
11629 d.one_operand_p = true;
11630 break;
11631
11632 case 2:
11633 for (i = 0; i < nelt; ++i)
11634 d.perm[i] -= nelt;
11635 d.op0 = d.op1;
11636 d.one_operand_p = true;
11637 break;
11638 }
11639
11640 if (ia64_expand_vec_perm_const_1 (&d))
11641 return true;
11642
11643 /* If the mask says both arguments are needed, but they are the same,
11644 the above tried to expand with one_operand_p true. If that didn't
11645 work, retry with one_operand_p false, as that's what we used in _ok. */
11646 if (which == 3 && d.one_operand_p)
11647 {
11648 memcpy (d.perm, perm, sizeof (perm));
11649 d.one_operand_p = false;
11650 return ia64_expand_vec_perm_const_1 (&d);
11651 }
11652
11653 return false;
11654 }
11655
11656 /* Implement targetm.vectorize.vec_perm_const_ok. */
11657
11658 static bool
11659 ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
11660 const unsigned char *sel)
11661 {
11662 struct expand_vec_perm_d d;
11663 unsigned int i, nelt, which;
11664 bool ret;
11665
11666 d.vmode = vmode;
11667 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
11668 d.testing_p = true;
11669
11670 /* Extract the values from the vector CST into the permutation
11671 array in D. */
11672 memcpy (d.perm, sel, nelt);
11673 for (i = which = 0; i < nelt; ++i)
11674 {
11675 unsigned char e = d.perm[i];
11676 gcc_assert (e < 2 * nelt);
11677 which |= (e < nelt ? 1 : 2);
11678 }
11679
11680 /* If all elements come from the second vector, fold them onto the first. */
11681 if (which == 2)
11682 for (i = 0; i < nelt; ++i)
11683 d.perm[i] -= nelt;
11684
11685 /* Check whether the mask can be applied to the vector type. */
11686 d.one_operand_p = (which != 3);
11687
11688 /* Otherwise we have to go through the motions and see if we can
11689 figure out how to generate the requested permutation. */
11690 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
11691 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
11692 if (!d.one_operand_p)
11693 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
11694
11695 start_sequence ();
11696 ret = ia64_expand_vec_perm_const_1 (&d);
11697 end_sequence ();
11698
11699 return ret;
11700 }
11701
11702 void
11703 ia64_expand_vec_setv2sf (rtx operands[3])
11704 {
11705 struct expand_vec_perm_d d;
11706 unsigned int which;
11707 bool ok;
11708
11709 d.target = operands[0];
11710 d.op0 = operands[0];
11711 d.op1 = gen_reg_rtx (V2SFmode);
11712 d.vmode = V2SFmode;
11713 d.nelt = 2;
11714 d.one_operand_p = false;
11715 d.testing_p = false;
11716
11717 which = INTVAL (operands[2]);
11718 gcc_assert (which <= 1);
11719 d.perm[0] = 1 - which;
11720 d.perm[1] = which + 2;
11721
11722 emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));
11723
11724 ok = ia64_expand_vec_perm_const_1 (&d);
11725 gcc_assert (ok);
11726 }
11727
11728 void
11729 ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
11730 {
11731 struct expand_vec_perm_d d;
11732 enum machine_mode vmode = GET_MODE (target);
11733 unsigned int i, nelt = GET_MODE_NUNITS (vmode);
11734 bool ok;
11735
11736 d.target = target;
11737 d.op0 = op0;
11738 d.op1 = op1;
11739 d.vmode = vmode;
11740 d.nelt = nelt;
11741 d.one_operand_p = false;
11742 d.testing_p = false;
11743
11744 for (i = 0; i < nelt; ++i)
11745 d.perm[i] = i * 2 + odd;
11746
11747 ok = ia64_expand_vec_perm_const_1 (&d);
11748 gcc_assert (ok);
11749 }
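
/* For example, with V4HImode operands and ODD == 0 this builds the
   permutation {0, 2, 4, 6}, i.e. TARGET = {op0[0], op0[2], op1[0],
   op1[2]}, the even-numbered elements of the concatenation of OP0 and
   OP1; ODD == 1 selects the odd-numbered elements {1, 3, 5, 7}.  */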
11750
11751 #include "gt-ia64.h"