1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53
54 /* This is used for communication between ASM_OUTPUT_LABEL and
55 ASM_OUTPUT_LABELREF. */
56 int ia64_asm_output_label = 0;
57
58 /* Define the information needed to generate branch and scc insns. This is
59 stored from the compare operation. */
60 struct rtx_def * ia64_compare_op0;
61 struct rtx_def * ia64_compare_op1;
62
63 /* Register names for ia64_expand_prologue. */
64 static const char * const ia64_reg_numbers[96] =
65 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
66 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
67 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
68 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
69 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
70 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
71 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
72 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
73 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
74 "r104","r105","r106","r107","r108","r109","r110","r111",
75 "r112","r113","r114","r115","r116","r117","r118","r119",
76 "r120","r121","r122","r123","r124","r125","r126","r127"};
77
78 /* ??? These strings could be shared with REGISTER_NAMES. */
79 static const char * const ia64_input_reg_names[8] =
80 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
81
82 /* ??? These strings could be shared with REGISTER_NAMES. */
83 static const char * const ia64_local_reg_names[80] =
84 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
85 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
86 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
87 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
88 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
89 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
90 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
91 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
92 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
93 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
94
95 /* ??? These strings could be shared with REGISTER_NAMES. */
96 static const char * const ia64_output_reg_names[8] =
97 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
98
99 /* String used with the -mfixed-range= option. */
100 const char *ia64_fixed_range_string;
101
102 /* Determines whether we use adds, addl, or movl to generate our
103 TLS immediate offsets. */
104 int ia64_tls_size = 22;
105
106 /* String used with the -mtls-size= option. */
107 const char *ia64_tls_size_string;
108
109 /* Which cpu we are scheduling for. */
110 enum processor_type ia64_tune;
111
112 /* String used with the -mtune= option. */
113 const char *ia64_tune_string;
114
115 /* Determines whether we run our final scheduling pass or not. We always
116 avoid the normal second scheduling pass. */
117 static int ia64_flag_schedule_insns2;
118
119 /* Variables which are this size or smaller are put in the sdata/sbss
120 sections. */
121
122 unsigned int ia64_section_threshold;
123
124 /* The following variable is used by the DFA insn scheduler. The value is
125 TRUE if we do insn bundling instead of insn scheduling. */
126 int bundling_p = 0;
127
128 /* Structure to be filled in by ia64_compute_frame_size with register
129 save masks and offsets for the current function. */
130
131 struct ia64_frame_info
132 {
133 HOST_WIDE_INT total_size; /* size of the stack frame, not including
134 the caller's scratch area. */
135 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
136 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
137 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
138 HARD_REG_SET mask; /* mask of saved registers. */
139 unsigned int gr_used_mask; /* mask of registers in use as gr spill
140 registers or long-term scratches. */
141 int n_spilled; /* number of spilled registers. */
142 int reg_fp; /* register for fp. */
143 int reg_save_b0; /* save register for b0. */
144 int reg_save_pr; /* save register for prs. */
145 int reg_save_ar_pfs; /* save register for ar.pfs. */
146 int reg_save_ar_unat; /* save register for ar.unat. */
147 int reg_save_ar_lc; /* save register for ar.lc. */
148 int reg_save_gp; /* save register for gp. */
149 int n_input_regs; /* number of input registers used. */
150 int n_local_regs; /* number of local registers used. */
151 int n_output_regs; /* number of output registers used. */
152 int n_rotate_regs; /* number of rotating registers used. */
153
154 char need_regstk; /* true if a .regstk directive needed. */
155 char initialized; /* true if the data is finalized. */
156 };
157
158 /* Current frame information calculated by ia64_compute_frame_size. */
159 static struct ia64_frame_info current_frame_info;
160 \f
161 static int ia64_use_dfa_pipeline_interface (void);
162 static int ia64_first_cycle_multipass_dfa_lookahead (void);
163 static void ia64_dependencies_evaluation_hook (rtx, rtx);
164 static void ia64_init_dfa_pre_cycle_insn (void);
165 static rtx ia64_dfa_pre_cycle_insn (void);
166 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
167 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
168 static rtx gen_tls_get_addr (void);
169 static rtx gen_thread_pointer (void);
170 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
171 static int find_gr_spill (int);
172 static int next_scratch_gr_reg (void);
173 static void mark_reg_gr_used_mask (rtx, void *);
174 static void ia64_compute_frame_size (HOST_WIDE_INT);
175 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
176 static void finish_spill_pointers (void);
177 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
178 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
179 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
180 static rtx gen_movdi_x (rtx, rtx, rtx);
181 static rtx gen_fr_spill_x (rtx, rtx, rtx);
182 static rtx gen_fr_restore_x (rtx, rtx, rtx);
183
184 static enum machine_mode hfa_element_mode (tree, int);
185 static bool ia64_function_ok_for_sibcall (tree, tree);
186 static bool ia64_rtx_costs (rtx, int, int, int *);
187 static void fix_range (const char *);
188 static struct machine_function * ia64_init_machine_status (void);
189 static void emit_insn_group_barriers (FILE *);
190 static void emit_all_insn_group_barriers (FILE *);
191 static void final_emit_insn_group_barriers (FILE *);
192 static void emit_predicate_relation_info (void);
193 static void ia64_reorg (void);
194 static bool ia64_in_small_data_p (tree);
195 static void process_epilogue (void);
196 static int process_set (FILE *, rtx);
197
198 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
199 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
200 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
201 int, tree, rtx);
202 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
203 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
204 static bool ia64_assemble_integer (rtx, unsigned int, int);
205 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
206 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
207 static void ia64_output_function_end_prologue (FILE *);
208
209 static int ia64_issue_rate (void);
210 static int ia64_adjust_cost (rtx, rtx, rtx, int);
211 static void ia64_sched_init (FILE *, int, int);
212 static void ia64_sched_finish (FILE *, int);
213 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
214 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
215 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
216 static int ia64_variable_issue (FILE *, int, rtx, int);
217
218 static struct bundle_state *get_free_bundle_state (void);
219 static void free_bundle_state (struct bundle_state *);
220 static void initiate_bundle_states (void);
221 static void finish_bundle_states (void);
222 static unsigned bundle_state_hash (const void *);
223 static int bundle_state_eq_p (const void *, const void *);
224 static int insert_bundle_state (struct bundle_state *);
225 static void initiate_bundle_state_table (void);
226 static void finish_bundle_state_table (void);
227 static int try_issue_nops (struct bundle_state *, int);
228 static int try_issue_insn (struct bundle_state *, rtx);
229 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
230 static int get_max_pos (state_t);
231 static int get_template (state_t, int);
232
233 static rtx get_next_important_insn (rtx, rtx);
234 static void bundling (FILE *, int, rtx, rtx);
235
236 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
237 HOST_WIDE_INT, tree);
238 static void ia64_file_start (void);
239
240 static void ia64_select_rtx_section (enum machine_mode, rtx,
241 unsigned HOST_WIDE_INT);
242 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
243 ATTRIBUTE_UNUSED;
244 static void ia64_rwreloc_unique_section (tree, int)
245 ATTRIBUTE_UNUSED;
246 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
247 unsigned HOST_WIDE_INT)
248 ATTRIBUTE_UNUSED;
249 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
250 ATTRIBUTE_UNUSED;
251
252 static void ia64_hpux_add_extern_decl (const char *name)
253 ATTRIBUTE_UNUSED;
254 static void ia64_hpux_file_end (void)
255 ATTRIBUTE_UNUSED;
256 static void ia64_hpux_init_libfuncs (void)
257 ATTRIBUTE_UNUSED;
258 static void ia64_vms_init_libfuncs (void)
259 ATTRIBUTE_UNUSED;
260
261 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
262 static void ia64_encode_section_info (tree, rtx, int);
263
264 \f
265 /* Table of valid machine attributes. */
266 static const struct attribute_spec ia64_attribute_table[] =
267 {
268 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
269 { "syscall_linkage", 0, 0, false, true, true, NULL },
270 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
271 { NULL, 0, 0, false, false, false, NULL }
272 };
273
274 /* Initialize the GCC target structure. */
275 #undef TARGET_ATTRIBUTE_TABLE
276 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
277
278 #undef TARGET_INIT_BUILTINS
279 #define TARGET_INIT_BUILTINS ia64_init_builtins
280
281 #undef TARGET_EXPAND_BUILTIN
282 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
283
284 #undef TARGET_ASM_BYTE_OP
285 #define TARGET_ASM_BYTE_OP "\tdata1\t"
286 #undef TARGET_ASM_ALIGNED_HI_OP
287 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
288 #undef TARGET_ASM_ALIGNED_SI_OP
289 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
290 #undef TARGET_ASM_ALIGNED_DI_OP
291 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
292 #undef TARGET_ASM_UNALIGNED_HI_OP
293 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
294 #undef TARGET_ASM_UNALIGNED_SI_OP
295 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
296 #undef TARGET_ASM_UNALIGNED_DI_OP
297 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
298 #undef TARGET_ASM_INTEGER
299 #define TARGET_ASM_INTEGER ia64_assemble_integer
300
301 #undef TARGET_ASM_FUNCTION_PROLOGUE
302 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
303 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
304 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
305 #undef TARGET_ASM_FUNCTION_EPILOGUE
306 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
307
308 #undef TARGET_IN_SMALL_DATA_P
309 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
310
311 #undef TARGET_SCHED_ADJUST_COST
312 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
313 #undef TARGET_SCHED_ISSUE_RATE
314 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
315 #undef TARGET_SCHED_VARIABLE_ISSUE
316 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
317 #undef TARGET_SCHED_INIT
318 #define TARGET_SCHED_INIT ia64_sched_init
319 #undef TARGET_SCHED_FINISH
320 #define TARGET_SCHED_FINISH ia64_sched_finish
321 #undef TARGET_SCHED_REORDER
322 #define TARGET_SCHED_REORDER ia64_sched_reorder
323 #undef TARGET_SCHED_REORDER2
324 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
325
326 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
327 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
328
329 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
330 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE ia64_use_dfa_pipeline_interface
331
332 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
333 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
334
335 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
336 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
337 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
338 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
339
340 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
341 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
342 ia64_first_cycle_multipass_dfa_lookahead_guard
343
344 #undef TARGET_SCHED_DFA_NEW_CYCLE
345 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
346
347 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
348 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
349
350 #undef TARGET_ASM_OUTPUT_MI_THUNK
351 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
352 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
353 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
354
355 #undef TARGET_ASM_FILE_START
356 #define TARGET_ASM_FILE_START ia64_file_start
357
358 #undef TARGET_RTX_COSTS
359 #define TARGET_RTX_COSTS ia64_rtx_costs
360 #undef TARGET_ADDRESS_COST
361 #define TARGET_ADDRESS_COST hook_int_rtx_0
362
363 #undef TARGET_MACHINE_DEPENDENT_REORG
364 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
365
366 #undef TARGET_ENCODE_SECTION_INFO
367 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
368
369 struct gcc_target targetm = TARGET_INITIALIZER;
370 \f
371 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
372
373 int
374 call_operand (rtx op, enum machine_mode mode)
375 {
376 if (mode != GET_MODE (op) && mode != VOIDmode)
377 return 0;
378
379 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
380 || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
381 }
382
383 /* Return 1 if OP refers to a symbol in the sdata section. */
384
385 int
386 sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
387 {
388 switch (GET_CODE (op))
389 {
390 case CONST:
391 if (GET_CODE (XEXP (op, 0)) != PLUS
392 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
393 break;
394 op = XEXP (XEXP (op, 0), 0);
395 /* FALLTHRU */
396
397 case SYMBOL_REF:
398 if (CONSTANT_POOL_ADDRESS_P (op))
399 return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
400 else
401 return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
402
403 default:
404 break;
405 }
406
407 return 0;
408 }
409
410 int
411 small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
412 {
413 return SYMBOL_REF_SMALL_ADDR_P (op);
414 }
415
416 /* Return 1 if OP refers to a symbol, and is appropriate for a GOT load. */
417
418 int
419 got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
420 {
421 switch (GET_CODE (op))
422 {
423 case CONST:
424 op = XEXP (op, 0);
425 if (GET_CODE (op) != PLUS)
426 return 0;
427 if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
428 return 0;
429 op = XEXP (op, 1);
430 if (GET_CODE (op) != CONST_INT)
431 return 0;
432
433 return 1;
434
435 /* Ok if we're not using GOT entries at all. */
436 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
437 return 1;
438
439 /* "Ok" while emitting rtl, since otherwise we won't be provided
440 with the entire offset during emission, which makes it very
441 hard to split the offset into high and low parts. */
442 if (rtx_equal_function_value_matters)
443 return 1;
444
445 /* Force the low 14 bits of the constant to zero so that we do not
446 use up so many GOT entries. */
447 return (INTVAL (op) & 0x3fff) == 0;
448
449 case SYMBOL_REF:
450 if (SYMBOL_REF_SMALL_ADDR_P (op))
451 return 0;
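      /* FALLTHRU */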
452 case LABEL_REF:
453 return 1;
454
455 default:
456 break;
457 }
458 return 0;
459 }
460
461 /* Return 1 if OP refers to a symbol. */
462
463 int
464 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
465 {
466 switch (GET_CODE (op))
467 {
468 case CONST:
469 case SYMBOL_REF:
470 case LABEL_REF:
471 return 1;
472
473 default:
474 break;
475 }
476 return 0;
477 }
478
479 /* Return tls_model if OP refers to a TLS symbol. */
480
481 int
482 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
483 {
484 if (GET_CODE (op) != SYMBOL_REF)
485 return 0;
486 return SYMBOL_REF_TLS_MODEL (op);
487 }
488
489
490 /* Return 1 if OP refers to a function. */
491
492 int
493 function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
494 {
495 if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
496 return 1;
497 else
498 return 0;
499 }
500
501 /* Return 1 if OP is setjmp or a similar function. */
502
503 /* ??? This is an unsatisfying solution. Should rethink. */
504
505 int
506 setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
507 {
508 const char *name;
509 int retval = 0;
510
511 if (GET_CODE (op) != SYMBOL_REF)
512 return 0;
513
514 name = XSTR (op, 0);
515
516 /* The following code is borrowed from special_function_p in calls.c. */
517
518 /* Disregard prefix _, __ or __x. */
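  /* E.g. "_setjmp" and "__sigsetjmp" are matched below the same way as
     "setjmp" and "sigsetjmp". */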
519 if (name[0] == '_')
520 {
521 if (name[1] == '_' && name[2] == 'x')
522 name += 3;
523 else if (name[1] == '_')
524 name += 2;
525 else
526 name += 1;
527 }
528
529 if (name[0] == 's')
530 {
531 retval
532 = ((name[1] == 'e'
533 && (! strcmp (name, "setjmp")
534 || ! strcmp (name, "setjmp_syscall")))
535 || (name[1] == 'i'
536 && ! strcmp (name, "sigsetjmp"))
537 || (name[1] == 'a'
538 && ! strcmp (name, "savectx")));
539 }
540 else if ((name[0] == 'q' && name[1] == 's'
541 && ! strcmp (name, "qsetjmp"))
542 || (name[0] == 'v' && name[1] == 'f'
543 && ! strcmp (name, "vfork")))
544 retval = 1;
545
546 return retval;
547 }
548
549 /* Return 1 if OP is a general operand, excluding tls symbolic operands. */
550
551 int
552 move_operand (rtx op, enum machine_mode mode)
553 {
554 return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
555 }
556
557 /* Return 1 if OP is a register operand that is (or could be) a GR reg. */
558
559 int
560 gr_register_operand (rtx op, enum machine_mode mode)
561 {
562 if (! register_operand (op, mode))
563 return 0;
564 if (GET_CODE (op) == SUBREG)
565 op = SUBREG_REG (op);
566 if (GET_CODE (op) == REG)
567 {
568 unsigned int regno = REGNO (op);
569 if (regno < FIRST_PSEUDO_REGISTER)
570 return GENERAL_REGNO_P (regno);
571 }
572 return 1;
573 }
574
575 /* Return 1 if OP is a register operand that is (or could be) an FR reg. */
576
577 int
578 fr_register_operand (rtx op, enum machine_mode mode)
579 {
580 if (! register_operand (op, mode))
581 return 0;
582 if (GET_CODE (op) == SUBREG)
583 op = SUBREG_REG (op);
584 if (GET_CODE (op) == REG)
585 {
586 unsigned int regno = REGNO (op);
587 if (regno < FIRST_PSEUDO_REGISTER)
588 return FR_REGNO_P (regno);
589 }
590 return 1;
591 }
592
593 /* Return 1 if OP is a register operand that is (or could be) a GR/FR reg. */
594
595 int
596 grfr_register_operand (rtx op, enum machine_mode mode)
597 {
598 if (! register_operand (op, mode))
599 return 0;
600 if (GET_CODE (op) == SUBREG)
601 op = SUBREG_REG (op);
602 if (GET_CODE (op) == REG)
603 {
604 unsigned int regno = REGNO (op);
605 if (regno < FIRST_PSEUDO_REGISTER)
606 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
607 }
608 return 1;
609 }
610
611 /* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg. */
612
613 int
614 gr_nonimmediate_operand (rtx op, enum machine_mode mode)
615 {
616 if (! nonimmediate_operand (op, mode))
617 return 0;
618 if (GET_CODE (op) == SUBREG)
619 op = SUBREG_REG (op);
620 if (GET_CODE (op) == REG)
621 {
622 unsigned int regno = REGNO (op);
623 if (regno < FIRST_PSEUDO_REGISTER)
624 return GENERAL_REGNO_P (regno);
625 }
626 return 1;
627 }
628
629 /* Return 1 if OP is a nonimmediate operand that is (or could be) an FR reg. */
630
631 int
632 fr_nonimmediate_operand (rtx op, enum machine_mode mode)
633 {
634 if (! nonimmediate_operand (op, mode))
635 return 0;
636 if (GET_CODE (op) == SUBREG)
637 op = SUBREG_REG (op);
638 if (GET_CODE (op) == REG)
639 {
640 unsigned int regno = REGNO (op);
641 if (regno < FIRST_PSEUDO_REGISTER)
642 return FR_REGNO_P (regno);
643 }
644 return 1;
645 }
646
647 /* Return 1 if OP is a nonimmediate operand that is a GR/FR reg. */
648
649 int
650 grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
651 {
652 if (! nonimmediate_operand (op, mode))
653 return 0;
654 if (GET_CODE (op) == SUBREG)
655 op = SUBREG_REG (op);
656 if (GET_CODE (op) == REG)
657 {
658 unsigned int regno = REGNO (op);
659 if (regno < FIRST_PSEUDO_REGISTER)
660 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
661 }
662 return 1;
663 }
664
665 /* Return 1 if OP is a GR register operand, or zero. */
666
667 int
668 gr_reg_or_0_operand (rtx op, enum machine_mode mode)
669 {
670 return (op == const0_rtx || gr_register_operand (op, mode));
671 }
672
673 /* Return 1 if OP is a GR register operand, or a 5 bit immediate operand. */
674
675 int
676 gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
677 {
678 return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
679 || GET_CODE (op) == CONSTANT_P_RTX
680 || gr_register_operand (op, mode));
681 }
682
683 /* Return 1 if OP is a GR register operand, or a 6 bit immediate operand. */
684
685 int
686 gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
687 {
688 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
689 || GET_CODE (op) == CONSTANT_P_RTX
690 || gr_register_operand (op, mode));
691 }
692
693 /* Return 1 if OP is a GR register operand, or an 8 bit immediate operand. */
694
695 int
696 gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
697 {
698 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
699 || GET_CODE (op) == CONSTANT_P_RTX
700 || gr_register_operand (op, mode));
701 }
702
703 /* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate. */
704
705 int
706 grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
707 {
708 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
709 || GET_CODE (op) == CONSTANT_P_RTX
710 || grfr_register_operand (op, mode));
711 }
712
713 /* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
714 operand. */
715
716 int
717 gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
718 {
719 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
720 || GET_CODE (op) == CONSTANT_P_RTX
721 || gr_register_operand (op, mode));
722 }
723
724 /* Return 1 if OP is a register operand, or is valid for both an 8 bit
725 immediate and an 8 bit adjusted immediate operand. This is necessary
726 because when we emit a compare, we don't know what the condition will be,
727 so we need the intersection of the immediates accepted by GT and LT. */
728
729 int
730 gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
731 {
732 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
733 && CONST_OK_FOR_L (INTVAL (op)))
734 || GET_CODE (op) == CONSTANT_P_RTX
735 || gr_register_operand (op, mode));
736 }
737
738 /* Return 1 if OP is a register operand, or a 14 bit immediate operand. */
739
740 int
741 gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
742 {
743 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
744 || GET_CODE (op) == CONSTANT_P_RTX
745 || gr_register_operand (op, mode));
746 }
747
748 /* Return 1 if OP is a register operand, or a 22 bit immediate operand. */
749
750 int
751 gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
752 {
753 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
754 || GET_CODE (op) == CONSTANT_P_RTX
755 || gr_register_operand (op, mode));
756 }
757
758 /* Return 1 if OP is a 6 bit immediate operand. */
759
760 int
761 shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
762 {
763 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
764 || GET_CODE (op) == CONSTANT_P_RTX);
765 }
766
767 /* Return 1 if OP is a 5 bit immediate operand. */
768
769 int
770 shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
771 {
772 return ((GET_CODE (op) == CONST_INT
773 && (INTVAL (op) >= 0 && INTVAL (op) < 32))
774 || GET_CODE (op) == CONSTANT_P_RTX);
775 }
776
777 /* Return 1 if OP is a 2, 4, 8, or 16 immediate operand. */
778
779 int
780 shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
781 {
782 return (GET_CODE (op) == CONST_INT
783 && (INTVAL (op) == 2 || INTVAL (op) == 4
784 || INTVAL (op) == 8 || INTVAL (op) == 16));
785 }
786
787 /* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand. */
788
789 int
790 fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
791 {
792 return (GET_CODE (op) == CONST_INT
793 && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
794 INTVAL (op) == -4 || INTVAL (op) == -1 ||
795 INTVAL (op) == 1 || INTVAL (op) == 4 ||
796 INTVAL (op) == 8 || INTVAL (op) == 16));
797 }
798
799 /* Return 1 if OP is a floating-point constant zero, one, or a register. */
800
801 int
802 fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
803 {
804 return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
805 || fr_register_operand (op, mode));
806 }
807
808 /* Like nonimmediate_operand, but don't allow MEMs that try to use a
809 POST_MODIFY with a REG as displacement. */
810
811 int
812 destination_operand (rtx op, enum machine_mode mode)
813 {
814 if (! nonimmediate_operand (op, mode))
815 return 0;
816 if (GET_CODE (op) == MEM
817 && GET_CODE (XEXP (op, 0)) == POST_MODIFY
818 && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
819 return 0;
820 return 1;
821 }
822
823 /* Like memory_operand, but don't allow post-increments. */
824
825 int
826 not_postinc_memory_operand (rtx op, enum machine_mode mode)
827 {
828 return (memory_operand (op, mode)
829 && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != 'a');
830 }
831
832 /* Return 1 if this is a comparison operator, which accepts a normal 8-bit
833 signed immediate operand. */
834
835 int
836 normal_comparison_operator (register rtx op, enum machine_mode mode)
837 {
838 enum rtx_code code = GET_CODE (op);
839 return ((mode == VOIDmode || GET_MODE (op) == mode)
840 && (code == EQ || code == NE
841 || code == GT || code == LE || code == GTU || code == LEU));
842 }
843
844 /* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
845 signed immediate operand. */
846
847 int
848 adjusted_comparison_operator (register rtx op, enum machine_mode mode)
849 {
850 enum rtx_code code = GET_CODE (op);
851 return ((mode == VOIDmode || GET_MODE (op) == mode)
852 && (code == LT || code == GE || code == LTU || code == GEU));
853 }
854
855 /* Return 1 if this is a signed inequality operator. */
856
857 int
858 signed_inequality_operator (register rtx op, enum machine_mode mode)
859 {
860 enum rtx_code code = GET_CODE (op);
861 return ((mode == VOIDmode || GET_MODE (op) == mode)
862 && (code == GE || code == GT
863 || code == LE || code == LT));
864 }
865
866 /* Return 1 if this operator is valid for predication. */
867
868 int
869 predicate_operator (register rtx op, enum machine_mode mode)
870 {
871 enum rtx_code code = GET_CODE (op);
872 return ((GET_MODE (op) == mode || mode == VOIDmode)
873 && (code == EQ || code == NE));
874 }
875
876 /* Return 1 if this operator can be used in a conditional operation. */
877
878 int
879 condop_operator (register rtx op, enum machine_mode mode)
880 {
881 enum rtx_code code = GET_CODE (op);
882 return ((GET_MODE (op) == mode || mode == VOIDmode)
883 && (code == PLUS || code == MINUS || code == AND
884 || code == IOR || code == XOR));
885 }
886
887 /* Return 1 if this is the ar.lc register. */
888
889 int
890 ar_lc_reg_operand (register rtx op, enum machine_mode mode)
891 {
892 return (GET_MODE (op) == DImode
893 && (mode == DImode || mode == VOIDmode)
894 && GET_CODE (op) == REG
895 && REGNO (op) == AR_LC_REGNUM);
896 }
897
898 /* Return 1 if this is the ar.ccv register. */
899
900 int
901 ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
902 {
903 return ((GET_MODE (op) == mode || mode == VOIDmode)
904 && GET_CODE (op) == REG
905 && REGNO (op) == AR_CCV_REGNUM);
906 }
907
908 /* Return 1 if this is the ar.pfs register. */
909
910 int
911 ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
912 {
913 return ((GET_MODE (op) == mode || mode == VOIDmode)
914 && GET_CODE (op) == REG
915 && REGNO (op) == AR_PFS_REGNUM);
916 }
917
918 /* Like general_operand, but don't allow (mem (addressof)). */
919
920 int
921 general_xfmode_operand (rtx op, enum machine_mode mode)
922 {
923 if (! general_operand (op, mode))
924 return 0;
925 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
926 return 0;
927 return 1;
928 }
929
930 /* Similarly. */
931
932 int
933 destination_xfmode_operand (rtx op, enum machine_mode mode)
934 {
935 if (! destination_operand (op, mode))
936 return 0;
937 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
938 return 0;
939 return 1;
940 }
941
942 /* Similarly. */
943
944 int
945 xfreg_or_fp01_operand (rtx op, enum machine_mode mode)
946 {
947 if (GET_CODE (op) == SUBREG)
948 return 0;
949 return fr_reg_or_fp01_operand (op, mode);
950 }
951
952 /* Return 1 if OP is valid as a base register in a reg + offset address. */
953
954 int
955 basereg_operand (rtx op, enum machine_mode mode)
956 {
957 /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
958 checks from pa.c basereg_operand as well? Seems to be OK without them
959 in test runs. */
960
961 return (register_operand (op, mode) &&
962 REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
963 }
964 \f
965 typedef enum
966 {
967 ADDR_AREA_NORMAL, /* normal address area */
968 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
969 }
970 ia64_addr_area;
971
972 static GTY(()) tree small_ident1;
973 static GTY(()) tree small_ident2;
974
975 static void
976 init_idents (void)
977 {
978 if (small_ident1 == 0)
979 {
980 small_ident1 = get_identifier ("small");
981 small_ident2 = get_identifier ("__small__");
982 }
983 }
984
985 /* Retrieve the address area that has been chosen for the given decl. */
986
987 static ia64_addr_area
988 ia64_get_addr_area (tree decl)
989 {
990 tree model_attr;
991
992 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
993 if (model_attr)
994 {
995 tree id;
996
997 init_idents ();
998 id = TREE_VALUE (TREE_VALUE (model_attr));
999 if (id == small_ident1 || id == small_ident2)
1000 return ADDR_AREA_SMALL;
1001 }
1002 return ADDR_AREA_NORMAL;
1003 }
1004
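/* Handle a "model" attribute. The single argument must be the identifier
   `small' (or `__small__'), requesting that the decl be placed in the
   small, addl-reachable address area; usage is along the lines of
   `int foo __attribute__ ((model (small)));' (an illustrative example,
   not taken from this file). */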
1005 static tree
1006 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1007 {
1008 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
1009 ia64_addr_area area;
1010 tree arg, decl = *node;
1011
1012 init_idents ();
1013 arg = TREE_VALUE (args);
1014 if (arg == small_ident1 || arg == small_ident2)
1015 {
1016 addr_area = ADDR_AREA_SMALL;
1017 }
1018 else
1019 {
1020 warning ("invalid argument of `%s' attribute",
1021 IDENTIFIER_POINTER (name));
1022 *no_add_attrs = true;
1023 }
1024
1025 switch (TREE_CODE (decl))
1026 {
1027 case VAR_DECL:
1028 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
1029 == FUNCTION_DECL)
1030 && !TREE_STATIC (decl))
1031 {
1032 error ("%Jan address area attribute cannot be specified for "
1033 "local variables", decl, decl);
1034 *no_add_attrs = true;
1035 }
1036 area = ia64_get_addr_area (decl);
1037 if (area != ADDR_AREA_NORMAL && addr_area != area)
1038 {
1039 error ("%Jaddress area of '%s' conflicts with previous "
1040 "declaration", decl, decl);
1041 *no_add_attrs = true;
1042 }
1043 break;
1044
1045 case FUNCTION_DECL:
1046 error ("%Jaddress area attribute cannot be specified for functions",
1047 decl, decl);
1048 *no_add_attrs = true;
1049 break;
1050
1051 default:
1052 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1053 *no_add_attrs = true;
1054 break;
1055 }
1056
1057 return NULL_TREE;
1058 }
1059
1060 static void
1061 ia64_encode_addr_area (tree decl, rtx symbol)
1062 {
1063 int flags;
1064
1065 flags = SYMBOL_REF_FLAGS (symbol);
1066 switch (ia64_get_addr_area (decl))
1067 {
1068 case ADDR_AREA_NORMAL: break;
1069 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
1070 default: abort ();
1071 }
1072 SYMBOL_REF_FLAGS (symbol) = flags;
1073 }
1074
1075 static void
1076 ia64_encode_section_info (tree decl, rtx rtl, int first)
1077 {
1078 default_encode_section_info (decl, rtl, first);
1079
1080 if (TREE_CODE (decl) == VAR_DECL
1081 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
1082 ia64_encode_addr_area (decl, XEXP (rtl, 0));
1083 }
1084 \f
1085 /* Return 1 if the operands of a move are ok. */
1086
1087 int
1088 ia64_move_ok (rtx dst, rtx src)
1089 {
1090 /* If we're under init_recog_no_volatile, we'll not be able to use
1091 memory_operand. So check the code directly and don't worry about
1092 the validity of the underlying address, which should have been
1093 checked elsewhere anyway. */
1094 if (GET_CODE (dst) != MEM)
1095 return 1;
1096 if (GET_CODE (src) == MEM)
1097 return 0;
1098 if (register_operand (src, VOIDmode))
1099 return 1;
1100
1101 /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0. */
1102 if (INTEGRAL_MODE_P (GET_MODE (dst)))
1103 return src == const0_rtx;
1104 else
1105 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
1106 }
1107
1108 int
1109 addp4_optimize_ok (rtx op1, rtx op2)
1110 {
1111 return (basereg_operand (op1, GET_MODE(op1)) !=
1112 basereg_operand (op2, GET_MODE(op2)));
1113 }
1114
1115 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
1116 Return the length of the field, or <= 0 on failure. */
1117
1118 int
1119 ia64_depz_field_mask (rtx rop, rtx rshift)
1120 {
1121 unsigned HOST_WIDE_INT op = INTVAL (rop);
1122 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
1123
1124 /* Get rid of the zero bits we're shifting in. */
1125 op >>= shift;
1126
1127 /* We must now have a solid block of 1's at bit 0. */
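  /* E.g. a mask of 0xff0 with SHIFT == 4 becomes 0xff here; 0xff + 1 == 0x100
     and exact_log2 (0x100) == 8, so the field is 8 bits wide. A mask with a
     hole, such as 0xf0f0 shifted by 4, becomes 0xf0f, and exact_log2 (0xf10)
     fails with -1. */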
1128 return exact_log2 (op + 1);
1129 }
1130
1131 /* Expand a symbolic constant load. */
1132
1133 void
1134 ia64_expand_load_address (rtx dest, rtx src)
1135 {
1136 if (tls_symbolic_operand (src, VOIDmode))
1137 abort ();
1138 if (GET_CODE (dest) != REG)
1139 abort ();
1140
1141 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1142 having to pointer-extend the value afterward. Other forms of address
1143 computation below are also more natural to compute as 64-bit quantities.
1144 If we've been given an SImode destination register, change it. */
1145 if (GET_MODE (dest) != Pmode)
1146 dest = gen_rtx_REG (Pmode, REGNO (dest));
1147
1148 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
1149 {
1150 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
1151 return;
1152 }
1153 else if (TARGET_AUTO_PIC)
1154 {
1155 emit_insn (gen_load_gprel64 (dest, src));
1156 return;
1157 }
1158 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1159 {
1160 emit_insn (gen_load_fptr (dest, src));
1161 return;
1162 }
1163 else if (sdata_symbolic_operand (src, VOIDmode))
1164 {
1165 emit_insn (gen_load_gprel (dest, src));
1166 return;
1167 }
1168
1169 if (GET_CODE (src) == CONST
1170 && GET_CODE (XEXP (src, 0)) == PLUS
1171 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
1172 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
1173 {
1174 rtx sym = XEXP (XEXP (src, 0), 0);
1175 HOST_WIDE_INT ofs, hi, lo;
1176
1177 /* Split the offset into a sign extended 14-bit low part
1178 and a complementary high part. */
1179 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
1180 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
1181 hi = ofs - lo;
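      /* E.g. for ofs == 0x12345: ofs & 0x3fff == 0x2345, whose bit 13 is set,
         so lo == -0x1cbb and hi == 0x14000; hi has its low 14 bits clear and
         hi + lo reconstructs the original offset. */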
1182
1183 ia64_expand_load_address (dest, plus_constant (sym, hi));
1184 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
1185 }
1186 else
1187 {
1188 rtx tmp;
1189
1190 tmp = gen_rtx_HIGH (Pmode, src);
1191 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1192 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1193
1194 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
1195 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1196 }
1197 }
1198
1199 static GTY(()) rtx gen_tls_tga;
1200 static rtx
1201 gen_tls_get_addr (void)
1202 {
1203 if (!gen_tls_tga)
1204 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1205 return gen_tls_tga;
1206 }
1207
1208 static GTY(()) rtx thread_pointer_rtx;
1209 static rtx
1210 gen_thread_pointer (void)
1211 {
1212 if (!thread_pointer_rtx)
1213 {
1214 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1215 RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
1216 }
1217 return thread_pointer_rtx;
1218 }
1219
1220 static rtx
1221 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
1222 {
1223 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1224 rtx orig_op0 = op0;
1225
1226 switch (tls_kind)
1227 {
1228 case TLS_MODEL_GLOBAL_DYNAMIC:
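      /* The address is computed by a call __tls_get_addr (dtpmod, dtprel),
         with both arguments loaded from the GOT through the ltoff_dtpmod
         and ltoff_dtprel loads emitted below. */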
1229 start_sequence ();
1230
1231 tga_op1 = gen_reg_rtx (Pmode);
1232 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1233 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1234 RTX_UNCHANGING_P (tga_op1) = 1;
1235
1236 tga_op2 = gen_reg_rtx (Pmode);
1237 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
1238 tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
1239 RTX_UNCHANGING_P (tga_op2) = 1;
1240
1241 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1242 LCT_CONST, Pmode, 2, tga_op1,
1243 Pmode, tga_op2, Pmode);
1244
1245 insns = get_insns ();
1246 end_sequence ();
1247
1248 if (GET_MODE (op0) != Pmode)
1249 op0 = tga_ret;
1250 emit_libcall_block (insns, op0, tga_ret, op1);
1251 break;
1252
1253 case TLS_MODEL_LOCAL_DYNAMIC:
1254 /* ??? This isn't the completely proper way to do local-dynamic.
1255 If the call to __tls_get_addr is used only by a single symbol,
1256 then we should (somehow) move the dtprel to the second arg
1257 to avoid the extra add. */
1258 start_sequence ();
1259
1260 tga_op1 = gen_reg_rtx (Pmode);
1261 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1262 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1263 RTX_UNCHANGING_P (tga_op1) = 1;
1264
1265 tga_op2 = const0_rtx;
1266
1267 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1268 LCT_CONST, Pmode, 2, tga_op1,
1269 Pmode, tga_op2, Pmode);
1270
1271 insns = get_insns ();
1272 end_sequence ();
1273
1274 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1275 UNSPEC_LD_BASE);
1276 tmp = gen_reg_rtx (Pmode);
1277 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1278
1279 if (!register_operand (op0, Pmode))
1280 op0 = gen_reg_rtx (Pmode);
1281 if (TARGET_TLS64)
1282 {
1283 emit_insn (gen_load_dtprel (op0, op1));
1284 emit_insn (gen_adddi3 (op0, tmp, op0));
1285 }
1286 else
1287 emit_insn (gen_add_dtprel (op0, tmp, op1));
1288 break;
1289
1290 case TLS_MODEL_INITIAL_EXEC:
1291 tmp = gen_reg_rtx (Pmode);
1292 emit_insn (gen_load_ltoff_tprel (tmp, op1));
1293 tmp = gen_rtx_MEM (Pmode, tmp);
1294 RTX_UNCHANGING_P (tmp) = 1;
1295 tmp = force_reg (Pmode, tmp);
1296
1297 if (!register_operand (op0, Pmode))
1298 op0 = gen_reg_rtx (Pmode);
1299 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1300 break;
1301
1302 case TLS_MODEL_LOCAL_EXEC:
1303 if (!register_operand (op0, Pmode))
1304 op0 = gen_reg_rtx (Pmode);
1305 if (TARGET_TLS64)
1306 {
1307 emit_insn (gen_load_tprel (op0, op1));
1308 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
1309 }
1310 else
1311 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
1312 break;
1313
1314 default:
1315 abort ();
1316 }
1317
1318 if (orig_op0 == op0)
1319 return NULL_RTX;
1320 if (GET_MODE (orig_op0) == Pmode)
1321 return op0;
1322 return gen_lowpart (GET_MODE (orig_op0), op0);
1323 }
1324
1325 rtx
1326 ia64_expand_move (rtx op0, rtx op1)
1327 {
1328 enum machine_mode mode = GET_MODE (op0);
1329
1330 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1331 op1 = force_reg (mode, op1);
1332
1333 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1334 {
1335 enum tls_model tls_kind;
1336 if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
1337 return ia64_expand_tls_address (tls_kind, op0, op1);
1338
1339 if (!TARGET_NO_PIC && reload_completed)
1340 {
1341 ia64_expand_load_address (op0, op1);
1342 return NULL_RTX;
1343 }
1344 }
1345
1346 return op1;
1347 }
1348
1349 /* Split a move from OP1 to OP0 conditional on COND. */
1350
1351 void
1352 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1353 {
1354 rtx insn, first = get_last_insn ();
1355
1356 emit_move_insn (op0, op1);
1357
1358 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1359 if (INSN_P (insn))
1360 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1361 PATTERN (insn));
1362 }
1363
1364 /* Split a post-reload TImode reference into two DImode components. */
1365
1366 rtx
1367 ia64_split_timode (rtx out[2], rtx in, rtx scratch)
1368 {
1369 switch (GET_CODE (in))
1370 {
1371 case REG:
1372 out[0] = gen_rtx_REG (DImode, REGNO (in));
1373 out[1] = gen_rtx_REG (DImode, REGNO (in) + 1);
1374 return NULL_RTX;
1375
1376 case MEM:
1377 {
1378 rtx base = XEXP (in, 0);
1379
1380 switch (GET_CODE (base))
1381 {
1382 case REG:
1383 out[0] = adjust_address (in, DImode, 0);
1384 break;
1385 case POST_MODIFY:
1386 base = XEXP (base, 0);
1387 out[0] = adjust_address (in, DImode, 0);
1388 break;
1389
1390 /* Since we're changing the mode, we need to change to POST_MODIFY
1391 as well to preserve the size of the increment. Either that or
1392 do the update in two steps, but we've already got this scratch
1393 register handy so let's use it. */
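        /* For instance, (mem:TI (post_inc (reg))) becomes
           (mem:DI (post_modify (reg, reg + 16))) for word 0, while word 1
           is accessed through SCRATCH; the insn returned below computes
           SCRATCH = base + 8 for the caller to emit. */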
1394 case POST_INC:
1395 base = XEXP (base, 0);
1396 out[0]
1397 = change_address (in, DImode,
1398 gen_rtx_POST_MODIFY
1399 (Pmode, base, plus_constant (base, 16)));
1400 break;
1401 case POST_DEC:
1402 base = XEXP (base, 0);
1403 out[0]
1404 = change_address (in, DImode,
1405 gen_rtx_POST_MODIFY
1406 (Pmode, base, plus_constant (base, -16)));
1407 break;
1408 default:
1409 abort ();
1410 }
1411
1412 if (scratch == NULL_RTX)
1413 abort ();
1414 out[1] = change_address (in, DImode, scratch);
1415 return gen_adddi3 (scratch, base, GEN_INT (8));
1416 }
1417
1418 case CONST_INT:
1419 case CONST_DOUBLE:
1420 split_double (in, &out[0], &out[1]);
1421 return NULL_RTX;
1422
1423 default:
1424 abort ();
1425 }
1426 }
1427
1428 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1429 through memory plus an extra GR scratch register. Except that you can
1430 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1431 SECONDARY_RELOAD_CLASS, but not both.
1432
1433 We got into problems in the first place by allowing a construct like
1434 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1435 This solution attempts to prevent this situation from occurring. When
1436 we see something like the above, we spill the inner register to memory. */
1437
1438 rtx
1439 spill_xfmode_operand (rtx in, int force)
1440 {
1441 if (GET_CODE (in) == SUBREG
1442 && GET_MODE (SUBREG_REG (in)) == TImode
1443 && GET_CODE (SUBREG_REG (in)) == REG)
1444 {
1445 rtx mem = gen_mem_addressof (SUBREG_REG (in), NULL_TREE, /*rescan=*/true);
1446 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1447 }
1448 else if (force && GET_CODE (in) == REG)
1449 {
1450 rtx mem = gen_mem_addressof (in, NULL_TREE, /*rescan=*/true);
1451 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1452 }
1453 else if (GET_CODE (in) == MEM
1454 && GET_CODE (XEXP (in, 0)) == ADDRESSOF)
1455 return change_address (in, XFmode, copy_to_reg (XEXP (in, 0)));
1456 else
1457 return in;
1458 }
1459
1460 /* Emit comparison instruction if necessary, returning the expression
1461 that holds the compare result in the proper mode. */
1462
1463 static GTY(()) rtx cmptf_libfunc;
1464
1465 rtx
1466 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1467 {
1468 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1469 rtx cmp;
1470
1471 /* If we have a BImode input, then we already have a compare result, and
1472 do not need to emit another comparison. */
1473 if (GET_MODE (op0) == BImode)
1474 {
1475 if ((code == NE || code == EQ) && op1 == const0_rtx)
1476 cmp = op0;
1477 else
1478 abort ();
1479 }
1480 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1481 magic number as its third argument indicating what to do.
1482 The return value is an integer to be compared against zero. */
1483 else if (TARGET_HPUX && GET_MODE (op0) == TFmode)
1484 {
1485 enum qfcmp_magic {
1486 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1487 QCMP_UNORD = 2,
1488 QCMP_EQ = 4,
1489 QCMP_LT = 8,
1490 QCMP_GT = 16
1491 } magic;
1492 enum rtx_code ncode;
1493 rtx ret, insns;
1494 if (GET_MODE (op1) != TFmode)
1495 abort ();
1496 switch (code)
1497 {
1498 /* 1 = equal, 0 = not equal. Equality operators do
1499 not raise FP_INVALID when given an SNaN operand. */
1500 case EQ: magic = QCMP_EQ; ncode = NE; break;
1501 case NE: magic = QCMP_EQ; ncode = EQ; break;
1502 /* isunordered() from C99. */
1503 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1504 /* Relational operators raise FP_INVALID when given
1505 an SNaN operand. */
1506 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1507 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1508 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1509 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1510 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1511 Expanders for buneq etc. would have to be added to ia64.md
1512 for this to be useful. */
1513 default: abort ();
1514 }
1515
1516 start_sequence ();
1517
1518 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1519 op0, TFmode, op1, TFmode,
1520 GEN_INT (magic), DImode);
1521 cmp = gen_reg_rtx (BImode);
1522 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1523 gen_rtx_fmt_ee (ncode, BImode,
1524 ret, const0_rtx)));
1525
1526 insns = get_insns ();
1527 end_sequence ();
1528
1529 emit_libcall_block (insns, cmp, cmp,
1530 gen_rtx_fmt_ee (code, BImode, op0, op1));
1531 code = NE;
1532 }
1533 else
1534 {
1535 cmp = gen_reg_rtx (BImode);
1536 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1537 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1538 code = NE;
1539 }
1540
1541 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1542 }
1543
1544 /* Emit the appropriate sequence for a call. */
1545
1546 void
1547 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1548 int sibcall_p)
1549 {
1550 rtx insn, b0;
1551
1552 addr = XEXP (addr, 0);
1553 addr = convert_memory_address (DImode, addr);
1554 b0 = gen_rtx_REG (DImode, R_BR (0));
1555
1556 /* ??? Should do this for functions known to bind local too. */
1557 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1558 {
1559 if (sibcall_p)
1560 insn = gen_sibcall_nogp (addr);
1561 else if (! retval)
1562 insn = gen_call_nogp (addr, b0);
1563 else
1564 insn = gen_call_value_nogp (retval, addr, b0);
1565 insn = emit_call_insn (insn);
1566 }
1567 else
1568 {
1569 if (sibcall_p)
1570 insn = gen_sibcall_gp (addr);
1571 else if (! retval)
1572 insn = gen_call_gp (addr, b0);
1573 else
1574 insn = gen_call_value_gp (retval, addr, b0);
1575 insn = emit_call_insn (insn);
1576
1577 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1578 }
1579
1580 if (sibcall_p)
1581 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1582 }
1583
1584 void
1585 ia64_reload_gp (void)
1586 {
1587 rtx tmp;
1588
1589 if (current_frame_info.reg_save_gp)
1590 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1591 else
1592 {
1593 HOST_WIDE_INT offset;
1594
1595 offset = (current_frame_info.spill_cfa_off
1596 + current_frame_info.spill_size);
1597 if (frame_pointer_needed)
1598 {
1599 tmp = hard_frame_pointer_rtx;
1600 offset = -offset;
1601 }
1602 else
1603 {
1604 tmp = stack_pointer_rtx;
1605 offset = current_frame_info.total_size - offset;
1606 }
1607
1608 if (CONST_OK_FOR_I (offset))
1609 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1610 tmp, GEN_INT (offset)));
1611 else
1612 {
1613 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1614 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1615 pic_offset_table_rtx, tmp));
1616 }
1617
1618 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1619 }
1620
1621 emit_move_insn (pic_offset_table_rtx, tmp);
1622 }
1623
1624 void
1625 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1626 rtx scratch_b, int noreturn_p, int sibcall_p)
1627 {
1628 rtx insn;
1629 bool is_desc = false;
1630
1631 /* If we find we're calling through a register, then we're actually
1632 calling through a descriptor, so load up the values. */
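  /* An IA-64 function descriptor is a pair of words: the entry point
     address followed by the gp value for the callee; the two loads
     below fetch exactly those two words. */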
1633 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1634 {
1635 rtx tmp;
1636 bool addr_dead_p;
1637
1638 /* ??? We are currently constrained to *not* use peep2, because
1639 we can legitimately change the global lifetime of the GP
1640 (in the form of killing where previously live). This is
1641 because a call through a descriptor doesn't use the previous
1642 value of the GP, while a direct call does, and we do not
1643 commit to either form until the split here.
1644
1645 That said, this means that we lack precise life info for
1646 whether ADDR is dead after this call. This is not terribly
1647 important, since we can fix things up essentially for free
1648 with the POST_DEC below, but it's nice to not use it when we
1649 can immediately tell it's not necessary. */
1650 addr_dead_p = ((noreturn_p || sibcall_p
1651 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1652 REGNO (addr)))
1653 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1654
1655 /* Load the code address into scratch_b. */
1656 tmp = gen_rtx_POST_INC (Pmode, addr);
1657 tmp = gen_rtx_MEM (Pmode, tmp);
1658 emit_move_insn (scratch_r, tmp);
1659 emit_move_insn (scratch_b, scratch_r);
1660
1661 /* Load the GP address. If ADDR is not dead here, then we must
1662 revert the change made above via the POST_INCREMENT. */
1663 if (!addr_dead_p)
1664 tmp = gen_rtx_POST_DEC (Pmode, addr);
1665 else
1666 tmp = addr;
1667 tmp = gen_rtx_MEM (Pmode, tmp);
1668 emit_move_insn (pic_offset_table_rtx, tmp);
1669
1670 is_desc = true;
1671 addr = scratch_b;
1672 }
1673
1674 if (sibcall_p)
1675 insn = gen_sibcall_nogp (addr);
1676 else if (retval)
1677 insn = gen_call_value_nogp (retval, addr, retaddr);
1678 else
1679 insn = gen_call_nogp (addr, retaddr);
1680 emit_call_insn (insn);
1681
1682 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1683 ia64_reload_gp ();
1684 }
1685 \f
1686 /* Begin the assembly file. */
1687
1688 static void
1689 ia64_file_start (void)
1690 {
1691 default_file_start ();
1692 emit_safe_across_calls ();
1693 }
1694
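/* Emit the ".pred.safe_across_calls" directive, naming the predicate
   registers that are not call-used and are therefore preserved across
   calls; with the usual IA-64 conventions this typically comes out as
   "p1-p5,p16-p63". */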
1695 void
1696 emit_safe_across_calls (void)
1697 {
1698 unsigned int rs, re;
1699 int out_state;
1700
1701 rs = 1;
1702 out_state = 0;
1703 while (1)
1704 {
1705 while (rs < 64 && call_used_regs[PR_REG (rs)])
1706 rs++;
1707 if (rs >= 64)
1708 break;
1709 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1710 continue;
1711 if (out_state == 0)
1712 {
1713 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1714 out_state = 1;
1715 }
1716 else
1717 fputc (',', asm_out_file);
1718 if (re == rs + 1)
1719 fprintf (asm_out_file, "p%u", rs);
1720 else
1721 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1722 rs = re + 1;
1723 }
1724 if (out_state)
1725 fputc ('\n', asm_out_file);
1726 }
1727
1728 /* Helper function for ia64_compute_frame_size: find an appropriate general
1729 register to spill some special register to. current_frame_info.gr_used_mask
1730 contains bits for GR0 to GR31 that have already been allocated by this routine.
1731 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1732
1733 static int
1734 find_gr_spill (int try_locals)
1735 {
1736 int regno;
1737
1738 /* If this is a leaf function, first try an otherwise unused
1739 call-clobbered register. */
1740 if (current_function_is_leaf)
1741 {
1742 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1743 if (! regs_ever_live[regno]
1744 && call_used_regs[regno]
1745 && ! fixed_regs[regno]
1746 && ! global_regs[regno]
1747 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1748 {
1749 current_frame_info.gr_used_mask |= 1 << regno;
1750 return regno;
1751 }
1752 }
1753
1754 if (try_locals)
1755 {
1756 regno = current_frame_info.n_local_regs;
1757 /* If there is a frame pointer, then we can't use loc79, because
1758 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1759 reg_name switching code in ia64_expand_prologue. */
1760 if (regno < (80 - frame_pointer_needed))
1761 {
1762 current_frame_info.n_local_regs = regno + 1;
1763 return LOC_REG (0) + regno;
1764 }
1765 }
1766
1767 /* Failed to find a general register to spill to. Must use stack. */
1768 return 0;
1769 }
1770
1771 /* In order to make for nice schedules, we try to allocate every temporary
1772 to a different register. We must of course stay away from call-saved,
1773 fixed, and global registers. We must also stay away from registers
1774 allocated in current_frame_info.gr_used_mask, since those include regs
1775 used all through the prologue.
1776
1777 Any register allocated here must be used immediately. The idea is to
1778 aid scheduling, not to solve data flow problems. */
1779
1780 static int last_scratch_gr_reg;
1781
1782 static int
1783 next_scratch_gr_reg (void)
1784 {
1785 int i, regno;
1786
1787 for (i = 0; i < 32; ++i)
1788 {
1789 regno = (last_scratch_gr_reg + i + 1) & 31;
1790 if (call_used_regs[regno]
1791 && ! fixed_regs[regno]
1792 && ! global_regs[regno]
1793 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1794 {
1795 last_scratch_gr_reg = regno;
1796 return regno;
1797 }
1798 }
1799
1800 /* There must be _something_ available. */
1801 abort ();
1802 }
1803
1804 /* Helper function for ia64_compute_frame_size, called through
1805 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1806
1807 static void
1808 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1809 {
1810 unsigned int regno = REGNO (reg);
1811 if (regno < 32)
1812 {
1813 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1814 for (i = 0; i < n; ++i)
1815 current_frame_info.gr_used_mask |= 1 << (regno + i);
1816 }
1817 }
1818
1819 /* Compute the frame layout of the current function and record it in
1820 current_frame_info. SIZE is the number of bytes of space
1821 needed for local variables. */
1822
1823 static void
1824 ia64_compute_frame_size (HOST_WIDE_INT size)
1825 {
1826 HOST_WIDE_INT total_size;
1827 HOST_WIDE_INT spill_size = 0;
1828 HOST_WIDE_INT extra_spill_size = 0;
1829 HOST_WIDE_INT pretend_args_size;
1830 HARD_REG_SET mask;
1831 int n_spilled = 0;
1832 int spilled_gr_p = 0;
1833 int spilled_fr_p = 0;
1834 unsigned int regno;
1835 int i;
1836
1837 if (current_frame_info.initialized)
1838 return;
1839
1840 memset (&current_frame_info, 0, sizeof current_frame_info);
1841 CLEAR_HARD_REG_SET (mask);
1842
1843 /* Don't allocate scratches to the return register. */
1844 diddle_return_value (mark_reg_gr_used_mask, NULL);
1845
1846 /* Don't allocate scratches to the EH scratch registers. */
1847 if (cfun->machine->ia64_eh_epilogue_sp)
1848 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1849 if (cfun->machine->ia64_eh_epilogue_bsp)
1850 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1851
1852 /* Find the size of the register stack frame. We have only 80 local
1853 registers, because we reserve 8 for the inputs and 8 for the
1854 outputs. */
1855
1856 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1857 since we'll be adjusting that down later. */
1858 regno = LOC_REG (78) + ! frame_pointer_needed;
1859 for (; regno >= LOC_REG (0); regno--)
1860 if (regs_ever_live[regno])
1861 break;
1862 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1863
1864 /* For functions marked with the syscall_linkage attribute, we must mark
1865 all eight input registers as in use, so that locals aren't visible to
1866 the caller. */
1867
1868 if (cfun->machine->n_varargs > 0
1869 || lookup_attribute ("syscall_linkage",
1870 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1871 current_frame_info.n_input_regs = 8;
1872 else
1873 {
1874 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1875 if (regs_ever_live[regno])
1876 break;
1877 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1878 }
1879
1880 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1881 if (regs_ever_live[regno])
1882 break;
1883 i = regno - OUT_REG (0) + 1;
1884
1885 /* When -p profiling, we need one output register for the mcount argument.
1886 Likewise for -a profiling for the bb_init_func argument. For -ax
1887 profiling, we need two output registers for the two bb_init_trace_func
1888 arguments. */
1889 if (current_function_profile)
1890 i = MAX (i, 1);
1891 current_frame_info.n_output_regs = i;
1892
1893 /* ??? No rotating register support yet. */
1894 current_frame_info.n_rotate_regs = 0;
1895
1896 /* Discover which registers need spilling, and how much room that
1897 will take. Begin with floating point and general registers,
1898 which will always wind up on the stack. */
1899
1900 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1901 if (regs_ever_live[regno] && ! call_used_regs[regno])
1902 {
1903 SET_HARD_REG_BIT (mask, regno);
1904 spill_size += 16;
1905 n_spilled += 1;
1906 spilled_fr_p = 1;
1907 }
1908
1909 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1910 if (regs_ever_live[regno] && ! call_used_regs[regno])
1911 {
1912 SET_HARD_REG_BIT (mask, regno);
1913 spill_size += 8;
1914 n_spilled += 1;
1915 spilled_gr_p = 1;
1916 }
1917
1918 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1919 if (regs_ever_live[regno] && ! call_used_regs[regno])
1920 {
1921 SET_HARD_REG_BIT (mask, regno);
1922 spill_size += 8;
1923 n_spilled += 1;
1924 }
1925
1926 /* Now come all special registers that might get saved in other
1927 general registers. */
1928
1929 if (frame_pointer_needed)
1930 {
1931 current_frame_info.reg_fp = find_gr_spill (1);
1932 /* If we did not get a register, then we take LOC79. This is guaranteed
1933 to be free, even if regs_ever_live is already set, because this is
1934 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1935 as we don't count loc79 above. */
1936 if (current_frame_info.reg_fp == 0)
1937 {
1938 current_frame_info.reg_fp = LOC_REG (79);
1939 current_frame_info.n_local_regs++;
1940 }
1941 }
1942
1943 if (! current_function_is_leaf)
1944 {
1945 /* Emit a save of BR0 if we call other functions. Do this even
1946 if this function doesn't return, as EH depends on this to be
1947 able to unwind the stack. */
1948 SET_HARD_REG_BIT (mask, BR_REG (0));
1949
1950 current_frame_info.reg_save_b0 = find_gr_spill (1);
1951 if (current_frame_info.reg_save_b0 == 0)
1952 {
1953 spill_size += 8;
1954 n_spilled += 1;
1955 }
1956
1957 /* Similarly for ar.pfs. */
1958 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1959 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1960 if (current_frame_info.reg_save_ar_pfs == 0)
1961 {
1962 extra_spill_size += 8;
1963 n_spilled += 1;
1964 }
1965
1966 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1967 registers are clobbered, so we fall back to the stack. */
1968 current_frame_info.reg_save_gp
1969 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1970 if (current_frame_info.reg_save_gp == 0)
1971 {
1972 SET_HARD_REG_BIT (mask, GR_REG (1));
1973 spill_size += 8;
1974 n_spilled += 1;
1975 }
1976 }
1977 else
1978 {
1979 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1980 {
1981 SET_HARD_REG_BIT (mask, BR_REG (0));
1982 spill_size += 8;
1983 n_spilled += 1;
1984 }
1985
1986 if (regs_ever_live[AR_PFS_REGNUM])
1987 {
1988 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1989 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1990 if (current_frame_info.reg_save_ar_pfs == 0)
1991 {
1992 extra_spill_size += 8;
1993 n_spilled += 1;
1994 }
1995 }
1996 }
1997
1998 /* Unwind descriptor hackery: things are most efficient if we allocate
1999 consecutive GR save registers for RP, PFS, FP in that order. However,
2000 it is absolutely critical that FP get the only hard register that's
2001 guaranteed to be free, so we allocated it first. If all three did
2002 happen to be allocated hard regs, and are consecutive, rearrange them
2003 into the preferred order now. */
2004 if (current_frame_info.reg_fp != 0
2005 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2006 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2007 {
2008 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2009 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2010 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2011 }
2012
2013 /* See if we need to store the predicate register block. */
2014 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2015 if (regs_ever_live[regno] && ! call_used_regs[regno])
2016 break;
2017 if (regno <= PR_REG (63))
2018 {
2019 SET_HARD_REG_BIT (mask, PR_REG (0));
2020 current_frame_info.reg_save_pr = find_gr_spill (1);
2021 if (current_frame_info.reg_save_pr == 0)
2022 {
2023 extra_spill_size += 8;
2024 n_spilled += 1;
2025 }
2026
2027 /* ??? Mark them all as used so that register renaming and such
2028 are free to use them. */
2029 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2030 regs_ever_live[regno] = 1;
2031 }
2032
2033 /* If we're forced to use st8.spill, we're forced to save and restore
2034 ar.unat as well. The check for existing liveness allows inline asm
2035 to touch ar.unat. */
2036 if (spilled_gr_p || cfun->machine->n_varargs
2037 || regs_ever_live[AR_UNAT_REGNUM])
2038 {
2039 regs_ever_live[AR_UNAT_REGNUM] = 1;
2040 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2041 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2042 if (current_frame_info.reg_save_ar_unat == 0)
2043 {
2044 extra_spill_size += 8;
2045 n_spilled += 1;
2046 }
2047 }
2048
2049 if (regs_ever_live[AR_LC_REGNUM])
2050 {
2051 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2052 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2053 if (current_frame_info.reg_save_ar_lc == 0)
2054 {
2055 extra_spill_size += 8;
2056 n_spilled += 1;
2057 }
2058 }
2059
2060 /* If we have an odd number of words of pretend arguments written to
2061 the stack, then the FR save area will be unaligned. We round the
2062 size of this area up to keep things 16 byte aligned. */
2063 if (spilled_fr_p)
2064 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2065 else
2066 pretend_args_size = current_function_pretend_args_size;
2067
2068 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2069 + current_function_outgoing_args_size);
2070 total_size = IA64_STACK_ALIGN (total_size);
2071
2072 /* We always use the 16-byte scratch area provided by the caller, but
2073 if we are a leaf function, there's no one to which we need to provide
2074 a scratch area. */
2075 if (current_function_is_leaf)
2076 total_size = MAX (0, total_size - 16);
2077
2078 current_frame_info.total_size = total_size;
2079 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2080 current_frame_info.spill_size = spill_size;
2081 current_frame_info.extra_spill_size = extra_spill_size;
2082 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2083 current_frame_info.n_spilled = n_spilled;
2084 current_frame_info.initialized = reload_completed;
2085 }
2086
2087 /* Compute the initial difference between the specified pair of registers. */
2088
2089 HOST_WIDE_INT
2090 ia64_initial_elimination_offset (int from, int to)
2091 {
2092 HOST_WIDE_INT offset;
2093
2094 ia64_compute_frame_size (get_frame_size ());
2095 switch (from)
2096 {
2097 case FRAME_POINTER_REGNUM:
2098 if (to == HARD_FRAME_POINTER_REGNUM)
2099 {
2100 if (current_function_is_leaf)
2101 offset = -current_frame_info.total_size;
2102 else
2103 offset = -(current_frame_info.total_size
2104 - current_function_outgoing_args_size - 16);
2105 }
2106 else if (to == STACK_POINTER_REGNUM)
2107 {
2108 if (current_function_is_leaf)
2109 offset = 0;
2110 else
2111 offset = 16 + current_function_outgoing_args_size;
2112 }
2113 else
2114 abort ();
2115 break;
2116
2117 case ARG_POINTER_REGNUM:
2118 /* Arguments start above the 16 byte save area, unless stdarg,
2119 in which case we store through the 16 byte save area. */
2120 if (to == HARD_FRAME_POINTER_REGNUM)
2121 offset = 16 - current_function_pretend_args_size;
2122 else if (to == STACK_POINTER_REGNUM)
2123 offset = (current_frame_info.total_size
2124 + 16 - current_function_pretend_args_size);
2125 else
2126 abort ();
2127 break;
2128
2129 default:
2130 abort ();
2131 }
2132
2133 return offset;
2134 }
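/* A worked example (a sketch, with made-up numbers): for a non-leaf
   function with total_size == 48, no outgoing args and no pretend args,
   the offsets computed above are

     FRAME_POINTER -> HARD_FRAME_POINTER:  -(48 - 0 - 16) = -32
     FRAME_POINTER -> STACK_POINTER:        16 + 0        =  16
     ARG_POINTER   -> HARD_FRAME_POINTER:   16 - 0        =  16
     ARG_POINTER   -> STACK_POINTER:        48 + 16 - 0   =  64  */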
2135
2136 /* If there are more than a trivial number of register spills, we use
2137 two interleaved iterators so that we can get two memory references
2138 per insn group.
2139
2140 In order to simplify things in the prologue and epilogue expanders,
2141 we use helper functions to fix up the memory references after the
2142 fact with the appropriate offsets to a POST_MODIFY memory mode.
2143 The following data structure tracks the state of the two iterators
2144 while insns are being emitted. */
2145
2146 struct spill_fill_data
2147 {
2148 rtx init_after; /* point at which to emit initializations */
2149 rtx init_reg[2]; /* initial base register */
2150 rtx iter_reg[2]; /* the iterator registers */
2151 rtx *prev_addr[2]; /* address of last memory use */
2152 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2153 HOST_WIDE_INT prev_off[2]; /* last offset */
2154 int n_iter; /* number of iterators in use */
2155 int next_iter; /* next iterator to use */
2156 unsigned int save_gr_used_mask;
2157 };
2158
2159 static struct spill_fill_data spill_fill_data;
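/* A rough sketch of how these helpers are used by the prologue/epilogue
   expanders below (see ia64_expand_prologue for the real sequence):

     setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx, 0);
     do_spill (gen_movdi_x, alt_reg, cfa_off, reg);  cfa_off -= 8;
     ...
     finish_spill_pointers ();

   Each do_spill/do_restore passes a CFA-relative offset; spill_restore_mem
   later rewrites the previous memory reference into POST_MODIFY form when
   the displacement between consecutive offsets allows it.  */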
2160
2161 static void
2162 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2163 {
2164 int i;
2165
2166 spill_fill_data.init_after = get_last_insn ();
2167 spill_fill_data.init_reg[0] = init_reg;
2168 spill_fill_data.init_reg[1] = init_reg;
2169 spill_fill_data.prev_addr[0] = NULL;
2170 spill_fill_data.prev_addr[1] = NULL;
2171 spill_fill_data.prev_insn[0] = NULL;
2172 spill_fill_data.prev_insn[1] = NULL;
2173 spill_fill_data.prev_off[0] = cfa_off;
2174 spill_fill_data.prev_off[1] = cfa_off;
2175 spill_fill_data.next_iter = 0;
2176 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2177
2178 spill_fill_data.n_iter = 1 + (n_spills > 2);
2179 for (i = 0; i < spill_fill_data.n_iter; ++i)
2180 {
2181 int regno = next_scratch_gr_reg ();
2182 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2183 current_frame_info.gr_used_mask |= 1 << regno;
2184 }
2185 }
2186
2187 static void
2188 finish_spill_pointers (void)
2189 {
2190 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2191 }
2192
2193 static rtx
2194 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2195 {
2196 int iter = spill_fill_data.next_iter;
2197 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2198 rtx disp_rtx = GEN_INT (disp);
2199 rtx mem;
2200
2201 if (spill_fill_data.prev_addr[iter])
2202 {
2203 if (CONST_OK_FOR_N (disp))
2204 {
2205 *spill_fill_data.prev_addr[iter]
2206 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2207 gen_rtx_PLUS (DImode,
2208 spill_fill_data.iter_reg[iter],
2209 disp_rtx));
2210 REG_NOTES (spill_fill_data.prev_insn[iter])
2211 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2212 REG_NOTES (spill_fill_data.prev_insn[iter]));
2213 }
2214 else
2215 {
2216 /* ??? Could use register post_modify for loads. */
2217 if (! CONST_OK_FOR_I (disp))
2218 {
2219 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2220 emit_move_insn (tmp, disp_rtx);
2221 disp_rtx = tmp;
2222 }
2223 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2224 spill_fill_data.iter_reg[iter], disp_rtx));
2225 }
2226 }
2227 /* Micro-optimization: if we've created a frame pointer, it's at
2228 CFA 0, which may allow the real iterator to be initialized lower,
2229 slightly increasing parallelism. Also, if there are few saves
2230 it may eliminate the iterator entirely. */
2231 else if (disp == 0
2232 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2233 && frame_pointer_needed)
2234 {
2235 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2236 set_mem_alias_set (mem, get_varargs_alias_set ());
2237 return mem;
2238 }
2239 else
2240 {
2241 rtx seq, insn;
2242
2243 if (disp == 0)
2244 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2245 spill_fill_data.init_reg[iter]);
2246 else
2247 {
2248 start_sequence ();
2249
2250 if (! CONST_OK_FOR_I (disp))
2251 {
2252 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2253 emit_move_insn (tmp, disp_rtx);
2254 disp_rtx = tmp;
2255 }
2256
2257 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2258 spill_fill_data.init_reg[iter],
2259 disp_rtx));
2260
2261 seq = get_insns ();
2262 end_sequence ();
2263 }
2264
2265 /* Careful for being the first insn in a sequence. */
2266 if (spill_fill_data.init_after)
2267 insn = emit_insn_after (seq, spill_fill_data.init_after);
2268 else
2269 {
2270 rtx first = get_insns ();
2271 if (first)
2272 insn = emit_insn_before (seq, first);
2273 else
2274 insn = emit_insn (seq);
2275 }
2276 spill_fill_data.init_after = insn;
2277
2278 /* If DISP is 0, we may or may not have a further adjustment
2279 afterward. If we do, then the load/store insn may be modified
2280 to be a post-modify. If we don't, then this copy may be
2281 eliminated by copyprop_hardreg_forward, which makes this
2282 insn garbage, which runs afoul of the sanity check in
2283 propagate_one_insn. So mark this insn as legal to delete. */
2284 if (disp == 0)
2285 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2286 REG_NOTES (insn));
2287 }
2288
2289 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2290
2291 /* ??? Not all of the spills are for varargs, but some of them are.
2292 The rest of the spills belong in an alias set of their own. But
2293 it doesn't actually hurt to include them here. */
2294 set_mem_alias_set (mem, get_varargs_alias_set ());
2295
2296 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2297 spill_fill_data.prev_off[iter] = cfa_off;
2298
2299 if (++iter >= spill_fill_data.n_iter)
2300 iter = 0;
2301 spill_fill_data.next_iter = iter;
2302
2303 return mem;
2304 }
2305
2306 static void
2307 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2308 rtx frame_reg)
2309 {
2310 int iter = spill_fill_data.next_iter;
2311 rtx mem, insn;
2312
2313 mem = spill_restore_mem (reg, cfa_off);
2314 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2315 spill_fill_data.prev_insn[iter] = insn;
2316
2317 if (frame_reg)
2318 {
2319 rtx base;
2320 HOST_WIDE_INT off;
2321
2322 RTX_FRAME_RELATED_P (insn) = 1;
2323
2324 /* Don't even pretend that the unwind code can intuit its way
2325 through a pair of interleaved post_modify iterators. Just
2326 provide the correct answer. */
2327
2328 if (frame_pointer_needed)
2329 {
2330 base = hard_frame_pointer_rtx;
2331 off = - cfa_off;
2332 }
2333 else
2334 {
2335 base = stack_pointer_rtx;
2336 off = current_frame_info.total_size - cfa_off;
2337 }
2338
2339 REG_NOTES (insn)
2340 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2341 gen_rtx_SET (VOIDmode,
2342 gen_rtx_MEM (GET_MODE (reg),
2343 plus_constant (base, off)),
2344 frame_reg),
2345 REG_NOTES (insn));
2346 }
2347 }
2348
2349 static void
2350 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2351 {
2352 int iter = spill_fill_data.next_iter;
2353 rtx insn;
2354
2355 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2356 GEN_INT (cfa_off)));
2357 spill_fill_data.prev_insn[iter] = insn;
2358 }
2359
2360 /* Wrapper functions that discard the CONST_INT spill offset.  These
2361 exist so that we can give gr_spill/gr_fill the offset they need and
2362 use a consistent function interface. */
2363
2364 static rtx
2365 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2366 {
2367 return gen_movdi (dest, src);
2368 }
2369
2370 static rtx
2371 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2372 {
2373 return gen_fr_spill (dest, src);
2374 }
2375
2376 static rtx
2377 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2378 {
2379 return gen_fr_restore (dest, src);
2380 }
2381
2382 /* Called after register allocation to add any instructions needed for the
2383 prologue. Using a prologue insn is favored compared to putting all of the
2384 instructions in output_function_prologue(), since it allows the scheduler
2385 to intermix instructions with the saves of the caller saved registers. In
2386 some cases, it might be necessary to emit a barrier instruction as the last
2387 insn to prevent such scheduling.
2388
2389 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2390 so that the debug info generation code can handle them properly.
2391
2392 The register save area is laid out like so:
2393 cfa+16
2394 [ varargs spill area ]
2395 [ fr register spill area ]
2396 [ br register spill area ]
2397 [ ar register spill area ]
2398 [ pr register spill area ]
2399 [ gr register spill area ] */
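/* The prologue below fills this area from the bottom up: cfa_off starts at
   spill_cfa_off + spill_size + extra_spill_size, each GR/BR/AR/PR save
   decrements it by 8 and each FR save by 16, and the abort () checks verify
   that it lands exactly on spill_cfa_off once all saves have been emitted.  */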
2400
2401 /* ??? Get inefficient code when the frame size is larger than can fit in an
2402 adds instruction. */
2403
2404 void
2405 ia64_expand_prologue (void)
2406 {
2407 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2408 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2409 rtx reg, alt_reg;
2410
2411 ia64_compute_frame_size (get_frame_size ());
2412 last_scratch_gr_reg = 15;
2413
2414 /* If there is no epilogue, then we don't need some prologue insns.
2415 We need to avoid emitting the dead prologue insns, because flow
2416 will complain about them. */
2417 if (optimize)
2418 {
2419 edge e;
2420
2421 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2422 if ((e->flags & EDGE_FAKE) == 0
2423 && (e->flags & EDGE_FALLTHRU) != 0)
2424 break;
2425 epilogue_p = (e != NULL);
2426 }
2427 else
2428 epilogue_p = 1;
2429
2430 /* Set the local, input, and output register names. We need to do this
2431 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2432 half. If we use in/loc/out register names, then we get assembler errors
2433 in crtn.S because there is no alloc insn or regstk directive in there. */
2434 if (! TARGET_REG_NAMES)
2435 {
2436 int inputs = current_frame_info.n_input_regs;
2437 int locals = current_frame_info.n_local_regs;
2438 int outputs = current_frame_info.n_output_regs;
2439
2440 for (i = 0; i < inputs; i++)
2441 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2442 for (i = 0; i < locals; i++)
2443 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2444 for (i = 0; i < outputs; i++)
2445 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2446 }
2447
2448 /* Set the frame pointer register name. The regnum is logically loc79,
2449 but of course we'll not have allocated that many locals. Rather than
2450 worrying about renumbering the existing rtxs, we adjust the name. */
2451 /* ??? This code means that we can never use one local register when
2452 there is a frame pointer. loc79 gets wasted in this case, as it is
2453 renamed to a register that will never be used. See also the try_locals
2454 code in find_gr_spill. */
2455 if (current_frame_info.reg_fp)
2456 {
2457 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2458 reg_names[HARD_FRAME_POINTER_REGNUM]
2459 = reg_names[current_frame_info.reg_fp];
2460 reg_names[current_frame_info.reg_fp] = tmp;
2461 }
2462
2463 /* We don't need an alloc instruction if we've used no outputs or locals. */
2464 if (current_frame_info.n_local_regs == 0
2465 && current_frame_info.n_output_regs == 0
2466 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2467 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2468 {
2469 /* If there is no alloc, but there are input registers used, then we
2470 need a .regstk directive. */
2471 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2472 ar_pfs_save_reg = NULL_RTX;
2473 }
2474 else
2475 {
2476 current_frame_info.need_regstk = 0;
2477
2478 if (current_frame_info.reg_save_ar_pfs)
2479 regno = current_frame_info.reg_save_ar_pfs;
2480 else
2481 regno = next_scratch_gr_reg ();
2482 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2483
2484 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2485 GEN_INT (current_frame_info.n_input_regs),
2486 GEN_INT (current_frame_info.n_local_regs),
2487 GEN_INT (current_frame_info.n_output_regs),
2488 GEN_INT (current_frame_info.n_rotate_regs)));
2489 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2490 }
2491
2492 /* Set up frame pointer, stack pointer, and spill iterators. */
2493
2494 n_varargs = cfun->machine->n_varargs;
2495 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2496 stack_pointer_rtx, 0);
2497
2498 if (frame_pointer_needed)
2499 {
2500 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2501 RTX_FRAME_RELATED_P (insn) = 1;
2502 }
2503
2504 if (current_frame_info.total_size != 0)
2505 {
2506 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2507 rtx offset;
2508
2509 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2510 offset = frame_size_rtx;
2511 else
2512 {
2513 regno = next_scratch_gr_reg ();
2514 offset = gen_rtx_REG (DImode, regno);
2515 emit_move_insn (offset, frame_size_rtx);
2516 }
2517
2518 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2519 stack_pointer_rtx, offset));
2520
2521 if (! frame_pointer_needed)
2522 {
2523 RTX_FRAME_RELATED_P (insn) = 1;
2524 if (GET_CODE (offset) != CONST_INT)
2525 {
2526 REG_NOTES (insn)
2527 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2528 gen_rtx_SET (VOIDmode,
2529 stack_pointer_rtx,
2530 gen_rtx_PLUS (DImode,
2531 stack_pointer_rtx,
2532 frame_size_rtx)),
2533 REG_NOTES (insn));
2534 }
2535 }
2536
2537 /* ??? At this point we must generate a magic insn that appears to
2538 modify the stack pointer, the frame pointer, and all spill
2539 iterators. This would allow the most scheduling freedom. For
2540 now, just hard stop. */
2541 emit_insn (gen_blockage ());
2542 }
2543
2544 /* Must copy out ar.unat before doing any integer spills. */
2545 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2546 {
2547 if (current_frame_info.reg_save_ar_unat)
2548 ar_unat_save_reg
2549 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2550 else
2551 {
2552 alt_regno = next_scratch_gr_reg ();
2553 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2554 current_frame_info.gr_used_mask |= 1 << alt_regno;
2555 }
2556
2557 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2558 insn = emit_move_insn (ar_unat_save_reg, reg);
2559 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2560
2561 /* Even if we're not going to generate an epilogue, we still
2562 need to save the register so that EH works. */
2563 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2564 emit_insn (gen_prologue_use (ar_unat_save_reg));
2565 }
2566 else
2567 ar_unat_save_reg = NULL_RTX;
2568
2569 /* Spill all varargs registers. Do this before spilling any GR registers,
2570 since we want the UNAT bits for the GR registers to override the UNAT
2571 bits from varargs, which we don't care about. */
2572
2573 cfa_off = -16;
2574 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2575 {
2576 reg = gen_rtx_REG (DImode, regno);
2577 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2578 }
2579
2580 /* Locate the bottom of the register save area. */
2581 cfa_off = (current_frame_info.spill_cfa_off
2582 + current_frame_info.spill_size
2583 + current_frame_info.extra_spill_size);
2584
2585 /* Save the predicate register block either in a register or in memory. */
2586 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2587 {
2588 reg = gen_rtx_REG (DImode, PR_REG (0));
2589 if (current_frame_info.reg_save_pr != 0)
2590 {
2591 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2592 insn = emit_move_insn (alt_reg, reg);
2593
2594 /* ??? Denote pr spill/fill by a DImode move that modifies all
2595 64 hard registers. */
2596 RTX_FRAME_RELATED_P (insn) = 1;
2597 REG_NOTES (insn)
2598 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2599 gen_rtx_SET (VOIDmode, alt_reg, reg),
2600 REG_NOTES (insn));
2601
2602 /* Even if we're not going to generate an epilogue, we still
2603 need to save the register so that EH works. */
2604 if (! epilogue_p)
2605 emit_insn (gen_prologue_use (alt_reg));
2606 }
2607 else
2608 {
2609 alt_regno = next_scratch_gr_reg ();
2610 alt_reg = gen_rtx_REG (DImode, alt_regno);
2611 insn = emit_move_insn (alt_reg, reg);
2612 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2613 cfa_off -= 8;
2614 }
2615 }
2616
2617 /* Handle AR regs in numerical order. All of them get special handling. */
2618 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2619 && current_frame_info.reg_save_ar_unat == 0)
2620 {
2621 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2622 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2623 cfa_off -= 8;
2624 }
2625
2626 /* The alloc insn already copied ar.pfs into a general register. The
2627 only thing we have to do now is copy that register to a stack slot
2628 if we'd not allocated a local register for the job. */
2629 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2630 && current_frame_info.reg_save_ar_pfs == 0)
2631 {
2632 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2633 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2634 cfa_off -= 8;
2635 }
2636
2637 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2638 {
2639 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2640 if (current_frame_info.reg_save_ar_lc != 0)
2641 {
2642 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2643 insn = emit_move_insn (alt_reg, reg);
2644 RTX_FRAME_RELATED_P (insn) = 1;
2645
2646 /* Even if we're not going to generate an epilogue, we still
2647 need to save the register so that EH works. */
2648 if (! epilogue_p)
2649 emit_insn (gen_prologue_use (alt_reg));
2650 }
2651 else
2652 {
2653 alt_regno = next_scratch_gr_reg ();
2654 alt_reg = gen_rtx_REG (DImode, alt_regno);
2655 emit_move_insn (alt_reg, reg);
2656 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2657 cfa_off -= 8;
2658 }
2659 }
2660
2661 if (current_frame_info.reg_save_gp)
2662 {
2663 insn = emit_move_insn (gen_rtx_REG (DImode,
2664 current_frame_info.reg_save_gp),
2665 pic_offset_table_rtx);
2666 /* We don't know for sure yet if this is actually needed, since
2667 we've not split the PIC call patterns. If all of the calls
2668 are indirect, and not followed by any uses of the gp, then
2669 this save is dead. Allow it to go away. */
2670 REG_NOTES (insn)
2671 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2672 }
2673
2674 /* We should now be at the base of the gr/br/fr spill area. */
2675 if (cfa_off != (current_frame_info.spill_cfa_off
2676 + current_frame_info.spill_size))
2677 abort ();
2678
2679 /* Spill all general registers. */
2680 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2681 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2682 {
2683 reg = gen_rtx_REG (DImode, regno);
2684 do_spill (gen_gr_spill, reg, cfa_off, reg);
2685 cfa_off -= 8;
2686 }
2687
2688 /* Handle BR0 specially -- it may be getting stored permanently in
2689 some GR register. */
2690 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2691 {
2692 reg = gen_rtx_REG (DImode, BR_REG (0));
2693 if (current_frame_info.reg_save_b0 != 0)
2694 {
2695 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2696 insn = emit_move_insn (alt_reg, reg);
2697 RTX_FRAME_RELATED_P (insn) = 1;
2698
2699 /* Even if we're not going to generate an epilogue, we still
2700 need to save the register so that EH works. */
2701 if (! epilogue_p)
2702 emit_insn (gen_prologue_use (alt_reg));
2703 }
2704 else
2705 {
2706 alt_regno = next_scratch_gr_reg ();
2707 alt_reg = gen_rtx_REG (DImode, alt_regno);
2708 emit_move_insn (alt_reg, reg);
2709 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2710 cfa_off -= 8;
2711 }
2712 }
2713
2714 /* Spill the rest of the BR registers. */
2715 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2716 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2717 {
2718 alt_regno = next_scratch_gr_reg ();
2719 alt_reg = gen_rtx_REG (DImode, alt_regno);
2720 reg = gen_rtx_REG (DImode, regno);
2721 emit_move_insn (alt_reg, reg);
2722 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2723 cfa_off -= 8;
2724 }
2725
2726 /* Align the frame and spill all FR registers. */
2727 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2728 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2729 {
2730 if (cfa_off & 15)
2731 abort ();
2732 reg = gen_rtx_REG (XFmode, regno);
2733 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2734 cfa_off -= 16;
2735 }
2736
2737 if (cfa_off != current_frame_info.spill_cfa_off)
2738 abort ();
2739
2740 finish_spill_pointers ();
2741 }
2742
2743 /* Called after register allocation to add any instructions needed for the
2744 epilogue. Using an epilogue insn is favored compared to putting all of the
2745 instructions in output_function_epilogue(), since it allows the scheduler
2746 to intermix instructions with the restores of the caller saved registers.  In
2747 some cases, it might be necessary to emit a barrier instruction as the last
2748 insn to prevent such scheduling. */
2749
2750 void
2751 ia64_expand_epilogue (int sibcall_p)
2752 {
2753 rtx insn, reg, alt_reg, ar_unat_save_reg;
2754 int regno, alt_regno, cfa_off;
2755
2756 ia64_compute_frame_size (get_frame_size ());
2757
2758 /* If there is a frame pointer, then we use it instead of the stack
2759 pointer, so that the stack pointer does not need to be valid when
2760 the epilogue starts. See EXIT_IGNORE_STACK. */
2761 if (frame_pointer_needed)
2762 setup_spill_pointers (current_frame_info.n_spilled,
2763 hard_frame_pointer_rtx, 0);
2764 else
2765 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2766 current_frame_info.total_size);
2767
2768 if (current_frame_info.total_size != 0)
2769 {
2770 /* ??? At this point we must generate a magic insn that appears to
2771 modify the spill iterators and the frame pointer. This would
2772 allow the most scheduling freedom. For now, just hard stop. */
2773 emit_insn (gen_blockage ());
2774 }
2775
2776 /* Locate the bottom of the register save area. */
2777 cfa_off = (current_frame_info.spill_cfa_off
2778 + current_frame_info.spill_size
2779 + current_frame_info.extra_spill_size);
2780
2781 /* Restore the predicate registers. */
2782 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2783 {
2784 if (current_frame_info.reg_save_pr != 0)
2785 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2786 else
2787 {
2788 alt_regno = next_scratch_gr_reg ();
2789 alt_reg = gen_rtx_REG (DImode, alt_regno);
2790 do_restore (gen_movdi_x, alt_reg, cfa_off);
2791 cfa_off -= 8;
2792 }
2793 reg = gen_rtx_REG (DImode, PR_REG (0));
2794 emit_move_insn (reg, alt_reg);
2795 }
2796
2797 /* Restore the application registers. */
2798
2799 /* Load the saved unat from the stack, but do not restore it until
2800 after the GRs have been restored. */
2801 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2802 {
2803 if (current_frame_info.reg_save_ar_unat != 0)
2804 ar_unat_save_reg
2805 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2806 else
2807 {
2808 alt_regno = next_scratch_gr_reg ();
2809 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2810 current_frame_info.gr_used_mask |= 1 << alt_regno;
2811 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2812 cfa_off -= 8;
2813 }
2814 }
2815 else
2816 ar_unat_save_reg = NULL_RTX;
2817
2818 if (current_frame_info.reg_save_ar_pfs != 0)
2819 {
2820 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2821 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2822 emit_move_insn (reg, alt_reg);
2823 }
2824 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2825 {
2826 alt_regno = next_scratch_gr_reg ();
2827 alt_reg = gen_rtx_REG (DImode, alt_regno);
2828 do_restore (gen_movdi_x, alt_reg, cfa_off);
2829 cfa_off -= 8;
2830 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2831 emit_move_insn (reg, alt_reg);
2832 }
2833
2834 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2835 {
2836 if (current_frame_info.reg_save_ar_lc != 0)
2837 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2838 else
2839 {
2840 alt_regno = next_scratch_gr_reg ();
2841 alt_reg = gen_rtx_REG (DImode, alt_regno);
2842 do_restore (gen_movdi_x, alt_reg, cfa_off);
2843 cfa_off -= 8;
2844 }
2845 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2846 emit_move_insn (reg, alt_reg);
2847 }
2848
2849 /* We should now be at the base of the gr/br/fr spill area. */
2850 if (cfa_off != (current_frame_info.spill_cfa_off
2851 + current_frame_info.spill_size))
2852 abort ();
2853
2854 /* The GP may be stored on the stack in the prologue, but it's
2855 never restored in the epilogue. Skip the stack slot. */
2856 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2857 cfa_off -= 8;
2858
2859 /* Restore all general registers. */
2860 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2861 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2862 {
2863 reg = gen_rtx_REG (DImode, regno);
2864 do_restore (gen_gr_restore, reg, cfa_off);
2865 cfa_off -= 8;
2866 }
2867
2868 /* Restore the branch registers. Handle B0 specially, as it may
2869 have gotten stored in some GR register. */
2870 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2871 {
2872 if (current_frame_info.reg_save_b0 != 0)
2873 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2874 else
2875 {
2876 alt_regno = next_scratch_gr_reg ();
2877 alt_reg = gen_rtx_REG (DImode, alt_regno);
2878 do_restore (gen_movdi_x, alt_reg, cfa_off);
2879 cfa_off -= 8;
2880 }
2881 reg = gen_rtx_REG (DImode, BR_REG (0));
2882 emit_move_insn (reg, alt_reg);
2883 }
2884
2885 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2886 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2887 {
2888 alt_regno = next_scratch_gr_reg ();
2889 alt_reg = gen_rtx_REG (DImode, alt_regno);
2890 do_restore (gen_movdi_x, alt_reg, cfa_off);
2891 cfa_off -= 8;
2892 reg = gen_rtx_REG (DImode, regno);
2893 emit_move_insn (reg, alt_reg);
2894 }
2895
2896 /* Restore floating point registers. */
2897 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2898 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2899 {
2900 if (cfa_off & 15)
2901 abort ();
2902 reg = gen_rtx_REG (XFmode, regno);
2903 do_restore (gen_fr_restore_x, reg, cfa_off);
2904 cfa_off -= 16;
2905 }
2906
2907 /* Restore ar.unat for real. */
2908 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2909 {
2910 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2911 emit_move_insn (reg, ar_unat_save_reg);
2912 }
2913
2914 if (cfa_off != current_frame_info.spill_cfa_off)
2915 abort ();
2916
2917 finish_spill_pointers ();
2918
2919 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2920 {
2921 /* ??? At this point we must generate a magic insn that appears to
2922 modify the spill iterators, the stack pointer, and the frame
2923 pointer. This would allow the most scheduling freedom. For now,
2924 just hard stop. */
2925 emit_insn (gen_blockage ());
2926 }
2927
2928 if (cfun->machine->ia64_eh_epilogue_sp)
2929 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2930 else if (frame_pointer_needed)
2931 {
2932 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2933 RTX_FRAME_RELATED_P (insn) = 1;
2934 }
2935 else if (current_frame_info.total_size)
2936 {
2937 rtx offset, frame_size_rtx;
2938
2939 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2940 if (CONST_OK_FOR_I (current_frame_info.total_size))
2941 offset = frame_size_rtx;
2942 else
2943 {
2944 regno = next_scratch_gr_reg ();
2945 offset = gen_rtx_REG (DImode, regno);
2946 emit_move_insn (offset, frame_size_rtx);
2947 }
2948
2949 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2950 offset));
2951
2952 RTX_FRAME_RELATED_P (insn) = 1;
2953 if (GET_CODE (offset) != CONST_INT)
2954 {
2955 REG_NOTES (insn)
2956 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2957 gen_rtx_SET (VOIDmode,
2958 stack_pointer_rtx,
2959 gen_rtx_PLUS (DImode,
2960 stack_pointer_rtx,
2961 frame_size_rtx)),
2962 REG_NOTES (insn));
2963 }
2964 }
2965
2966 if (cfun->machine->ia64_eh_epilogue_bsp)
2967 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2968
2969 if (! sibcall_p)
2970 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2971 else
2972 {
2973 int fp = GR_REG (2);
2974 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
2975 first available call-clobbered register.  If there was a frame pointer
2976 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2977 so we have to make sure we're using the string "r2" when emitting
2978 the register name for the assembler. */
2979 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2980 fp = HARD_FRAME_POINTER_REGNUM;
2981
2982 /* We must emit an alloc to force the input registers to become output
2983 registers. Otherwise, if the callee tries to pass its parameters
2984 through to another call without an intervening alloc, then these
2985 values get lost. */
2986 /* ??? We don't need to preserve all input registers. We only need to
2987 preserve those input registers used as arguments to the sibling call.
2988 It is unclear how to compute that number here. */
2989 if (current_frame_info.n_input_regs != 0)
2990 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2991 GEN_INT (0), GEN_INT (0),
2992 GEN_INT (current_frame_info.n_input_regs),
2993 GEN_INT (0)));
2994 }
2995 }
2996
2997 /* Return 1 if br.ret can do all the work required to return from a
2998 function. */
2999
3000 int
3001 ia64_direct_return (void)
3002 {
3003 if (reload_completed && ! frame_pointer_needed)
3004 {
3005 ia64_compute_frame_size (get_frame_size ());
3006
3007 return (current_frame_info.total_size == 0
3008 && current_frame_info.n_spilled == 0
3009 && current_frame_info.reg_save_b0 == 0
3010 && current_frame_info.reg_save_pr == 0
3011 && current_frame_info.reg_save_ar_pfs == 0
3012 && current_frame_info.reg_save_ar_unat == 0
3013 && current_frame_info.reg_save_ar_lc == 0);
3014 }
3015 return 0;
3016 }
3017
3018 /* Return the magic cookie that we use to hold the return address
3019 during early compilation. */
3020
3021 rtx
3022 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3023 {
3024 if (count != 0)
3025 return NULL;
3026 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3027 }
3028
3029 /* Split this value after reload, now that we know where the return
3030 address is saved. */
3031
3032 void
3033 ia64_split_return_addr_rtx (rtx dest)
3034 {
3035 rtx src;
3036
3037 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3038 {
3039 if (current_frame_info.reg_save_b0 != 0)
3040 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3041 else
3042 {
3043 HOST_WIDE_INT off;
3044 unsigned int regno;
3045
3046 /* Compute offset from CFA for BR0. */
3047 /* ??? Must be kept in sync with ia64_expand_prologue. */
3048 off = (current_frame_info.spill_cfa_off
3049 + current_frame_info.spill_size);
3050 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3051 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3052 off -= 8;
3053
3054 /* Convert CFA offset to a register based offset. */
3055 if (frame_pointer_needed)
3056 src = hard_frame_pointer_rtx;
3057 else
3058 {
3059 src = stack_pointer_rtx;
3060 off += current_frame_info.total_size;
3061 }
3062
3063 /* Load address into scratch register. */
3064 if (CONST_OK_FOR_I (off))
3065 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3066 else
3067 {
3068 emit_move_insn (dest, GEN_INT (off));
3069 emit_insn (gen_adddi3 (dest, src, dest));
3070 }
3071
3072 src = gen_rtx_MEM (Pmode, dest);
3073 }
3074 }
3075 else
3076 src = gen_rtx_REG (DImode, BR_REG (0));
3077
3078 emit_move_insn (dest, src);
3079 }
3080
3081 int
3082 ia64_hard_regno_rename_ok (int from, int to)
3083 {
3084 /* Don't clobber any of the registers we reserved for the prologue. */
3085 if (to == current_frame_info.reg_fp
3086 || to == current_frame_info.reg_save_b0
3087 || to == current_frame_info.reg_save_pr
3088 || to == current_frame_info.reg_save_ar_pfs
3089 || to == current_frame_info.reg_save_ar_unat
3090 || to == current_frame_info.reg_save_ar_lc)
3091 return 0;
3092
3093 if (from == current_frame_info.reg_fp
3094 || from == current_frame_info.reg_save_b0
3095 || from == current_frame_info.reg_save_pr
3096 || from == current_frame_info.reg_save_ar_pfs
3097 || from == current_frame_info.reg_save_ar_unat
3098 || from == current_frame_info.reg_save_ar_lc)
3099 return 0;
3100
3101 /* Don't use output registers outside the register frame. */
3102 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3103 return 0;
3104
3105 /* Retain even/oddness on predicate register pairs. */
3106 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3107 return (from & 1) == (to & 1);
3108
3109 return 1;
3110 }
3111
3112 /* Target hook for assembling integer objects. Handle word-sized
3113 aligned objects and detect the cases when @fptr is needed. */
3114
3115 static bool
3116 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3117 {
3118 if (size == POINTER_SIZE / BITS_PER_UNIT
3119 && aligned_p
3120 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3121 && GET_CODE (x) == SYMBOL_REF
3122 && SYMBOL_REF_FUNCTION_P (x))
3123 {
3124 if (POINTER_SIZE == 32)
3125 fputs ("\tdata4\t@fptr(", asm_out_file);
3126 else
3127 fputs ("\tdata8\t@fptr(", asm_out_file);
3128 output_addr_const (asm_out_file, x);
3129 fputs (")\n", asm_out_file);
3130 return true;
3131 }
3132 return default_assemble_integer (x, size, aligned_p);
3133 }
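/* For instance, assembling a 64-bit aligned pointer to a function `foo'
   (a hypothetical symbol) produces

	data8	@fptr(foo)

   which asks the assembler/linker for the address of foo's function
   descriptor rather than the raw code address.  */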
3134
3135 /* Emit the function prologue. */
3136
3137 static void
3138 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3139 {
3140 int mask, grsave, grsave_prev;
3141
3142 if (current_frame_info.need_regstk)
3143 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3144 current_frame_info.n_input_regs,
3145 current_frame_info.n_local_regs,
3146 current_frame_info.n_output_regs,
3147 current_frame_info.n_rotate_regs);
3148
3149 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3150 return;
3151
3152 /* Emit the .prologue directive. */
3153
3154 mask = 0;
3155 grsave = grsave_prev = 0;
3156 if (current_frame_info.reg_save_b0 != 0)
3157 {
3158 mask |= 8;
3159 grsave = grsave_prev = current_frame_info.reg_save_b0;
3160 }
3161 if (current_frame_info.reg_save_ar_pfs != 0
3162 && (grsave_prev == 0
3163 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3164 {
3165 mask |= 4;
3166 if (grsave_prev == 0)
3167 grsave = current_frame_info.reg_save_ar_pfs;
3168 grsave_prev = current_frame_info.reg_save_ar_pfs;
3169 }
3170 if (current_frame_info.reg_fp != 0
3171 && (grsave_prev == 0
3172 || current_frame_info.reg_fp == grsave_prev + 1))
3173 {
3174 mask |= 2;
3175 if (grsave_prev == 0)
3176 grsave = HARD_FRAME_POINTER_REGNUM;
3177 grsave_prev = current_frame_info.reg_fp;
3178 }
3179 if (current_frame_info.reg_save_pr != 0
3180 && (grsave_prev == 0
3181 || current_frame_info.reg_save_pr == grsave_prev + 1))
3182 {
3183 mask |= 1;
3184 if (grsave_prev == 0)
3185 grsave = current_frame_info.reg_save_pr;
3186 }
3187
3188 if (mask && TARGET_GNU_AS)
3189 fprintf (file, "\t.prologue %d, %d\n", mask,
3190 ia64_dbx_register_number (grsave));
3191 else
3192 fputs ("\t.prologue\n", file);
3193
3194 /* Emit a .spill directive, if necessary, to relocate the base of
3195 the register spill area. */
3196 if (current_frame_info.spill_cfa_off != -16)
3197 fprintf (file, "\t.spill %ld\n",
3198 (long) (current_frame_info.spill_cfa_off
3199 + current_frame_info.spill_size));
3200 }
3201
3202 /* Emit the .body directive at the scheduled end of the prologue. */
3203
3204 static void
3205 ia64_output_function_end_prologue (FILE *file)
3206 {
3207 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3208 return;
3209
3210 fputs ("\t.body\n", file);
3211 }
3212
3213 /* Emit the function epilogue. */
3214
3215 static void
3216 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3217 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3218 {
3219 int i;
3220
3221 if (current_frame_info.reg_fp)
3222 {
3223 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3224 reg_names[HARD_FRAME_POINTER_REGNUM]
3225 = reg_names[current_frame_info.reg_fp];
3226 reg_names[current_frame_info.reg_fp] = tmp;
3227 }
3228 if (! TARGET_REG_NAMES)
3229 {
3230 for (i = 0; i < current_frame_info.n_input_regs; i++)
3231 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3232 for (i = 0; i < current_frame_info.n_local_regs; i++)
3233 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3234 for (i = 0; i < current_frame_info.n_output_regs; i++)
3235 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3236 }
3237
3238 current_frame_info.initialized = 0;
3239 }
3240
3241 int
3242 ia64_dbx_register_number (int regno)
3243 {
3244 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3245 from its home at loc79 to something inside the register frame. We
3246 must perform the same renumbering here for the debug info. */
3247 if (current_frame_info.reg_fp)
3248 {
3249 if (regno == HARD_FRAME_POINTER_REGNUM)
3250 regno = current_frame_info.reg_fp;
3251 else if (regno == current_frame_info.reg_fp)
3252 regno = HARD_FRAME_POINTER_REGNUM;
3253 }
3254
3255 if (IN_REGNO_P (regno))
3256 return 32 + regno - IN_REG (0);
3257 else if (LOC_REGNO_P (regno))
3258 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3259 else if (OUT_REGNO_P (regno))
3260 return (32 + current_frame_info.n_input_regs
3261 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3262 else
3263 return regno;
3264 }
3265
3266 void
3267 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3268 {
3269 rtx addr_reg, eight = GEN_INT (8);
3270
3271 /* The Intel assembler requires that the global __ia64_trampoline symbol
3272 be declared explicitly. */
3273 if (!TARGET_GNU_AS)
3274 {
3275 static bool declared_ia64_trampoline = false;
3276
3277 if (!declared_ia64_trampoline)
3278 {
3279 declared_ia64_trampoline = true;
3280 (*targetm.asm_out.globalize_label) (asm_out_file,
3281 "__ia64_trampoline");
3282 }
3283 }
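/* Sketch of the trampoline laid out by the code below (offsets in bytes
   from ADDR, one 8-byte word each):

     ADDR +  0:  __ia64_trampoline     } fake function descriptor
     ADDR +  8:  ADDR + 16             }
     ADDR + 16:  FNADDR                  target descriptor word
     ADDR + 24:  STATIC_CHAIN            static chain value  */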
3284
3285 /* Load up our iterator. */
3286 addr_reg = gen_reg_rtx (Pmode);
3287 emit_move_insn (addr_reg, addr);
3288
3289 /* The first two words are the fake descriptor:
3290 __ia64_trampoline, ADDR+16. */
3291 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3292 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3293 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3294
3295 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3296 copy_to_reg (plus_constant (addr, 16)));
3297 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3298
3299 /* The third word is the target descriptor. */
3300 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3301 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3302
3303 /* The fourth word is the static chain. */
3304 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3305 }
3306 \f
3307 /* Do any needed setup for a variadic function. CUM has not been updated
3308 for the last named argument which has type TYPE and mode MODE.
3309
3310 We generate the actual spill instructions during prologue generation. */
3311
3312 void
3313 ia64_setup_incoming_varargs (CUMULATIVE_ARGS cum, int int_mode, tree type,
3314 int * pretend_size,
3315 int second_time ATTRIBUTE_UNUSED)
3316 {
3317 /* Skip the current argument. */
3318 ia64_function_arg_advance (&cum, int_mode, type, 1);
3319
3320 if (cum.words < MAX_ARGUMENT_SLOTS)
3321 {
3322 int n = MAX_ARGUMENT_SLOTS - cum.words;
3323 *pretend_size = n * UNITS_PER_WORD;
3324 cfun->machine->n_varargs = n;
3325 }
3326 }
3327
3328 /* Check whether TYPE is a homogeneous floating point aggregate. If
3329 it is, return the mode of the floating point type that appears
3330 in all leaves.  If it is not, return VOIDmode.
3331
3332 An aggregate is a homogeneous floating point aggregate if all
3333 fields/elements in it have the same floating point type (e.g.,
3334 SFmode). 128-bit quad-precision floats are excluded. */
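/* Illustrative examples (not taken from the sources): struct { float x, y; }
   is an SFmode HFA, struct { double d; _Complex double c; } is a DFmode HFA,
   while struct { float f; double d; } mixes element modes and is therefore
   not an HFA (VOIDmode).  */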
3335
3336 static enum machine_mode
3337 hfa_element_mode (tree type, int nested)
3338 {
3339 enum machine_mode element_mode = VOIDmode;
3340 enum machine_mode mode;
3341 enum tree_code code = TREE_CODE (type);
3342 int know_element_mode = 0;
3343 tree t;
3344
3345 switch (code)
3346 {
3347 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3348 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3349 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3350 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3351 case FUNCTION_TYPE:
3352 return VOIDmode;
3353
3354 /* Fortran complex types are supposed to be HFAs, so we need to handle
3355 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3356 types though. */
3357 case COMPLEX_TYPE:
3358 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3359 && TYPE_MODE (type) != TCmode)
3360 return GET_MODE_INNER (TYPE_MODE (type));
3361 else
3362 return VOIDmode;
3363
3364 case REAL_TYPE:
3365 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3366 mode if this is contained within an aggregate. */
3367 if (nested && TYPE_MODE (type) != TFmode)
3368 return TYPE_MODE (type);
3369 else
3370 return VOIDmode;
3371
3372 case ARRAY_TYPE:
3373 return hfa_element_mode (TREE_TYPE (type), 1);
3374
3375 case RECORD_TYPE:
3376 case UNION_TYPE:
3377 case QUAL_UNION_TYPE:
3378 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3379 {
3380 if (TREE_CODE (t) != FIELD_DECL)
3381 continue;
3382
3383 mode = hfa_element_mode (TREE_TYPE (t), 1);
3384 if (know_element_mode)
3385 {
3386 if (mode != element_mode)
3387 return VOIDmode;
3388 }
3389 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3390 return VOIDmode;
3391 else
3392 {
3393 know_element_mode = 1;
3394 element_mode = mode;
3395 }
3396 }
3397 return element_mode;
3398
3399 default:
3400 /* If we reach here, we probably have some front-end specific type
3401 that the backend doesn't know about. This can happen via the
3402 aggregate_value_p call in init_function_start. All we can do is
3403 ignore unknown tree types. */
3404 return VOIDmode;
3405 }
3406
3407 return VOIDmode;
3408 }
3409
3410 /* Return rtx for register where argument is passed, or zero if it is passed
3411 on the stack. */
3412
3413 /* ??? 128-bit quad-precision floats are always passed in general
3414 registers. */
3415
3416 rtx
3417 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3418 int named, int incoming)
3419 {
3420 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3421 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3422 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3423 / UNITS_PER_WORD);
3424 int offset = 0;
3425 enum machine_mode hfa_mode = VOIDmode;
3426
3427 /* Integer and float arguments larger than 8 bytes start at the next even
3428 boundary. Aggregates larger than 8 bytes start at the next even boundary
3429 if the aggregate has 16 byte alignment. Net effect is that types with
3430 alignment greater than 8 start at the next even boundary. */
3431 /* ??? The ABI does not specify how to handle aggregates with alignment from
3432 9 to 15 bytes, or greater than 16. We handle them all as if they had
3433 16 byte alignment. Such aggregates can occur only if gcc extensions are
3434 used. */
3435 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3436 : (words > 1))
3437 && (cum->words & 1))
3438 offset = 1;
3439
3440 /* If all argument slots are used, then it must go on the stack. */
3441 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3442 return 0;
3443
3444 /* Check for and handle homogeneous FP aggregates. */
3445 if (type)
3446 hfa_mode = hfa_element_mode (type, 0);
3447
3448 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3449 and unprototyped hfas are passed specially. */
3450 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3451 {
3452 rtx loc[16];
3453 int i = 0;
3454 int fp_regs = cum->fp_regs;
3455 int int_regs = cum->words + offset;
3456 int hfa_size = GET_MODE_SIZE (hfa_mode);
3457 int byte_size;
3458 int args_byte_size;
3459
3460 /* If prototyped, pass it in FR regs then GR regs.
3461 If not prototyped, pass it in both FR and GR regs.
3462
3463 If this is an SFmode aggregate, then it is possible to run out of
3464 FR regs while GR regs are still left. In that case, we pass the
3465 remaining part in the GR regs. */
3466
3467 /* Fill the FP regs. We do this always. We stop if we reach the end
3468 of the argument, the last FP register, or the last argument slot. */
3469
3470 byte_size = ((mode == BLKmode)
3471 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3472 args_byte_size = int_regs * UNITS_PER_WORD;
3473 offset = 0;
3474 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3475 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3476 {
3477 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3478 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3479 + fp_regs)),
3480 GEN_INT (offset));
3481 offset += hfa_size;
3482 args_byte_size += hfa_size;
3483 fp_regs++;
3484 }
3485
3486 /* If no prototype, then the whole thing must go in GR regs. */
3487 if (! cum->prototype)
3488 offset = 0;
3489 /* If this is an SFmode aggregate, then we might have some left over
3490 that needs to go in GR regs. */
3491 else if (byte_size != offset)
3492 int_regs += offset / UNITS_PER_WORD;
3493
3494 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3495
3496 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3497 {
3498 enum machine_mode gr_mode = DImode;
3499
3500 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3501 then this goes in a GR reg left adjusted/little endian, right
3502 adjusted/big endian. */
3503 /* ??? Currently this is handled wrong, because 4-byte hunks are
3504 always right adjusted/little endian. */
3505 if (offset & 0x4)
3506 gr_mode = SImode;
3507 /* If we have an even 4 byte hunk because the aggregate is a
3508 multiple of 4 bytes in size, then this goes in a GR reg right
3509 adjusted/little endian. */
3510 else if (byte_size - offset == 4)
3511 gr_mode = SImode;
3512 /* Complex floats need to have float mode. */
3513 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3514 gr_mode = hfa_mode;
3515
3516 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3517 gen_rtx_REG (gr_mode, (basereg
3518 + int_regs)),
3519 GEN_INT (offset));
3520 offset += GET_MODE_SIZE (gr_mode);
3521 int_regs += GET_MODE_SIZE (gr_mode) <= UNITS_PER_WORD
3522 ? 1 : GET_MODE_SIZE (gr_mode) / UNITS_PER_WORD;
3523 }
3524
3525 /* If we ended up using just one location, just return that one loc, but
3526 change the mode back to the argument mode. */
3527 if (i == 1)
3528 return gen_rtx_REG (mode, REGNO (XEXP (loc[0], 0)));
3529 else
3530 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3531 }
3532
3533 /* Integral types and aggregates go in general registers. If we run out of
3534 FR registers, then FP values must also go in general registers. This can
3535 happen when we have an SFmode HFA. */
3536 else if (mode == TFmode || mode == TCmode
3537 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3538 {
3539 int byte_size = ((mode == BLKmode)
3540 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3541 if (BYTES_BIG_ENDIAN
3542 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3543 && byte_size < UNITS_PER_WORD
3544 && byte_size > 0)
3545 {
3546 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3547 gen_rtx_REG (DImode,
3548 (basereg + cum->words
3549 + offset)),
3550 const0_rtx);
3551 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3552 }
3553 else
3554 return gen_rtx_REG (mode, basereg + cum->words + offset);
3555
3556 }
3557
3558 /* If there is a prototype, then FP values go in a FR register when
3559 named, and in a GR register when unnamed. */
3560 else if (cum->prototype)
3561 {
3562 if (! named)
3563 return gen_rtx_REG (mode, basereg + cum->words + offset);
3564 else
3565 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3566 }
3567 /* If there is no prototype, then FP values go in both FR and GR
3568 registers. */
3569 else
3570 {
3571 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3572 gen_rtx_REG (mode, (FR_ARG_FIRST
3573 + cum->fp_regs)),
3574 const0_rtx);
3575 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3576 gen_rtx_REG (mode,
3577 (basereg + cum->words
3578 + offset)),
3579 const0_rtx);
3580
3581 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3582 }
3583 }
3584
3585 /* Return number of words, at the beginning of the argument, that must be
3586 put in registers. 0 if the argument is entirely in registers or entirely
3587 in memory. */
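/* Worked example (illustrative): a 48-byte BLKmode aggregate arriving when
   cum->words == 5 needs 6 words; 5 + 6 exceeds the 8 argument slots, so we
   return 3 -- three words are passed in registers and the rest go on the
   stack.  */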
3588
3589 int
3590 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3591 tree type, int named ATTRIBUTE_UNUSED)
3592 {
3593 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3594 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3595 / UNITS_PER_WORD);
3596 int offset = 0;
3597
3598 /* Arguments with alignment larger than 8 bytes start at the next even
3599 boundary. */
3600 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3601 : (words > 1))
3602 && (cum->words & 1))
3603 offset = 1;
3604
3605 /* If all argument slots are used, then it must go on the stack. */
3606 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3607 return 0;
3608
3609 /* It doesn't matter whether the argument goes in FR or GR regs. If
3610 it fits within the 8 argument slots, then it goes entirely in
3611 registers. If it extends past the last argument slot, then the rest
3612 goes on the stack. */
3613
3614 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3615 return 0;
3616
3617 return MAX_ARGUMENT_SLOTS - cum->words - offset;
3618 }
3619
3620 /* Update CUM to point after this argument. This is patterned after
3621 ia64_function_arg. */
3622
3623 void
3624 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3625 tree type, int named)
3626 {
3627 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3628 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3629 / UNITS_PER_WORD);
3630 int offset = 0;
3631 enum machine_mode hfa_mode = VOIDmode;
3632
3633 /* If all arg slots are already full, then there is nothing to do. */
3634 if (cum->words >= MAX_ARGUMENT_SLOTS)
3635 return;
3636
3637 /* Arguments with alignment larger than 8 bytes start at the next even
3638 boundary. */
3639 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3640 : (words > 1))
3641 && (cum->words & 1))
3642 offset = 1;
3643
3644 cum->words += words + offset;
3645
3646 /* Check for and handle homogeneous FP aggregates. */
3647 if (type)
3648 hfa_mode = hfa_element_mode (type, 0);
3649
3650 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3651 and unprototyped hfas are passed specially. */
3652 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3653 {
3654 int fp_regs = cum->fp_regs;
3655 /* This is the original value of cum->words + offset. */
3656 int int_regs = cum->words - words;
3657 int hfa_size = GET_MODE_SIZE (hfa_mode);
3658 int byte_size;
3659 int args_byte_size;
3660
3661 /* If prototyped, pass it in FR regs then GR regs.
3662 If not prototyped, pass it in both FR and GR regs.
3663
3664 If this is an SFmode aggregate, then it is possible to run out of
3665 FR regs while GR regs are still left. In that case, we pass the
3666 remaining part in the GR regs. */
3667
3668 /* Fill the FP regs. We do this always. We stop if we reach the end
3669 of the argument, the last FP register, or the last argument slot. */
3670
3671 byte_size = ((mode == BLKmode)
3672 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3673 args_byte_size = int_regs * UNITS_PER_WORD;
3674 offset = 0;
3675 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3676 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3677 {
3678 offset += hfa_size;
3679 args_byte_size += hfa_size;
3680 fp_regs++;
3681 }
3682
3683 cum->fp_regs = fp_regs;
3684 }
3685
3686 /* Integral types and aggregates go in general registers. If we run out of
3687 FR registers, then FP values must also go in general registers. This can
3688 happen when we have an SFmode HFA. */
3689 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3690 cum->int_regs = cum->words;
3691
3692 /* If there is a prototype, then FP values go in a FR register when
3693 named, and in a GR register when unnamed. */
3694 else if (cum->prototype)
3695 {
3696 if (! named)
3697 cum->int_regs = cum->words;
3698 else
3699 /* ??? Complex types should not reach here. */
3700 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3701 }
3702 /* If there is no prototype, then FP values go in both FR and GR
3703 registers. */
3704 else
3705 {
3706 /* ??? Complex types should not reach here. */
3707 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3708 cum->int_regs = cum->words;
3709 }
3710 }
3711
3712 /* Variable sized types are passed by reference. */
3713 /* ??? At present this is a GCC extension to the IA-64 ABI. */
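/* For instance (GNU C extension territory): an argument whose type has a
   non-constant size, such as a variably sized structure, has a TYPE_SIZE
   that is not an INTEGER_CST and is therefore passed by reference.  */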
3714
3715 int
3716 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3717 enum machine_mode mode ATTRIBUTE_UNUSED,
3718 tree type, int named ATTRIBUTE_UNUSED)
3719 {
3720 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3721 }
3722
3723 /* True if it is OK to do sibling call optimization for the specified
3724 call expression EXP. DECL will be the called function, or NULL if
3725 this is an indirect call. */
3726 static bool
3727 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3728 {
3729 /* We must always return with our current GP. This means we can
3730 only sibcall to functions defined in the current module. */
3731 return decl && (*targetm.binds_local_p) (decl);
3732 }
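/* Illustration (editor's note): a call to a static function in this
   translation unit satisfies binds_local_p and may become a sibcall; an
   indirect call (DECL is NULL) or a call to a preemptible global symbol
   does not, since the callee might require a different GP value.  */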
3733 \f
3734
3735 /* Implement va_arg. */
3736
3737 rtx
3738 ia64_va_arg (tree valist, tree type)
3739 {
3740 tree t;
3741
3742 /* Variable sized types are passed by reference. */
3743 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3744 {
3745 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
3746 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
3747 }
3748
3749 /* Arguments with alignment larger than 8 bytes start at the next even
3750 boundary. */
3751 if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3752 {
3753 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3754 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3755 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3756 build_int_2 (-2 * UNITS_PER_WORD, -1));
3757 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3758 TREE_SIDE_EFFECTS (t) = 1;
3759 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3760 }
3761
3762 return std_expand_builtin_va_arg (valist, type);
3763 }
3764 \f
3765 /* Return 1 if the function return value is returned in memory. Return 0 if it is
3766 in a register. */
3767
3768 int
3769 ia64_return_in_memory (tree valtype)
3770 {
3771 enum machine_mode mode;
3772 enum machine_mode hfa_mode;
3773 HOST_WIDE_INT byte_size;
3774
3775 mode = TYPE_MODE (valtype);
3776 byte_size = GET_MODE_SIZE (mode);
3777 if (mode == BLKmode)
3778 {
3779 byte_size = int_size_in_bytes (valtype);
3780 if (byte_size < 0)
3781 return 1;
3782 }
3783
3784 /* HFAs with up to 8 elements are returned in the FP argument registers. */
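  /* Illustrative example: a structure of eight doubles is an HFA of eight
     DFmode elements and is returned in the FP argument registers, whereas a
     structure of nine doubles exceeds MAX_ARGUMENT_SLOTS elements and is
     returned in memory.  */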
3785
3786 hfa_mode = hfa_element_mode (valtype, 0);
3787 if (hfa_mode != VOIDmode)
3788 {
3789 int hfa_size = GET_MODE_SIZE (hfa_mode);
3790
3791 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3792 return 1;
3793 else
3794 return 0;
3795 }
3796 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3797 return 1;
3798 else
3799 return 0;
3800 }
3801
3802 /* Return rtx for register that holds the function return value. */
3803
3804 rtx
3805 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3806 {
3807 enum machine_mode mode;
3808 enum machine_mode hfa_mode;
3809
3810 mode = TYPE_MODE (valtype);
3811 hfa_mode = hfa_element_mode (valtype, 0);
3812
3813 if (hfa_mode != VOIDmode)
3814 {
3815 rtx loc[8];
3816 int i;
3817 int hfa_size;
3818 int byte_size;
3819 int offset;
3820
3821 hfa_size = GET_MODE_SIZE (hfa_mode);
3822 byte_size = ((mode == BLKmode)
3823 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3824 offset = 0;
3825 for (i = 0; offset < byte_size; i++)
3826 {
3827 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3828 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3829 GEN_INT (offset));
3830 offset += hfa_size;
3831 }
3832
3833 if (i == 1)
3834 return XEXP (loc[0], 0);
3835 else
3836 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3837 }
3838 else if (FLOAT_TYPE_P (valtype) && mode != TFmode)
3839 return gen_rtx_REG (mode, FR_ARG_FIRST);
3840 else
3841 {
3842 if (BYTES_BIG_ENDIAN
3843 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3844 {
3845 rtx loc[8];
3846 int offset;
3847 int bytesize;
3848 int i;
3849
3850 offset = 0;
3851 bytesize = int_size_in_bytes (valtype);
3852 for (i = 0; offset < bytesize; i++)
3853 {
3854 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3855 gen_rtx_REG (DImode,
3856 GR_RET_FIRST + i),
3857 GEN_INT (offset));
3858 offset += UNITS_PER_WORD;
3859 }
3860 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3861 }
3862 else
3863 return gen_rtx_REG (mode, GR_RET_FIRST);
3864 }
3865 }
3866
3867 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3868 We need to emit DTP-relative relocations. */
3869
3870 void
3871 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3872 {
3873 if (size != 8)
3874 abort ();
3875 fputs ("\tdata8.ua\t@dtprel(", file);
3876 output_addr_const (file, x);
3877 fputs (")", file);
3878 }
3879
3880 /* Print a memory address as an operand to reference that memory location. */
3881
3882 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3883 also call this from ia64_print_operand for memory addresses. */
3884
3885 void
3886 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3887 rtx address ATTRIBUTE_UNUSED)
3888 {
3889 }
3890
3891 /* Print an operand to an assembler instruction.
3892 C Swap and print a comparison operator.
3893 D Print an FP comparison operator.
3894 E Print 32 - constant, for SImode shifts as extract.
3895 e Print 64 - constant, for DImode rotates.
3896 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3897 a floating point register emitted normally.
3898 I Invert a predicate register by adding 1.
3899 J Select the proper predicate register for a condition.
3900 j Select the inverse predicate register for a condition.
3901 O Append .acq for volatile load.
3902 P Postincrement of a MEM.
3903 Q Append .rel for volatile store.
3904 S Shift amount for shladd instruction.
3905 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3906 for Intel assembler.
3907 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3908 for Intel assembler.
3909 r Print register name, or constant 0 as r0. HP compatibility for
3910 Linux kernel. */
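/* A couple of illustrative cases (editor's sketch): %P on a POST_INC memory
   operand of an 8-byte mode prints ", 8", the post-increment amount; %O and
   %Q append ".acq" and ".rel" only when the MEM is marked volatile.  */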
3911 void
3912 ia64_print_operand (FILE * file, rtx x, int code)
3913 {
3914 const char *str;
3915
3916 switch (code)
3917 {
3918 case 0:
3919 /* Handled below. */
3920 break;
3921
3922 case 'C':
3923 {
3924 enum rtx_code c = swap_condition (GET_CODE (x));
3925 fputs (GET_RTX_NAME (c), file);
3926 return;
3927 }
3928
3929 case 'D':
3930 switch (GET_CODE (x))
3931 {
3932 case NE:
3933 str = "neq";
3934 break;
3935 case UNORDERED:
3936 str = "unord";
3937 break;
3938 case ORDERED:
3939 str = "ord";
3940 break;
3941 default:
3942 str = GET_RTX_NAME (GET_CODE (x));
3943 break;
3944 }
3945 fputs (str, file);
3946 return;
3947
3948 case 'E':
3949 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3950 return;
3951
3952 case 'e':
3953 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3954 return;
3955
3956 case 'F':
3957 if (x == CONST0_RTX (GET_MODE (x)))
3958 str = reg_names [FR_REG (0)];
3959 else if (x == CONST1_RTX (GET_MODE (x)))
3960 str = reg_names [FR_REG (1)];
3961 else if (GET_CODE (x) == REG)
3962 str = reg_names [REGNO (x)];
3963 else
3964 abort ();
3965 fputs (str, file);
3966 return;
3967
3968 case 'I':
3969 fputs (reg_names [REGNO (x) + 1], file);
3970 return;
3971
3972 case 'J':
3973 case 'j':
3974 {
3975 unsigned int regno = REGNO (XEXP (x, 0));
3976 if (GET_CODE (x) == EQ)
3977 regno += 1;
3978 if (code == 'j')
3979 regno ^= 1;
3980 fputs (reg_names [regno], file);
3981 }
3982 return;
3983
3984 case 'O':
3985 if (MEM_VOLATILE_P (x))
3986 fputs(".acq", file);
3987 return;
3988
3989 case 'P':
3990 {
3991 HOST_WIDE_INT value;
3992
3993 switch (GET_CODE (XEXP (x, 0)))
3994 {
3995 default:
3996 return;
3997
3998 case POST_MODIFY:
3999 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4000 if (GET_CODE (x) == CONST_INT)
4001 value = INTVAL (x);
4002 else if (GET_CODE (x) == REG)
4003 {
4004 fprintf (file, ", %s", reg_names[REGNO (x)]);
4005 return;
4006 }
4007 else
4008 abort ();
4009 break;
4010
4011 case POST_INC:
4012 value = GET_MODE_SIZE (GET_MODE (x));
4013 break;
4014
4015 case POST_DEC:
4016 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4017 break;
4018 }
4019
4020 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4021 return;
4022 }
4023
4024 case 'Q':
4025 if (MEM_VOLATILE_P (x))
4026 fputs(".rel", file);
4027 return;
4028
4029 case 'S':
4030 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4031 return;
4032
4033 case 'T':
4034 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4035 {
4036 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4037 return;
4038 }
4039 break;
4040
4041 case 'U':
4042 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4043 {
4044 const char *prefix = "0x";
4045 if (INTVAL (x) & 0x80000000)
4046 {
4047 fprintf (file, "0xffffffff");
4048 prefix = "";
4049 }
4050 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4051 return;
4052 }
4053 break;
4054
4055 case 'r':
4056 /* If this operand is the constant zero, write it as register zero.
4057 Any register, zero, or CONST_INT value is OK here. */
4058 if (GET_CODE (x) == REG)
4059 fputs (reg_names[REGNO (x)], file);
4060 else if (x == CONST0_RTX (GET_MODE (x)))
4061 fputs ("r0", file);
4062 else if (GET_CODE (x) == CONST_INT)
4063 output_addr_const (file, x);
4064 else
4065 output_operand_lossage ("invalid %%r value");
4066 return;
4067
4068 case '+':
4069 {
4070 const char *which;
4071
4072 /* For conditional branches, returns or calls, substitute
4073 sptk, dptk, dpnt, or spnt for %s. */
4074 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4075 if (x)
4076 {
4077 int pred_val = INTVAL (XEXP (x, 0));
4078
4079 /* Guess top and bottom 2% statically predicted, per the thresholds below. */
4080 if (pred_val < REG_BR_PROB_BASE / 50)
4081 which = ".spnt";
4082 else if (pred_val < REG_BR_PROB_BASE / 2)
4083 which = ".dpnt";
4084 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4085 which = ".dptk";
4086 else
4087 which = ".sptk";
4088 }
4089 else if (GET_CODE (current_output_insn) == CALL_INSN)
4090 which = ".sptk";
4091 else
4092 which = ".dptk";
4093
4094 fputs (which, file);
4095 return;
4096 }
4097
4098 case ',':
4099 x = current_insn_predicate;
4100 if (x)
4101 {
4102 unsigned int regno = REGNO (XEXP (x, 0));
4103 if (GET_CODE (x) == EQ)
4104 regno += 1;
4105 fprintf (file, "(%s) ", reg_names [regno]);
4106 }
4107 return;
4108
4109 default:
4110 output_operand_lossage ("ia64_print_operand: unknown code");
4111 return;
4112 }
4113
4114 switch (GET_CODE (x))
4115 {
4116 /* This happens for the spill/restore instructions. */
4117 case POST_INC:
4118 case POST_DEC:
4119 case POST_MODIFY:
4120 x = XEXP (x, 0);
4121 /* ... fall through ... */
4122
4123 case REG:
4124 fputs (reg_names [REGNO (x)], file);
4125 break;
4126
4127 case MEM:
4128 {
4129 rtx addr = XEXP (x, 0);
4130 if (GET_RTX_CLASS (GET_CODE (addr)) == 'a')
4131 addr = XEXP (addr, 0);
4132 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4133 break;
4134 }
4135
4136 default:
4137 output_addr_const (file, x);
4138 break;
4139 }
4140
4141 return;
4142 }
4143 \f
4144 /* Compute a (partial) cost for rtx X. Return true if the complete
4145 cost has been computed, and false if subexpressions should be
4146 scanned. In either case, *TOTAL contains the cost result. */
4147 /* ??? This is incomplete. */
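/* Rough illustration (editor's note): a CONST_INT used as the second operand
   of a PLUS is free if it satisfies CONST_OK_FOR_I, costs 1 if it only
   satisfies CONST_OK_FOR_J, and otherwise costs a full instruction; integer
   MULT wider than HImode is charged 10 instructions to account for the
   GR<->FR transfers around the FP multiplier.  */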
4148
4149 static bool
4150 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4151 {
4152 switch (code)
4153 {
4154 case CONST_INT:
4155 switch (outer_code)
4156 {
4157 case SET:
4158 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4159 return true;
4160 case PLUS:
4161 if (CONST_OK_FOR_I (INTVAL (x)))
4162 *total = 0;
4163 else if (CONST_OK_FOR_J (INTVAL (x)))
4164 *total = 1;
4165 else
4166 *total = COSTS_N_INSNS (1);
4167 return true;
4168 default:
4169 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4170 *total = 0;
4171 else
4172 *total = COSTS_N_INSNS (1);
4173 return true;
4174 }
4175
4176 case CONST_DOUBLE:
4177 *total = COSTS_N_INSNS (1);
4178 return true;
4179
4180 case CONST:
4181 case SYMBOL_REF:
4182 case LABEL_REF:
4183 *total = COSTS_N_INSNS (3);
4184 return true;
4185
4186 case MULT:
4187 /* For multiplies wider than HImode, we have to go to the FPU,
4188 which normally involves copies. Plus there's the latency
4189 of the multiply itself, and the latency of the instructions to
4190 transfer integer regs to FP regs. */
4191 /* ??? Check for FP mode. */
4192 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4193 *total = COSTS_N_INSNS (10);
4194 else
4195 *total = COSTS_N_INSNS (2);
4196 return true;
4197
4198 case PLUS:
4199 case MINUS:
4200 case ASHIFT:
4201 case ASHIFTRT:
4202 case LSHIFTRT:
4203 *total = COSTS_N_INSNS (1);
4204 return true;
4205
4206 case DIV:
4207 case UDIV:
4208 case MOD:
4209 case UMOD:
4210 /* We make divide expensive, so that divide-by-constant will be
4211 optimized to a multiply. */
4212 *total = COSTS_N_INSNS (60);
4213 return true;
4214
4215 default:
4216 return false;
4217 }
4218 }
4219
4220 /* Calculate the cost of moving data from a register in class FROM to
4221 one in class TO, using MODE. */
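/* Illustration (editor's sketch): an XFmode move between FR and GR registers
   is priced at MEMORY_MOVE_COST so that reload goes through memory; a
   PR-to-PR copy costs 3 because it takes two insns; ordinary GR and FR
   moves cost 2.  */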
4222
4223 int
4224 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4225 enum reg_class to)
4226 {
4227 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4228 if (to == ADDL_REGS)
4229 to = GR_REGS;
4230 if (from == ADDL_REGS)
4231 from = GR_REGS;
4232
4233 /* All costs are symmetric, so reduce cases by putting the
4234 lower number class as the destination. */
4235 if (from < to)
4236 {
4237 enum reg_class tmp = to;
4238 to = from, from = tmp;
4239 }
4240
4241 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4242 so that we get secondary memory reloads. Between FR_REGS,
4243 we have to make this at least as expensive as MEMORY_MOVE_COST
4244 to avoid spectacularly poor register class preferencing. */
4245 if (mode == XFmode)
4246 {
4247 if (to != GR_REGS || from != GR_REGS)
4248 return MEMORY_MOVE_COST (mode, to, 0);
4249 else
4250 return 3;
4251 }
4252
4253 switch (to)
4254 {
4255 case PR_REGS:
4256 /* Moving between PR registers takes two insns. */
4257 if (from == PR_REGS)
4258 return 3;
4259 /* Moving between PR and anything but GR is impossible. */
4260 if (from != GR_REGS)
4261 return MEMORY_MOVE_COST (mode, to, 0);
4262 break;
4263
4264 case BR_REGS:
4265 /* Moving between BR and anything but GR is impossible. */
4266 if (from != GR_REGS && from != GR_AND_BR_REGS)
4267 return MEMORY_MOVE_COST (mode, to, 0);
4268 break;
4269
4270 case AR_I_REGS:
4271 case AR_M_REGS:
4272 /* Moving between AR and anything but GR is impossible. */
4273 if (from != GR_REGS)
4274 return MEMORY_MOVE_COST (mode, to, 0);
4275 break;
4276
4277 case GR_REGS:
4278 case FR_REGS:
4279 case GR_AND_FR_REGS:
4280 case GR_AND_BR_REGS:
4281 case ALL_REGS:
4282 break;
4283
4284 default:
4285 abort ();
4286 }
4287
4288 return 2;
4289 }
4290
4291 /* This function returns the register class required for a secondary
4292 register when copying between one of the registers in CLASS, and X,
4293 using MODE. A return value of NO_REGS means that no secondary register
4294 is required. */
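/* Illustrative cases (editor's note): a CONST_INT destined for an FR
   register must first be loaded into a GR register, so GR_REGS is returned
   as the secondary class; likewise BR and AR destinations are only reachable
   from general registers, so anything else is bounced through GR_REGS.  */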
4295
4296 enum reg_class
4297 ia64_secondary_reload_class (enum reg_class class,
4298 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4299 {
4300 int regno = -1;
4301
4302 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4303 regno = true_regnum (x);
4304
4305 switch (class)
4306 {
4307 case BR_REGS:
4308 case AR_M_REGS:
4309 case AR_I_REGS:
4310 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4311 interaction. We end up with two pseudos with overlapping lifetimes
4312 both of which are equiv to the same constant, and both of which need
4313 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4314 changes depending on the path length, which means the qty_first_reg
4315 check in make_regs_eqv can give different answers at different times.
4316 At some point I'll probably need a reload_indi pattern to handle
4317 this.
4318
4319 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4320 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4321 non-general registers for good measure. */
4322 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4323 return GR_REGS;
4324
4325 /* This is needed if a pseudo used as a call_operand gets spilled to a
4326 stack slot. */
4327 if (GET_CODE (x) == MEM)
4328 return GR_REGS;
4329 break;
4330
4331 case FR_REGS:
4332 /* Need to go through general registers to get to other class regs. */
4333 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4334 return GR_REGS;
4335
4336 /* This can happen when a paradoxical subreg is an operand to the
4337 muldi3 pattern. */
4338 /* ??? This shouldn't be necessary after instruction scheduling is
4339 enabled, because paradoxical subregs are not accepted by
4340 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4341 stop the paradoxical subreg stupidity in the *_operand functions
4342 in recog.c. */
4343 if (GET_CODE (x) == MEM
4344 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4345 || GET_MODE (x) == QImode))
4346 return GR_REGS;
4347
4348 /* This can happen because of the ior/and/etc patterns that accept FP
4349 registers as operands. If the third operand is a constant, then it
4350 needs to be reloaded into a FP register. */
4351 if (GET_CODE (x) == CONST_INT)
4352 return GR_REGS;
4353
4354 /* This can happen because of register elimination in a muldi3 insn.
4355 E.g. `26107 * (unsigned long)&u'. */
4356 if (GET_CODE (x) == PLUS)
4357 return GR_REGS;
4358 break;
4359
4360 case PR_REGS:
4361 /* ??? This happens if we cse/gcse a BImode value across a call,
4362 and the function has a nonlocal goto. This is because global
4363 does not allocate call crossing pseudos to hard registers when
4364 current_function_has_nonlocal_goto is true. This is relatively
4365 common for C++ programs that use exceptions. To reproduce,
4366 return NO_REGS and compile libstdc++. */
4367 if (GET_CODE (x) == MEM)
4368 return GR_REGS;
4369
4370 /* This can happen when we take a BImode subreg of a DImode value,
4371 and that DImode value winds up in some non-GR register. */
4372 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4373 return GR_REGS;
4374 break;
4375
4376 case GR_REGS:
4377 /* Since we have no offsettable memory addresses, we need a temporary
4378 to hold the address of the second word. */
4379 if (mode == TImode)
4380 return GR_REGS;
4381 break;
4382
4383 default:
4384 break;
4385 }
4386
4387 return NO_REGS;
4388 }
4389
4390 \f
4391 /* Emit text to declare externally defined variables and functions, because
4392 the Intel assembler does not support undefined externals. */
4393
4394 void
4395 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4396 {
4397 int save_referenced;
4398
4399 /* GNU as does not need anything here, but the HP linker does need
4400 something for external functions. */
4401
4402 if (TARGET_GNU_AS
4403 && (!TARGET_HPUX_LD
4404 || TREE_CODE (decl) != FUNCTION_DECL
4405 || strstr (name, "__builtin_") == name))
4406 return;
4407
4408 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4409 the linker when we do this, so we need to be careful not to do this for
4410 builtin functions which have no library equivalent. Unfortunately, we
4411 can't tell here whether or not a function will actually be called by
4412 expand_expr, so we pull in library functions even if we may not need
4413 them later. */
4414 if (! strcmp (name, "__builtin_next_arg")
4415 || ! strcmp (name, "alloca")
4416 || ! strcmp (name, "__builtin_constant_p")
4417 || ! strcmp (name, "__builtin_args_info"))
4418 return;
4419
4420 if (TARGET_HPUX_LD)
4421 ia64_hpux_add_extern_decl (name);
4422 else
4423 {
4424 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4425 restore it. */
4426 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4427 if (TREE_CODE (decl) == FUNCTION_DECL)
4428 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4429 (*targetm.asm_out.globalize_label) (file, name);
4430 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4431 }
4432 }
4433 \f
4434 /* Parse the -mfixed-range= option string. */
4435
4436 static void
4437 fix_range (const char *const_str)
4438 {
4439 int i, first, last;
4440 char *str, *dash, *comma;
4441
4442 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4443 REG2 are either register names or register numbers. The effect
4444 of this option is to mark the registers in the range from REG1 to
4445 REG2 as ``fixed'' so they won't be used by the compiler. This is
4446 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
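  /* Example usage (hypothetical command lines): -mfixed-range=f32-f127 marks
     f32 through f127 as fixed and call-used, and a comma-separated list such
     as -mfixed-range=f32-f63,f96-f127 fixes two independent ranges.  */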
4447
4448 i = strlen (const_str);
4449 str = (char *) alloca (i + 1);
4450 memcpy (str, const_str, i + 1);
4451
4452 while (1)
4453 {
4454 dash = strchr (str, '-');
4455 if (!dash)
4456 {
4457 warning ("value of -mfixed-range must have form REG1-REG2");
4458 return;
4459 }
4460 *dash = '\0';
4461
4462 comma = strchr (dash + 1, ',');
4463 if (comma)
4464 *comma = '\0';
4465
4466 first = decode_reg_name (str);
4467 if (first < 0)
4468 {
4469 warning ("unknown register name: %s", str);
4470 return;
4471 }
4472
4473 last = decode_reg_name (dash + 1);
4474 if (last < 0)
4475 {
4476 warning ("unknown register name: %s", dash + 1);
4477 return;
4478 }
4479
4480 *dash = '-';
4481
4482 if (first > last)
4483 {
4484 warning ("%s-%s is an empty range", str, dash + 1);
4485 return;
4486 }
4487
4488 for (i = first; i <= last; ++i)
4489 fixed_regs[i] = call_used_regs[i] = 1;
4490
4491 if (!comma)
4492 break;
4493
4494 *comma = ',';
4495 str = comma + 1;
4496 }
4497 }
4498
4499 static struct machine_function *
4500 ia64_init_machine_status (void)
4501 {
4502 return ggc_alloc_cleared (sizeof (struct machine_function));
4503 }
4504
4505 /* Handle TARGET_OPTIONS switches. */
4506
4507 void
4508 ia64_override_options (void)
4509 {
4510 static struct pta
4511 {
4512 const char *const name; /* processor name or nickname. */
4513 const enum processor_type processor;
4514 }
4515 const processor_alias_table[] =
4516 {
4517 {"itanium", PROCESSOR_ITANIUM},
4518 {"itanium1", PROCESSOR_ITANIUM},
4519 {"merced", PROCESSOR_ITANIUM},
4520 {"itanium2", PROCESSOR_ITANIUM2},
4521 {"mckinley", PROCESSOR_ITANIUM2},
4522 };
4523
4524 int const pta_size = ARRAY_SIZE (processor_alias_table);
4525 int i;
4526
4527 if (TARGET_AUTO_PIC)
4528 target_flags |= MASK_CONST_GP;
4529
4530 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4531 {
4532 warning ("cannot optimize floating point division for both latency and throughput");
4533 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4534 }
4535
4536 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4537 {
4538 warning ("cannot optimize integer division for both latency and throughput");
4539 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4540 }
4541
4542 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4543 {
4544 warning ("cannot optimize square root for both latency and throughput");
4545 target_flags &= ~MASK_INLINE_SQRT_THR;
4546 }
4547
4548 if (TARGET_INLINE_SQRT_LAT)
4549 {
4550 warning ("not yet implemented: latency-optimized inline square root");
4551 target_flags &= ~MASK_INLINE_SQRT_LAT;
4552 }
4553
4554 if (ia64_fixed_range_string)
4555 fix_range (ia64_fixed_range_string);
4556
4557 if (ia64_tls_size_string)
4558 {
4559 char *end;
4560 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4561 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4562 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4563 else
4564 ia64_tls_size = tmp;
4565 }
4566
4567 if (!ia64_tune_string)
4568 ia64_tune_string = "itanium2";
4569
4570 for (i = 0; i < pta_size; i++)
4571 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4572 {
4573 ia64_tune = processor_alias_table[i].processor;
4574 break;
4575 }
4576
4577 if (i == pta_size)
4578 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4579
4580 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4581 flag_schedule_insns_after_reload = 0;
4582
4583 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4584
4585 init_machine_status = ia64_init_machine_status;
4586 }
4587 \f
4588 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4589 static enum attr_type ia64_safe_type (rtx);
4590
4591 static enum attr_itanium_class
4592 ia64_safe_itanium_class (rtx insn)
4593 {
4594 if (recog_memoized (insn) >= 0)
4595 return get_attr_itanium_class (insn);
4596 else
4597 return ITANIUM_CLASS_UNKNOWN;
4598 }
4599
4600 static enum attr_type
4601 ia64_safe_type (rtx insn)
4602 {
4603 if (recog_memoized (insn) >= 0)
4604 return get_attr_type (insn);
4605 else
4606 return TYPE_UNKNOWN;
4607 }
4608 \f
4609 /* The following collection of routines emits instruction group stop bits as
4610 necessary to avoid dependencies. */
4611
4612 /* Need to track some additional registers as far as serialization is
4613 concerned so we can properly handle br.call and br.ret. We could
4614 make these registers visible to gcc, but since these registers are
4615 never explicitly used in gcc generated code, it seems wasteful to
4616 do so (plus it would make the call and return patterns needlessly
4617 complex). */
4618 #define REG_GP (GR_REG (1))
4619 #define REG_RP (BR_REG (0))
4620 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4621 /* This is used for volatile asms which may require a stop bit immediately
4622 before and after them. */
4623 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4624 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4625 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4626
4627 /* For each register, we keep track of how it has been written in the
4628 current instruction group.
4629
4630 If a register is written unconditionally (no qualifying predicate),
4631 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4632
4633 If a register is written if its qualifying predicate P is true, we
4634 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4635 may be written again by the complement of P (P^1) and when this happens,
4636 WRITE_COUNT gets set to 2.
4637
4638 The result of this is that whenever an insn attempts to write a register
4639 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4640
4641 If a predicate register is written by a floating-point insn, we set
4642 WRITTEN_BY_FP to true.
4643
4644 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4645 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
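/* Illustrative sequence (pseudo-assembly, hypothetical predicates):
   "(p6) mov r8 = 1" records WRITE_COUNT = 1 with FIRST_PRED = p6; a later
   "(p7) mov r8 = 2", p7 being p6's complement, raises WRITE_COUNT to 2
   without a barrier; any further write to r8 in the same group then
   requires a stop bit first.  */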
4646
4647 struct reg_write_state
4648 {
4649 unsigned int write_count : 2;
4650 unsigned int first_pred : 16;
4651 unsigned int written_by_fp : 1;
4652 unsigned int written_by_and : 1;
4653 unsigned int written_by_or : 1;
4654 };
4655
4656 /* Cumulative info for the current instruction group. */
4657 struct reg_write_state rws_sum[NUM_REGS];
4658 /* Info for the current instruction. This gets copied to rws_sum after a
4659 stop bit is emitted. */
4660 struct reg_write_state rws_insn[NUM_REGS];
4661
4662 /* Indicates whether this is the first instruction after a stop bit,
4663 in which case we don't need another stop bit. Without this, we hit
4664 the abort in ia64_variable_issue when scheduling an alloc. */
4665 static int first_instruction;
4666
4667 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4668 RTL for one instruction. */
4669 struct reg_flags
4670 {
4671 unsigned int is_write : 1; /* Is register being written? */
4672 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4673 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4674 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4675 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4676 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4677 };
4678
4679 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4680 static int rws_access_regno (int, struct reg_flags, int);
4681 static int rws_access_reg (rtx, struct reg_flags, int);
4682 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4683 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4684 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4685 static void init_insn_group_barriers (void);
4686 static int group_barrier_needed_p (rtx);
4687 static int safe_group_barrier_needed_p (rtx);
4688
4689 /* Update *RWS for REGNO, which is being written by the current instruction,
4690 with predicate PRED, and associated register flags in FLAGS. */
4691
4692 static void
4693 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4694 {
4695 if (pred)
4696 rws[regno].write_count++;
4697 else
4698 rws[regno].write_count = 2;
4699 rws[regno].written_by_fp |= flags.is_fp;
4700 /* ??? Not tracking and/or across differing predicates. */
4701 rws[regno].written_by_and = flags.is_and;
4702 rws[regno].written_by_or = flags.is_or;
4703 rws[regno].first_pred = pred;
4704 }
4705
4706 /* Handle an access to register REGNO of type FLAGS using predicate register
4707 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4708 a dependency with an earlier instruction in the same group. */
4709
4710 static int
4711 rws_access_regno (int regno, struct reg_flags flags, int pred)
4712 {
4713 int need_barrier = 0;
4714
4715 if (regno >= NUM_REGS)
4716 abort ();
4717
4718 if (! PR_REGNO_P (regno))
4719 flags.is_and = flags.is_or = 0;
4720
4721 if (flags.is_write)
4722 {
4723 int write_count;
4724
4725 /* One insn writes same reg multiple times? */
4726 if (rws_insn[regno].write_count > 0)
4727 abort ();
4728
4729 /* Update info for current instruction. */
4730 rws_update (rws_insn, regno, flags, pred);
4731 write_count = rws_sum[regno].write_count;
4732
4733 switch (write_count)
4734 {
4735 case 0:
4736 /* The register has not been written yet. */
4737 rws_update (rws_sum, regno, flags, pred);
4738 break;
4739
4740 case 1:
4741 /* The register has been written via a predicate. If this is
4742 not a complementary predicate, then we need a barrier. */
4743 /* ??? This assumes that P and P+1 are always complementary
4744 predicates for P even. */
4745 if (flags.is_and && rws_sum[regno].written_by_and)
4746 ;
4747 else if (flags.is_or && rws_sum[regno].written_by_or)
4748 ;
4749 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4750 need_barrier = 1;
4751 rws_update (rws_sum, regno, flags, pred);
4752 break;
4753
4754 case 2:
4755 /* The register has been unconditionally written already. We
4756 need a barrier. */
4757 if (flags.is_and && rws_sum[regno].written_by_and)
4758 ;
4759 else if (flags.is_or && rws_sum[regno].written_by_or)
4760 ;
4761 else
4762 need_barrier = 1;
4763 rws_sum[regno].written_by_and = flags.is_and;
4764 rws_sum[regno].written_by_or = flags.is_or;
4765 break;
4766
4767 default:
4768 abort ();
4769 }
4770 }
4771 else
4772 {
4773 if (flags.is_branch)
4774 {
4775 /* Branches have several RAW exceptions that allow us to avoid
4776 barriers. */
4777
4778 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4779 /* RAW dependencies on branch regs are permissible as long
4780 as the writer is a non-branch instruction. Since we
4781 never generate code that uses a branch register written
4782 by a branch instruction, handling this case is
4783 easy. */
4784 return 0;
4785
4786 if (REGNO_REG_CLASS (regno) == PR_REGS
4787 && ! rws_sum[regno].written_by_fp)
4788 /* The predicates of a branch are available within the
4789 same insn group as long as the predicate was written by
4790 something other than a floating-point instruction. */
4791 return 0;
4792 }
4793
4794 if (flags.is_and && rws_sum[regno].written_by_and)
4795 return 0;
4796 if (flags.is_or && rws_sum[regno].written_by_or)
4797 return 0;
4798
4799 switch (rws_sum[regno].write_count)
4800 {
4801 case 0:
4802 /* The register has not been written yet. */
4803 break;
4804
4805 case 1:
4806 /* The register has been written via a predicate. If this is
4807 not a complementary predicate, then we need a barrier. */
4808 /* ??? This assumes that P and P+1 are always complementary
4809 predicates for P even. */
4810 if ((rws_sum[regno].first_pred ^ 1) != pred)
4811 need_barrier = 1;
4812 break;
4813
4814 case 2:
4815 /* The register has been unconditionally written already. We
4816 need a barrier. */
4817 need_barrier = 1;
4818 break;
4819
4820 default:
4821 abort ();
4822 }
4823 }
4824
4825 return need_barrier;
4826 }
4827
4828 static int
4829 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4830 {
4831 int regno = REGNO (reg);
4832 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4833
4834 if (n == 1)
4835 return rws_access_regno (regno, flags, pred);
4836 else
4837 {
4838 int need_barrier = 0;
4839 while (--n >= 0)
4840 need_barrier |= rws_access_regno (regno + n, flags, pred);
4841 return need_barrier;
4842 }
4843 }
4844
4845 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4846 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4847
4848 static void
4849 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4850 {
4851 rtx src = SET_SRC (x);
4852
4853 *pcond = 0;
4854
4855 switch (GET_CODE (src))
4856 {
4857 case CALL:
4858 return;
4859
4860 case IF_THEN_ELSE:
4861 if (SET_DEST (x) == pc_rtx)
4862 /* X is a conditional branch. */
4863 return;
4864 else
4865 {
4866 int is_complemented = 0;
4867
4868 /* X is a conditional move. */
4869 rtx cond = XEXP (src, 0);
4870 if (GET_CODE (cond) == EQ)
4871 is_complemented = 1;
4872 cond = XEXP (cond, 0);
4873 if (GET_CODE (cond) != REG
4874 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4875 abort ();
4876 *pcond = cond;
4877 if (XEXP (src, 1) == SET_DEST (x)
4878 || XEXP (src, 2) == SET_DEST (x))
4879 {
4880 /* X is a conditional move that conditionally writes the
4881 destination. */
4882
4883 /* We need another complement in this case. */
4884 if (XEXP (src, 1) == SET_DEST (x))
4885 is_complemented = ! is_complemented;
4886
4887 *ppred = REGNO (cond);
4888 if (is_complemented)
4889 ++*ppred;
4890 }
4891
4892 /* ??? If this is a conditional write to the dest, then this
4893 instruction does not actually read one source. This probably
4894 doesn't matter, because that source is also the dest. */
4895 /* ??? Multiple writes to predicate registers are allowed
4896 if they are all AND type compares, or if they are all OR
4897 type compares. We do not generate such instructions
4898 currently. */
4899 }
4900 /* ... fall through ... */
4901
4902 default:
4903 if (GET_RTX_CLASS (GET_CODE (src)) == '<'
4904 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4905 /* Set pflags->is_fp to 1 so that we know we're dealing
4906 with a floating point comparison when processing the
4907 destination of the SET. */
4908 pflags->is_fp = 1;
4909
4910 /* Discover if this is a parallel comparison. We only handle
4911 and.orcm and or.andcm at present, since we must retain a
4912 strict inverse on the predicate pair. */
4913 else if (GET_CODE (src) == AND)
4914 pflags->is_and = 1;
4915 else if (GET_CODE (src) == IOR)
4916 pflags->is_or = 1;
4917
4918 break;
4919 }
4920 }
4921
4922 /* Subroutine of rtx_needs_barrier; this function determines whether the
4923 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4924 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4925 for this insn. */
4926
4927 static int
4928 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4929 {
4930 int need_barrier = 0;
4931 rtx dst;
4932 rtx src = SET_SRC (x);
4933
4934 if (GET_CODE (src) == CALL)
4935 /* We don't need to worry about the result registers that
4936 get written by a subroutine call. */
4937 return rtx_needs_barrier (src, flags, pred);
4938 else if (SET_DEST (x) == pc_rtx)
4939 {
4940 /* X is a conditional branch. */
4941 /* ??? This seems redundant, as the caller sets this bit for
4942 all JUMP_INSNs. */
4943 flags.is_branch = 1;
4944 return rtx_needs_barrier (src, flags, pred);
4945 }
4946
4947 need_barrier = rtx_needs_barrier (src, flags, pred);
4948
4949 /* This instruction unconditionally uses a predicate register. */
4950 if (cond)
4951 need_barrier |= rws_access_reg (cond, flags, 0);
4952
4953 dst = SET_DEST (x);
4954 if (GET_CODE (dst) == ZERO_EXTRACT)
4955 {
4956 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4957 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4958 dst = XEXP (dst, 0);
4959 }
4960 return need_barrier;
4961 }
4962
4963 /* Handle an access to rtx X of type FLAGS using predicate register
4964 PRED. Return 1 if this access creates a dependency with an earlier
4965 instruction in the same group. */
4966
4967 static int
4968 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4969 {
4970 int i, j;
4971 int is_complemented = 0;
4972 int need_barrier = 0;
4973 const char *format_ptr;
4974 struct reg_flags new_flags;
4975 rtx cond = 0;
4976
4977 if (! x)
4978 return 0;
4979
4980 new_flags = flags;
4981
4982 switch (GET_CODE (x))
4983 {
4984 case SET:
4985 update_set_flags (x, &new_flags, &pred, &cond);
4986 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4987 if (GET_CODE (SET_SRC (x)) != CALL)
4988 {
4989 new_flags.is_write = 1;
4990 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4991 }
4992 break;
4993
4994 case CALL:
4995 new_flags.is_write = 0;
4996 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4997
4998 /* Avoid multiple register writes, in case this is a pattern with
4999 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5000 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5001 {
5002 new_flags.is_write = 1;
5003 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5004 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5005 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5006 }
5007 break;
5008
5009 case COND_EXEC:
5010 /* X is a predicated instruction. */
5011
5012 cond = COND_EXEC_TEST (x);
5013 if (pred)
5014 abort ();
5015 need_barrier = rtx_needs_barrier (cond, flags, 0);
5016
5017 if (GET_CODE (cond) == EQ)
5018 is_complemented = 1;
5019 cond = XEXP (cond, 0);
5020 if (GET_CODE (cond) != REG
5021 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5022 abort ();
5023 pred = REGNO (cond);
5024 if (is_complemented)
5025 ++pred;
5026
5027 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5028 return need_barrier;
5029
5030 case CLOBBER:
5031 case USE:
5032 /* Clobber & use are for earlier compiler-phases only. */
5033 break;
5034
5035 case ASM_OPERANDS:
5036 case ASM_INPUT:
5037 /* We always emit stop bits for traditional asms. We emit stop bits
5038 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5039 if (GET_CODE (x) != ASM_OPERANDS
5040 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5041 {
5042 /* Avoid writing the register multiple times if we have multiple
5043 asm outputs. This avoids an abort in rws_access_reg. */
5044 if (! rws_insn[REG_VOLATILE].write_count)
5045 {
5046 new_flags.is_write = 1;
5047 rws_access_regno (REG_VOLATILE, new_flags, pred);
5048 }
5049 return 1;
5050 }
5051
5052 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5053 We cannot just fall through here, since we would then be confused
5054 by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
5055 traditional asms the way they do in their normal usage. */
5056
5057 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5058 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5059 need_barrier = 1;
5060 break;
5061
5062 case PARALLEL:
5063 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5064 {
5065 rtx pat = XVECEXP (x, 0, i);
5066 if (GET_CODE (pat) == SET)
5067 {
5068 update_set_flags (pat, &new_flags, &pred, &cond);
5069 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
5070 }
5071 else if (GET_CODE (pat) == USE
5072 || GET_CODE (pat) == CALL
5073 || GET_CODE (pat) == ASM_OPERANDS)
5074 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5075 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5076 abort ();
5077 }
5078 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5079 {
5080 rtx pat = XVECEXP (x, 0, i);
5081 if (GET_CODE (pat) == SET)
5082 {
5083 if (GET_CODE (SET_SRC (pat)) != CALL)
5084 {
5085 new_flags.is_write = 1;
5086 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5087 pred);
5088 }
5089 }
5090 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5091 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5092 }
5093 break;
5094
5095 case SUBREG:
5096 x = SUBREG_REG (x);
5097 /* FALLTHRU */
5098 case REG:
5099 if (REGNO (x) == AR_UNAT_REGNUM)
5100 {
5101 for (i = 0; i < 64; ++i)
5102 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5103 }
5104 else
5105 need_barrier = rws_access_reg (x, flags, pred);
5106 break;
5107
5108 case MEM:
5109 /* Find the regs used in memory address computation. */
5110 new_flags.is_write = 0;
5111 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5112 break;
5113
5114 case CONST_INT: case CONST_DOUBLE:
5115 case SYMBOL_REF: case LABEL_REF: case CONST:
5116 break;
5117
5118 /* Operators with side-effects. */
5119 case POST_INC: case POST_DEC:
5120 if (GET_CODE (XEXP (x, 0)) != REG)
5121 abort ();
5122
5123 new_flags.is_write = 0;
5124 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5125 new_flags.is_write = 1;
5126 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5127 break;
5128
5129 case POST_MODIFY:
5130 if (GET_CODE (XEXP (x, 0)) != REG)
5131 abort ();
5132
5133 new_flags.is_write = 0;
5134 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5135 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5136 new_flags.is_write = 1;
5137 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5138 break;
5139
5140 /* Handle common unary and binary ops for efficiency. */
5141 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5142 case MOD: case UDIV: case UMOD: case AND: case IOR:
5143 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5144 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5145 case NE: case EQ: case GE: case GT: case LE:
5146 case LT: case GEU: case GTU: case LEU: case LTU:
5147 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5148 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5149 break;
5150
5151 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5152 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5153 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5154 case SQRT: case FFS: case POPCOUNT:
5155 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5156 break;
5157
5158 case UNSPEC:
5159 switch (XINT (x, 1))
5160 {
5161 case UNSPEC_LTOFF_DTPMOD:
5162 case UNSPEC_LTOFF_DTPREL:
5163 case UNSPEC_DTPREL:
5164 case UNSPEC_LTOFF_TPREL:
5165 case UNSPEC_TPREL:
5166 case UNSPEC_PRED_REL_MUTEX:
5167 case UNSPEC_PIC_CALL:
5168 case UNSPEC_MF:
5169 case UNSPEC_FETCHADD_ACQ:
5170 case UNSPEC_BSP_VALUE:
5171 case UNSPEC_FLUSHRS:
5172 case UNSPEC_BUNDLE_SELECTOR:
5173 break;
5174
5175 case UNSPEC_GR_SPILL:
5176 case UNSPEC_GR_RESTORE:
5177 {
5178 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5179 HOST_WIDE_INT bit = (offset >> 3) & 63;
5180
5181 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5182 new_flags.is_write = (XINT (x, 1) == 1);
5183 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5184 new_flags, pred);
5185 break;
5186 }
5187
5188 case UNSPEC_FR_SPILL:
5189 case UNSPEC_FR_RESTORE:
5190 case UNSPEC_GETF_EXP:
5191 case UNSPEC_SETF_EXP:
5192 case UNSPEC_ADDP4:
5193 case UNSPEC_FR_SQRT_RECIP_APPROX:
5194 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5195 break;
5196
5197 case UNSPEC_FR_RECIP_APPROX:
5198 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5199 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5200 break;
5201
5202 case UNSPEC_CMPXCHG_ACQ:
5203 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5204 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5205 break;
5206
5207 default:
5208 abort ();
5209 }
5210 break;
5211
5212 case UNSPEC_VOLATILE:
5213 switch (XINT (x, 1))
5214 {
5215 case UNSPECV_ALLOC:
5216 /* Alloc must always be the first instruction of a group.
5217 We force this by always returning true. */
5218 /* ??? We might get better scheduling if we explicitly check for
5219 input/local/output register dependencies, and modify the
5220 scheduler so that alloc is always reordered to the start of
5221 the current group. We could then eliminate all of the
5222 first_instruction code. */
5223 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5224
5225 new_flags.is_write = 1;
5226 rws_access_regno (REG_AR_CFM, new_flags, pred);
5227 return 1;
5228
5229 case UNSPECV_SET_BSP:
5230 need_barrier = 1;
5231 break;
5232
5233 case UNSPECV_BLOCKAGE:
5234 case UNSPECV_INSN_GROUP_BARRIER:
5235 case UNSPECV_BREAK:
5236 case UNSPECV_PSAC_ALL:
5237 case UNSPECV_PSAC_NORMAL:
5238 return 0;
5239
5240 default:
5241 abort ();
5242 }
5243 break;
5244
5245 case RETURN:
5246 new_flags.is_write = 0;
5247 need_barrier = rws_access_regno (REG_RP, flags, pred);
5248 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5249
5250 new_flags.is_write = 1;
5251 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5252 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5253 break;
5254
5255 default:
5256 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5257 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5258 switch (format_ptr[i])
5259 {
5260 case '0': /* unused field */
5261 case 'i': /* integer */
5262 case 'n': /* note */
5263 case 'w': /* wide integer */
5264 case 's': /* pointer to string */
5265 case 'S': /* optional pointer to string */
5266 break;
5267
5268 case 'e':
5269 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5270 need_barrier = 1;
5271 break;
5272
5273 case 'E':
5274 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5275 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5276 need_barrier = 1;
5277 break;
5278
5279 default:
5280 abort ();
5281 }
5282 break;
5283 }
5284 return need_barrier;
5285 }
5286
5287 /* Clear out the state for group_barrier_needed_p at the start of a
5288 sequence of insns. */
5289
5290 static void
5291 init_insn_group_barriers (void)
5292 {
5293 memset (rws_sum, 0, sizeof (rws_sum));
5294 first_instruction = 1;
5295 }
5296
5297 /* Given the current state, recorded by previous calls to this function,
5298 determine whether a group barrier (a stop bit) is necessary before INSN.
5299 Return nonzero if so. */
5300
5301 static int
5302 group_barrier_needed_p (rtx insn)
5303 {
5304 rtx pat;
5305 int need_barrier = 0;
5306 struct reg_flags flags;
5307
5308 memset (&flags, 0, sizeof (flags));
5309 switch (GET_CODE (insn))
5310 {
5311 case NOTE:
5312 break;
5313
5314 case BARRIER:
5315 /* A barrier doesn't imply an instruction group boundary. */
5316 break;
5317
5318 case CODE_LABEL:
5319 memset (rws_insn, 0, sizeof (rws_insn));
5320 return 1;
5321
5322 case CALL_INSN:
5323 flags.is_branch = 1;
5324 flags.is_sibcall = SIBLING_CALL_P (insn);
5325 memset (rws_insn, 0, sizeof (rws_insn));
5326
5327 /* Don't bundle a call following another call. */
5328 if ((pat = prev_active_insn (insn))
5329 && GET_CODE (pat) == CALL_INSN)
5330 {
5331 need_barrier = 1;
5332 break;
5333 }
5334
5335 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5336 break;
5337
5338 case JUMP_INSN:
5339 flags.is_branch = 1;
5340
5341 /* Don't bundle a jump following a call. */
5342 if ((pat = prev_active_insn (insn))
5343 && GET_CODE (pat) == CALL_INSN)
5344 {
5345 need_barrier = 1;
5346 break;
5347 }
5348 /* FALLTHRU */
5349
5350 case INSN:
5351 if (GET_CODE (PATTERN (insn)) == USE
5352 || GET_CODE (PATTERN (insn)) == CLOBBER)
5353 /* Don't care about USE and CLOBBER "insns"---those are used to
5354 indicate to the optimizer that it shouldn't get rid of
5355 certain operations. */
5356 break;
5357
5358 pat = PATTERN (insn);
5359
5360 /* Ug. Hack hacks hacked elsewhere. */
5361 switch (recog_memoized (insn))
5362 {
5363 /* We play dependency tricks with the epilogue in order
5364 to get proper schedules. Undo this for dv analysis. */
5365 case CODE_FOR_epilogue_deallocate_stack:
5366 case CODE_FOR_prologue_allocate_stack:
5367 pat = XVECEXP (pat, 0, 0);
5368 break;
5369
5370 /* The pattern we use for br.cloop confuses the code above.
5371 The second element of the vector is representative. */
5372 case CODE_FOR_doloop_end_internal:
5373 pat = XVECEXP (pat, 0, 1);
5374 break;
5375
5376 /* Doesn't generate code. */
5377 case CODE_FOR_pred_rel_mutex:
5378 case CODE_FOR_prologue_use:
5379 return 0;
5380
5381 default:
5382 break;
5383 }
5384
5385 memset (rws_insn, 0, sizeof (rws_insn));
5386 need_barrier = rtx_needs_barrier (pat, flags, 0);
5387
5388 /* Check to see if the previous instruction was a volatile
5389 asm. */
5390 if (! need_barrier)
5391 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5392 break;
5393
5394 default:
5395 abort ();
5396 }
5397
5398 if (first_instruction && INSN_P (insn)
5399 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5400 && GET_CODE (PATTERN (insn)) != USE
5401 && GET_CODE (PATTERN (insn)) != CLOBBER)
5402 {
5403 need_barrier = 0;
5404 first_instruction = 0;
5405 }
5406
5407 return need_barrier;
5408 }
5409
5410 /* Like group_barrier_needed_p, but do not clobber the current state. */
5411
5412 static int
5413 safe_group_barrier_needed_p (rtx insn)
5414 {
5415 struct reg_write_state rws_saved[NUM_REGS];
5416 int saved_first_instruction;
5417 int t;
5418
5419 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5420 saved_first_instruction = first_instruction;
5421
5422 t = group_barrier_needed_p (insn);
5423
5424 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5425 first_instruction = saved_first_instruction;
5426
5427 return t;
5428 }
5429
5430 /* Scan the current function and insert stop bits as necessary to
5431 eliminate dependencies. This function assumes that a final
5432 instruction scheduling pass has been run which has already
5433 inserted most of the necessary stop bits. This function only
5434 inserts new ones at basic block boundaries, since these are
5435 invisible to the scheduler. */
5436
5437 static void
5438 emit_insn_group_barriers (FILE *dump)
5439 {
5440 rtx insn;
5441 rtx last_label = 0;
5442 int insns_since_last_label = 0;
5443
5444 init_insn_group_barriers ();
5445
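/* last_label records the most recent label or basic-block note; if a later insn turns out to need a stop bit, the stop is emitted before that point and scanning restarts from it.  insns_since_last_label notes whether any real insn has been seen since the previous such point. */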
5446 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5447 {
5448 if (GET_CODE (insn) == CODE_LABEL)
5449 {
5450 if (insns_since_last_label)
5451 last_label = insn;
5452 insns_since_last_label = 0;
5453 }
5454 else if (GET_CODE (insn) == NOTE
5455 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5456 {
5457 if (insns_since_last_label)
5458 last_label = insn;
5459 insns_since_last_label = 0;
5460 }
5461 else if (GET_CODE (insn) == INSN
5462 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5463 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5464 {
5465 init_insn_group_barriers ();
5466 last_label = 0;
5467 }
5468 else if (INSN_P (insn))
5469 {
5470 insns_since_last_label = 1;
5471
5472 if (group_barrier_needed_p (insn))
5473 {
5474 if (last_label)
5475 {
5476 if (dump)
5477 fprintf (dump, "Emitting stop before label %d\n",
5478 INSN_UID (last_label));
5479 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5480 insn = last_label;
5481
5482 init_insn_group_barriers ();
5483 last_label = 0;
5484 }
5485 }
5486 }
5487 }
5488 }
5489
5490 /* Like emit_insn_group_barriers, but run when no final scheduling pass has been
5491 run.  This function must therefore emit all necessary group barriers itself. */
5492
5493 static void
5494 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5495 {
5496 rtx insn;
5497
5498 init_insn_group_barriers ();
5499
5500 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5501 {
5502 if (GET_CODE (insn) == BARRIER)
5503 {
5504 rtx last = prev_active_insn (insn);
5505
5506 if (! last)
5507 continue;
5508 if (GET_CODE (last) == JUMP_INSN
5509 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5510 last = prev_active_insn (last);
5511 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5512 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5513
5514 init_insn_group_barriers ();
5515 }
5516 else if (INSN_P (insn))
5517 {
5518 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5519 init_insn_group_barriers ();
5520 else if (group_barrier_needed_p (insn))
5521 {
5522 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5523 init_insn_group_barriers ();
5524 group_barrier_needed_p (insn);
5525 }
5526 }
5527 }
5528 }
5529
5530 \f
5531 static int errata_find_address_regs (rtx *, void *);
5532 static void errata_emit_nops (rtx);
5533 static void fixup_errata (void);
5534
5535 /* This structure is used to track some details about the previous insn
5536 groups so we can determine whether it is necessary to insert NOPs to
5537 work around hardware errata. */
5538 static struct group
5539 {
5540 HARD_REG_SET p_reg_set;
5541 HARD_REG_SET gr_reg_conditionally_set;
5542 } last_group[2];
5543
5544 /* Index into the last_group array. */
5545 static int group_idx;
5546
5547 /* Called through for_each_rtx; determines if a hard register that was
5548 conditionally set in the previous group is used as an address register.
5549 It ensures that for_each_rtx returns 1 in that case. */
5550 static int
5551 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5552 {
5553 rtx x = *xp;
5554 if (GET_CODE (x) != MEM)
5555 return 0;
5556 x = XEXP (x, 0);
5557 if (GET_CODE (x) == POST_MODIFY)
5558 x = XEXP (x, 0);
5559 if (GET_CODE (x) == REG)
5560 {
5561 struct group *prev_group = last_group + (group_idx ^ 1);
5562 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5563 REGNO (x)))
5564 return 1;
5565 return -1;
5566 }
5567 return 0;
5568 }
5569
5570 /* Called for each insn; this function keeps track of the state in
5571 last_group and emits additional NOPs if necessary to work around
5572 an Itanium A/B step erratum. */
5573 static void
5574 errata_emit_nops (rtx insn)
5575 {
5576 struct group *this_group = last_group + group_idx;
5577 struct group *prev_group = last_group + (group_idx ^ 1);
5578 rtx pat = PATTERN (insn);
5579 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5580 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5581 enum attr_type type;
5582 rtx set = real_pat;
5583
5584 if (GET_CODE (real_pat) == USE
5585 || GET_CODE (real_pat) == CLOBBER
5586 || GET_CODE (real_pat) == ASM_INPUT
5587 || GET_CODE (real_pat) == ADDR_VEC
5588 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5589 || asm_noperands (PATTERN (insn)) >= 0)
5590 return;
5591
5592 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5593 parts of it. */
5594
5595 if (GET_CODE (set) == PARALLEL)
5596 {
5597 int i;
5598 set = XVECEXP (real_pat, 0, 0);
5599 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5600 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5601 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5602 {
5603 set = 0;
5604 break;
5605 }
5606 }
5607
5608 if (set && GET_CODE (set) != SET)
5609 set = 0;
5610
5611 type = get_attr_type (insn);
5612
5613 if (type == TYPE_F
5614 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5615 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5616
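/* Detect general registers that are conditionally written by an M- or A-unit insn whose guarding predicate was produced by an F-unit insn in the previous group; such registers trigger the errata workaround if they are later used as an address. */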
5617 if ((type == TYPE_M || type == TYPE_A) && cond && set
5618 && REG_P (SET_DEST (set))
5619 && GET_CODE (SET_SRC (set)) != PLUS
5620 && GET_CODE (SET_SRC (set)) != MINUS
5621 && (GET_CODE (SET_SRC (set)) != ASHIFT
5622 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5623 && (GET_CODE (SET_SRC (set)) != MEM
5624 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5625 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5626 {
5627 if (GET_RTX_CLASS (GET_CODE (cond)) != '<'
5628 || ! REG_P (XEXP (cond, 0)))
5629 abort ();
5630
5631 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5632 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5633 }
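/* This insn uses such a conditionally written register as an address: separate it from the previous group with a stop bit, a nop and another stop bit, then restart the tracking. */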
5634 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5635 {
5636 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5637 emit_insn_before (gen_nop (), insn);
5638 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5639 group_idx = 0;
5640 memset (last_group, 0, sizeof last_group);
5641 }
5642 }
5643
5644 /* Emit extra nops if they are required to work around hardware errata. */
5645
5646 static void
5647 fixup_errata (void)
5648 {
5649 rtx insn;
5650
5651 if (! TARGET_B_STEP)
5652 return;
5653
5654 group_idx = 0;
5655 memset (last_group, 0, sizeof last_group);
5656
5657 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5658 {
5659 if (!INSN_P (insn))
5660 continue;
5661
5662 if (ia64_safe_type (insn) == TYPE_S)
5663 {
5664 group_idx ^= 1;
5665 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5666 }
5667 else
5668 errata_emit_nops (insn);
5669 }
5670 }
5671 \f
5672
5673 /* Instruction scheduling support. */
5674
5675 #define NR_BUNDLES 10
5676
5677 /* A list of names of all available bundles. */
5678
5679 static const char *bundle_name [NR_BUNDLES] =
5680 {
5681 ".mii",
5682 ".mmi",
5683 ".mfi",
5684 ".mmf",
5685 #if NR_BUNDLES == 10
5686 ".bbb",
5687 ".mbb",
5688 #endif
5689 ".mib",
5690 ".mmb",
5691 ".mfb",
5692 ".mlx"
5693 };
5694
5695 /* Nonzero if we should insert stop bits into the schedule. */
5696
5697 int ia64_final_schedule = 0;
5698
5699 /* Codes of the corresponding queried units: */
5700
5701 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5702 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5703
5704 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5705 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5706
5707 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5708
5709 /* The following variable value is an insn group barrier. */
5710
5711 static rtx dfa_stop_insn;
5712
5713 /* The following variable value is the last issued insn. */
5714
5715 static rtx last_scheduled_insn;
5716
5717 /* The following variable value is the size of the DFA state. */
5718
5719 static size_t dfa_state_size;
5720
5721 /* The following variable value is a pointer to a DFA state used as a
5722 temporary variable. */
5723
5724 static state_t temp_dfa_state = NULL;
5725
5726 /* The following variable value is the DFA state after issuing the last
5727 insn. */
5728
5729 static state_t prev_cycle_state = NULL;
5730
5731 /* The following array element values are TRUE if the corresponding
5732 insn requires a stop bit to be added before it. */
5733
5734 static char *stops_p;
5735
5736 /* The following variable is used to set up the array mentioned above. */
5737
5738 static int stop_before_p = 0;
5739
5740 /* The following variable value is the length of the arrays `clocks' and
5741 `add_cycles'. */
5742
5743 static int clocks_length;
5744
5745 /* The following array element values are cycles on which the
5746 corresponding insn will be issued. The array is used only for
5747 Itanium1. */
5748
5749 static int *clocks;
5750
5751 /* The following array element values are the numbers of cycles that should
5752 be added to improve insn scheduling of MM-insns for Itanium1. */
5753
5754 static int *add_cycles;
5755
5756 static rtx ia64_single_set (rtx);
5757 static void ia64_emit_insn_before (rtx, rtx);
5758
5759 /* Map a bundle number to its pseudo-op. */
5760
5761 const char *
5762 get_bundle_name (int b)
5763 {
5764 return bundle_name[b];
5765 }
5766
5767
5768 /* Return the maximum number of instructions a cpu can issue per cycle. */
5769
5770 static int
5771 ia64_issue_rate (void)
5772 {
5773 return 6;
5774 }
5775
5776 /* Helper function - like single_set, but look inside COND_EXEC. */
5777
5778 static rtx
5779 ia64_single_set (rtx insn)
5780 {
5781 rtx x = PATTERN (insn), ret;
5782 if (GET_CODE (x) == COND_EXEC)
5783 x = COND_EXEC_CODE (x);
5784 if (GET_CODE (x) == SET)
5785 return x;
5786
5787 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
5788 Although they are not classical single sets, the second set is there just
5789 to protect the first from being moved past FP-relative stack accesses. */
5790 switch (recog_memoized (insn))
5791 {
5792 case CODE_FOR_prologue_allocate_stack:
5793 case CODE_FOR_epilogue_deallocate_stack:
5794 ret = XVECEXP (x, 0, 0);
5795 break;
5796
5797 default:
5798 ret = single_set_2 (insn, x);
5799 break;
5800 }
5801
5802 return ret;
5803 }
5804
5805 /* Adjust the cost of a scheduling dependency. Return the new cost of
5806 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5807
5808 static int
5809 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5810 {
5811 enum attr_itanium_class dep_class;
5812 enum attr_itanium_class insn_class;
5813
5814 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5815 return cost;
5816
5817 insn_class = ia64_safe_itanium_class (insn);
5818 dep_class = ia64_safe_itanium_class (dep_insn);
5819 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5820 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5821 return 0;
5822
5823 return cost;
5824 }
5825
5826 /* Like emit_insn_before, but skip cycle_display notes.
5827 ??? When cycle display notes are implemented, update this. */
5828
5829 static void
5830 ia64_emit_insn_before (rtx insn, rtx before)
5831 {
5832 emit_insn_before (insn, before);
5833 }
5834
5835 /* The following function marks insns that produce addresses for load
5836 and store insns.  Such insns will be placed into M slots because that
5837 decreases latency for Itanium1 (see function
5838 `ia64_produce_address_p' and the DFA descriptions). */
5839
5840 static void
5841 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5842 {
5843 rtx insn, link, next, next_tail;
5844
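/* The `call' field of each insn is reused here as a flag: it is cleared first and then set for IALU insns that feed a load or store address (see ia64_produce_address_p). */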
5845 next_tail = NEXT_INSN (tail);
5846 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5847 if (INSN_P (insn))
5848 insn->call = 0;
5849 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5850 if (INSN_P (insn)
5851 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5852 {
5853 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5854 {
5855 next = XEXP (link, 0);
5856 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5857 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5858 && ia64_st_address_bypass_p (insn, next))
5859 break;
5860 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5861 || ia64_safe_itanium_class (next)
5862 == ITANIUM_CLASS_FLD)
5863 && ia64_ld_address_bypass_p (insn, next))
5864 break;
5865 }
5866 insn->call = link != 0;
5867 }
5868 }
5869
5870 /* We're beginning a new block. Initialize data structures as necessary. */
5871
5872 static void
5873 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5874 int sched_verbose ATTRIBUTE_UNUSED,
5875 int max_ready ATTRIBUTE_UNUSED)
5876 {
5877 #ifdef ENABLE_CHECKING
5878 rtx insn;
5879
5880 if (reload_completed)
5881 for (insn = NEXT_INSN (current_sched_info->prev_head);
5882 insn != current_sched_info->next_tail;
5883 insn = NEXT_INSN (insn))
5884 if (SCHED_GROUP_P (insn))
5885 abort ();
5886 #endif
5887 last_scheduled_insn = NULL_RTX;
5888 init_insn_group_barriers ();
5889 }
5890
5891 /* We are about to begin issuing insns for this clock cycle.
5892 Override the default sort algorithm to better slot instructions. */
5893
5894 static int
5895 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5896 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5897 int reorder_type)
5898 {
5899 int n_asms;
5900 int n_ready = *pn_ready;
5901 rtx *e_ready = ready + n_ready;
5902 rtx *insnp;
5903
5904 if (sched_verbose)
5905 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5906
5907 if (reorder_type == 0)
5908 {
5909 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5910 n_asms = 0;
5911 for (insnp = ready; insnp < e_ready; insnp++)
5912 if (insnp < e_ready)
5913 {
5914 rtx insn = *insnp;
5915 enum attr_type t = ia64_safe_type (insn);
5916 if (t == TYPE_UNKNOWN)
5917 {
5918 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5919 || asm_noperands (PATTERN (insn)) >= 0)
5920 {
5921 rtx lowest = ready[n_asms];
5922 ready[n_asms] = insn;
5923 *insnp = lowest;
5924 n_asms++;
5925 }
5926 else
5927 {
5928 rtx highest = ready[n_ready - 1];
5929 ready[n_ready - 1] = insn;
5930 *insnp = highest;
5931 return 1;
5932 }
5933 }
5934 }
5935
5936 if (n_asms < n_ready)
5937 {
5938 /* Some normal insns to process. Skip the asms. */
5939 ready += n_asms;
5940 n_ready -= n_asms;
5941 }
5942 else if (n_ready > 0)
5943 return 1;
5944 }
5945
5946 if (ia64_final_schedule)
5947 {
5948 int deleted = 0;
5949 int nr_need_stop = 0;
5950
5951 for (insnp = ready; insnp < e_ready; insnp++)
5952 if (safe_group_barrier_needed_p (*insnp))
5953 nr_need_stop++;
5954
5955 if (reorder_type == 1 && n_ready == nr_need_stop)
5956 return 0;
5957 if (reorder_type == 0)
5958 return 1;
5959 insnp = e_ready;
5960 /* Move down everything that needs a stop bit, preserving
5961 relative order. */
5962 while (insnp-- > ready + deleted)
5963 while (insnp >= ready + deleted)
5964 {
5965 rtx insn = *insnp;
5966 if (! safe_group_barrier_needed_p (insn))
5967 break;
5968 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5969 *ready = insn;
5970 deleted++;
5971 }
5972 n_ready -= deleted;
5973 ready += deleted;
5974 }
5975
5976 return 1;
5977 }
5978
5979 /* We are about to begin issuing insns for this clock cycle.  Override
5980 the default sort algorithm to better slot instructions. */
5981
5982 static int
5983 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5984 int clock_var)
5985 {
5986 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5987 pn_ready, clock_var, 0);
5988 }
5989
5990 /* Like ia64_sched_reorder, but called after issuing each insn.
5991 Override the default sort algorithm to better slot instructions. */
5992
5993 static int
5994 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5995 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5996 int *pn_ready, int clock_var)
5997 {
5998 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5999 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6000 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6001 clock_var, 1);
6002 }
6003
6004 /* We are about to issue INSN. Return the number of insns left on the
6005 ready queue that can be issued this cycle. */
6006
6007 static int
6008 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6009 int sched_verbose ATTRIBUTE_UNUSED,
6010 rtx insn ATTRIBUTE_UNUSED,
6011 int can_issue_more ATTRIBUTE_UNUSED)
6012 {
6013 last_scheduled_insn = insn;
6014 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6015 if (reload_completed)
6016 {
6017 if (group_barrier_needed_p (insn))
6018 abort ();
6019 if (GET_CODE (insn) == CALL_INSN)
6020 init_insn_group_barriers ();
6021 stops_p [INSN_UID (insn)] = stop_before_p;
6022 stop_before_p = 0;
6023 }
6024 return 1;
6025 }
6026
6027 /* We are choosing an insn from the ready queue.  Return nonzero if INSN
6028 can be chosen. */
6029
6030 static int
6031 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6032 {
6033 if (insn == NULL_RTX || !INSN_P (insn))
6034 abort ();
6035 return (!reload_completed
6036 || !safe_group_barrier_needed_p (insn));
6037 }
6038
6039 /* The following variable value is a pseudo-insn used by the DFA insn
6040 scheduler to change the DFA state when the simulated clock is
6041 increased. */
6042
6043 static rtx dfa_pre_cycle_insn;
6044
6045 /* We are about to begin issuing INSN.  Return nonzero if we cannot
6046 issue it on the given cycle CLOCK; *SORT_P is cleared if the ready
6047 queue should not be sorted on the next clock start. */
6048
6049 static int
6050 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6051 int clock, int *sort_p)
6052 {
6053 int setup_clocks_p = FALSE;
6054
6055 if (insn == NULL_RTX || !INSN_P (insn))
6056 abort ();
6057 if ((reload_completed && safe_group_barrier_needed_p (insn))
6058 || (last_scheduled_insn
6059 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6060 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6061 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6062 {
6063 init_insn_group_barriers ();
6064 if (verbose && dump)
6065 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6066 last_clock == clock ? " + cycle advance" : "");
6067 stop_before_p = 1;
6068 if (last_clock == clock)
6069 {
6070 state_transition (curr_state, dfa_stop_insn);
6071 if (TARGET_EARLY_STOP_BITS)
6072 *sort_p = (last_scheduled_insn == NULL_RTX
6073 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6074 else
6075 *sort_p = 0;
6076 return 1;
6077 }
6078 else if (reload_completed)
6079 setup_clocks_p = TRUE;
6080 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6081 state_transition (curr_state, dfa_stop_insn);
6082 state_transition (curr_state, dfa_pre_cycle_insn);
6083 state_transition (curr_state, NULL);
6084 }
6085 else if (reload_completed)
6086 setup_clocks_p = TRUE;
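/* On Itanium1, when this insn depends on an MM multiply or shift issued fewer than 4 cycles earlier, record how many extra cycles must be inserted before it; `bundling' later materializes them as nop bundles. */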
6087 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM)
6088 {
6089 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6090
6091 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6092 {
6093 rtx link;
6094 int d = -1;
6095
6096 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6097 if (REG_NOTE_KIND (link) == 0)
6098 {
6099 enum attr_itanium_class dep_class;
6100 rtx dep_insn = XEXP (link, 0);
6101
6102 dep_class = ia64_safe_itanium_class (dep_insn);
6103 if ((dep_class == ITANIUM_CLASS_MMMUL
6104 || dep_class == ITANIUM_CLASS_MMSHF)
6105 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6106 && (d < 0
6107 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6108 d = last_clock - clocks [INSN_UID (dep_insn)];
6109 }
6110 if (d >= 0)
6111 add_cycles [INSN_UID (insn)] = 3 - d;
6112 }
6113 }
6114 return 0;
6115 }
6116
6117 \f
6118
6119 /* The following page contains abstract data `bundle states' which are
6120 used for bundling insns (inserting nops and generating templates). */
6121
6122 /* The following describes the state of insn bundling. */
6123
6124 struct bundle_state
6125 {
6126 /* Unique bundle state number to identify them in the debugging
6127 output */
6128 int unique_num;
6129 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6130 /* number of nops before and after the insn */
6131 short before_nops_num, after_nops_num;
6132 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
6133 insn) */
6134 int cost; /* cost of the state in cycles */
6135 int accumulated_insns_num; /* number of all previous insns including
6136 nops; an L insn is counted as 2 insns */
6137 int branch_deviation; /* deviation of previous branches from 3rd slots */
6138 struct bundle_state *next; /* next state with the same insn_num */
6139 struct bundle_state *originator; /* originator (previous insn state) */
6140 /* All bundle states are in the following chain. */
6141 struct bundle_state *allocated_states_chain;
6142 /* The DFA State after issuing the insn and the nops. */
6143 state_t dfa_state;
6144 };
6145
6146 /* The following maps an insn number to the corresponding bundle state. */
6147
6148 static struct bundle_state **index_to_bundle_states;
6149
6150 /* The unique number of the next bundle state. */
6151
6152 static int bundle_states_num;
6153
6154 /* All allocated bundle states are in the following chain. */
6155
6156 static struct bundle_state *allocated_bundle_states_chain;
6157
6158 /* All allocated but not used bundle states are in the following
6159 chain. */
6160
6161 static struct bundle_state *free_bundle_state_chain;
6162
6163
6164 /* The following function returns a free bundle state. */
6165
6166 static struct bundle_state *
6167 get_free_bundle_state (void)
6168 {
6169 struct bundle_state *result;
6170
6171 if (free_bundle_state_chain != NULL)
6172 {
6173 result = free_bundle_state_chain;
6174 free_bundle_state_chain = result->next;
6175 }
6176 else
6177 {
6178 result = xmalloc (sizeof (struct bundle_state));
6179 result->dfa_state = xmalloc (dfa_state_size);
6180 result->allocated_states_chain = allocated_bundle_states_chain;
6181 allocated_bundle_states_chain = result;
6182 }
6183 result->unique_num = bundle_states_num++;
6184 return result;
6185
6186 }
6187
6188 /* The following function frees the given bundle state. */
6189
6190 static void
6191 free_bundle_state (struct bundle_state *state)
6192 {
6193 state->next = free_bundle_state_chain;
6194 free_bundle_state_chain = state;
6195 }
6196
6197 /* Start work with abstract data `bundle states'. */
6198
6199 static void
6200 initiate_bundle_states (void)
6201 {
6202 bundle_states_num = 0;
6203 free_bundle_state_chain = NULL;
6204 allocated_bundle_states_chain = NULL;
6205 }
6206
6207 /* Finish work with abstract data `bundle states'. */
6208
6209 static void
6210 finish_bundle_states (void)
6211 {
6212 struct bundle_state *curr_state, *next_state;
6213
6214 for (curr_state = allocated_bundle_states_chain;
6215 curr_state != NULL;
6216 curr_state = next_state)
6217 {
6218 next_state = curr_state->allocated_states_chain;
6219 free (curr_state->dfa_state);
6220 free (curr_state);
6221 }
6222 }
6223
6224 /* Hash table of the bundle states.  The key is the dfa_state and insn_num
6225 of the bundle state. */
6226
6227 static htab_t bundle_state_table;
6228
6229 /* The function returns a hash of BUNDLE_STATE. */
6230
6231 static unsigned
6232 bundle_state_hash (const void *bundle_state)
6233 {
6234 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6235 unsigned result, i;
6236
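/* Fold every byte of the DFA state into the hash and then add the insn number, so equal DFA states at different insn positions still hash differently. */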
6237 for (result = i = 0; i < dfa_state_size; i++)
6238 result += (((unsigned char *) state->dfa_state) [i]
6239 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6240 return result + state->insn_num;
6241 }
6242
6243 /* The function returns nonzero if the bundle state keys are equal. */
6244
6245 static int
6246 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6247 {
6248 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6249 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6250
6251 return (state1->insn_num == state2->insn_num
6252 && memcmp (state1->dfa_state, state2->dfa_state,
6253 dfa_state_size) == 0);
6254 }
6255
6256 /* The function inserts the BUNDLE_STATE into the hash table.  The
6257 function returns nonzero if the bundle state has been inserted into
6258 the table.  The table contains the best bundle state with the given key. */
6259
6260 static int
6261 insert_bundle_state (struct bundle_state *bundle_state)
6262 {
6263 void **entry_ptr;
6264
6265 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6266 if (*entry_ptr == NULL)
6267 {
6268 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6269 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6270 *entry_ptr = (void *) bundle_state;
6271 return TRUE;
6272 }
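/* A bundle state with the same key already exists; keep whichever is better: lower cost, then fewer accumulated insns (fewer nops), then smaller branch deviation. */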
6273 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6274 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6275 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6276 > bundle_state->accumulated_insns_num
6277 || (((struct bundle_state *)
6278 *entry_ptr)->accumulated_insns_num
6279 == bundle_state->accumulated_insns_num
6280 && ((struct bundle_state *)
6281 *entry_ptr)->branch_deviation
6282 > bundle_state->branch_deviation))))
6283
6284 {
6285 struct bundle_state temp;
6286
6287 temp = *(struct bundle_state *) *entry_ptr;
6288 *(struct bundle_state *) *entry_ptr = *bundle_state;
6289 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6290 *bundle_state = temp;
6291 }
6292 return FALSE;
6293 }
6294
6295 /* Start work with the hash table. */
6296
6297 static void
6298 initiate_bundle_state_table (void)
6299 {
6300 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6301 (htab_del) 0);
6302 }
6303
6304 /* Finish work with the hash table. */
6305
6306 static void
6307 finish_bundle_state_table (void)
6308 {
6309 htab_delete (bundle_state_table);
6310 }
6311
6312 \f
6313
6314 /* The following variable is an insn `nop' used to check bundle states
6315 with different numbers of inserted nops. */
6316
6317 static rtx ia64_nop;
6318
6319 /* The following function tries to issue NOPS_NUM nops for the current
6320 state without advancing the processor cycle.  If it fails, the
6321 function returns FALSE and frees the current state. */
6322
6323 static int
6324 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6325 {
6326 int i;
6327
6328 for (i = 0; i < nops_num; i++)
6329 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6330 {
6331 free_bundle_state (curr_state);
6332 return FALSE;
6333 }
6334 return TRUE;
6335 }
6336
6337 /* The following function tries to issue INSN for the current
6338 state without advancing the processor cycle.  If it fails, the
6339 function returns FALSE and frees the current state. */
6340
6341 static int
6342 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6343 {
6344 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6345 {
6346 free_bundle_state (curr_state);
6347 return FALSE;
6348 }
6349 return TRUE;
6350 }
6351
6352 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6353 starting with ORIGINATOR without advancing the processor cycle.  If
6354 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6355 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole
6356 bundle.  If it is successful, the function creates a new bundle state
6357 and inserts it into the hash table and into `index_to_bundle_states'. */
6358
6359 static void
6360 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6361 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6362 {
6363 struct bundle_state *curr_state;
6364
6365 curr_state = get_free_bundle_state ();
6366 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6367 curr_state->insn = insn;
6368 curr_state->insn_num = originator->insn_num + 1;
6369 curr_state->cost = originator->cost;
6370 curr_state->originator = originator;
6371 curr_state->before_nops_num = before_nops_num;
6372 curr_state->after_nops_num = 0;
6373 curr_state->accumulated_insns_num
6374 = originator->accumulated_insns_num + before_nops_num;
6375 curr_state->branch_deviation = originator->branch_deviation;
6376 if (insn == NULL_RTX)
6377 abort ();
6378 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6379 {
6380 if (GET_MODE (insn) == TImode)
6381 abort ();
6382 if (!try_issue_nops (curr_state, before_nops_num))
6383 return;
6384 if (!try_issue_insn (curr_state, insn))
6385 return;
6386 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6387 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6388 && curr_state->accumulated_insns_num % 3 != 0)
6389 {
6390 free_bundle_state (curr_state);
6391 return;
6392 }
6393 }
6394 else if (GET_MODE (insn) != TImode)
6395 {
6396 if (!try_issue_nops (curr_state, before_nops_num))
6397 return;
6398 if (!try_issue_insn (curr_state, insn))
6399 return;
6400 curr_state->accumulated_insns_num++;
6401 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6402 || asm_noperands (PATTERN (insn)) >= 0)
6403 abort ();
6404 if (ia64_safe_type (insn) == TYPE_L)
6405 curr_state->accumulated_insns_num++;
6406 }
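/* The insn starts a new processor cycle (it was marked with TImode by the 2nd scheduling pass): advance the DFA over the cycle boundary and account for the extra cycle in the cost. */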
6407 else
6408 {
6409 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6410 state_transition (curr_state->dfa_state, NULL);
6411 curr_state->cost++;
6412 if (!try_issue_nops (curr_state, before_nops_num))
6413 return;
6414 if (!try_issue_insn (curr_state, insn))
6415 return;
6416 curr_state->accumulated_insns_num++;
6417 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6418 || asm_noperands (PATTERN (insn)) >= 0)
6419 {
6420 /* Finish bundle containing asm insn. */
6421 curr_state->after_nops_num
6422 = 3 - curr_state->accumulated_insns_num % 3;
6423 curr_state->accumulated_insns_num
6424 += 3 - curr_state->accumulated_insns_num % 3;
6425 }
6426 else if (ia64_safe_type (insn) == TYPE_L)
6427 curr_state->accumulated_insns_num++;
6428 }
6429 if (ia64_safe_type (insn) == TYPE_B)
6430 curr_state->branch_deviation
6431 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6432 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6433 {
6434 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6435 {
6436 state_t dfa_state;
6437 struct bundle_state *curr_state1;
6438 struct bundle_state *allocated_states_chain;
6439
6440 curr_state1 = get_free_bundle_state ();
6441 dfa_state = curr_state1->dfa_state;
6442 allocated_states_chain = curr_state1->allocated_states_chain;
6443 *curr_state1 = *curr_state;
6444 curr_state1->dfa_state = dfa_state;
6445 curr_state1->allocated_states_chain = allocated_states_chain;
6446 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6447 dfa_state_size);
6448 curr_state = curr_state1;
6449 }
6450 if (!try_issue_nops (curr_state,
6451 3 - curr_state->accumulated_insns_num % 3))
6452 return;
6453 curr_state->after_nops_num
6454 = 3 - curr_state->accumulated_insns_num % 3;
6455 curr_state->accumulated_insns_num
6456 += 3 - curr_state->accumulated_insns_num % 3;
6457 }
6458 if (!insert_bundle_state (curr_state))
6459 free_bundle_state (curr_state);
6460 return;
6461 }
6462
6463 /* The following function returns the position in the two-bundle window
6464 for the given STATE. */
6465
6466 static int
6467 get_max_pos (state_t state)
6468 {
6469 if (cpu_unit_reservation_p (state, pos_6))
6470 return 6;
6471 else if (cpu_unit_reservation_p (state, pos_5))
6472 return 5;
6473 else if (cpu_unit_reservation_p (state, pos_4))
6474 return 4;
6475 else if (cpu_unit_reservation_p (state, pos_3))
6476 return 3;
6477 else if (cpu_unit_reservation_p (state, pos_2))
6478 return 2;
6479 else if (cpu_unit_reservation_p (state, pos_1))
6480 return 1;
6481 else
6482 return 0;
6483 }
6484
6485 /* The function returns the code of a possible template for the given
6486 position and state.  The function should be called only with position
6487 values equal to 3 or 6. */
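/* The returned code is an index into bundle_name[]: 0 is .mii, 1 is .mmi, ..., 9 is .mlx. */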
6488
6489 static int
6490 get_template (state_t state, int pos)
6491 {
6492 switch (pos)
6493 {
6494 case 3:
6495 if (cpu_unit_reservation_p (state, _0mii_))
6496 return 0;
6497 else if (cpu_unit_reservation_p (state, _0mmi_))
6498 return 1;
6499 else if (cpu_unit_reservation_p (state, _0mfi_))
6500 return 2;
6501 else if (cpu_unit_reservation_p (state, _0mmf_))
6502 return 3;
6503 else if (cpu_unit_reservation_p (state, _0bbb_))
6504 return 4;
6505 else if (cpu_unit_reservation_p (state, _0mbb_))
6506 return 5;
6507 else if (cpu_unit_reservation_p (state, _0mib_))
6508 return 6;
6509 else if (cpu_unit_reservation_p (state, _0mmb_))
6510 return 7;
6511 else if (cpu_unit_reservation_p (state, _0mfb_))
6512 return 8;
6513 else if (cpu_unit_reservation_p (state, _0mlx_))
6514 return 9;
6515 else
6516 abort ();
6517 case 6:
6518 if (cpu_unit_reservation_p (state, _1mii_))
6519 return 0;
6520 else if (cpu_unit_reservation_p (state, _1mmi_))
6521 return 1;
6522 else if (cpu_unit_reservation_p (state, _1mfi_))
6523 return 2;
6524 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6525 return 3;
6526 else if (cpu_unit_reservation_p (state, _1bbb_))
6527 return 4;
6528 else if (cpu_unit_reservation_p (state, _1mbb_))
6529 return 5;
6530 else if (cpu_unit_reservation_p (state, _1mib_))
6531 return 6;
6532 else if (cpu_unit_reservation_p (state, _1mmb_))
6533 return 7;
6534 else if (cpu_unit_reservation_p (state, _1mfb_))
6535 return 8;
6536 else if (cpu_unit_reservation_p (state, _1mlx_))
6537 return 9;
6538 else
6539 abort ();
6540 default:
6541 abort ();
6542 }
6543 }
6544
6545 /* The following function returns the first insn important for insn
6546 bundling starting at INSN and before TAIL. */
6547
6548 static rtx
6549 get_next_important_insn (rtx insn, rtx tail)
6550 {
6551 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6552 if (INSN_P (insn)
6553 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6554 && GET_CODE (PATTERN (insn)) != USE
6555 && GET_CODE (PATTERN (insn)) != CLOBBER)
6556 return insn;
6557 return NULL_RTX;
6558 }
6559
6560 /* The following function does insn bundling.  The bundling algorithm is
6561 based on dynamic programming.  It tries to insert different numbers of
6562 nop insns before/after the real insns.  At the end of the EBB, it
6563 chooses the best alternative and then, moving backward through the EBB,
6564 inserts templates for that alternative.  The algorithm is directed by
6565 information (changes of the simulated processor cycle) created by the
6566 2nd insn scheduling pass. */
6567
6568 static void
6569 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6570 {
6571 struct bundle_state *curr_state, *next_state, *best_state;
6572 rtx insn, next_insn;
6573 int insn_num;
6574 int i, bundle_end_p, only_bundle_end_p, asm_p;
6575 int pos = 0, max_pos, template0, template1;
6576 rtx b;
6577 rtx nop;
6578 enum attr_type type;
6579
6580 insn_num = 0;
6581 for (insn = NEXT_INSN (prev_head_insn);
6582 insn && insn != tail;
6583 insn = NEXT_INSN (insn))
6584 if (INSN_P (insn))
6585 insn_num++;
6586 if (insn_num == 0)
6587 return;
6588 bundling_p = 1;
6589 dfa_clean_insn_cache ();
6590 initiate_bundle_state_table ();
6591 index_to_bundle_states = xmalloc ((insn_num + 2)
6592 * sizeof (struct bundle_state *));
6593 /* First (forward) pass -- generates states. */
6594 curr_state = get_free_bundle_state ();
6595 curr_state->insn = NULL;
6596 curr_state->before_nops_num = 0;
6597 curr_state->after_nops_num = 0;
6598 curr_state->insn_num = 0;
6599 curr_state->cost = 0;
6600 curr_state->accumulated_insns_num = 0;
6601 curr_state->branch_deviation = 0;
6602 curr_state->next = NULL;
6603 curr_state->originator = NULL;
6604 state_reset (curr_state->dfa_state);
6605 index_to_bundle_states [0] = curr_state;
6606 insn_num = 0;
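/* Move the TImode marker (the start of a new processor cycle) from ignored insns such as USEs and CLOBBERs onto the next real insn, so that cycle boundaries are only seen on insns that will actually be bundled. */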
6607 for (insn = NEXT_INSN (prev_head_insn);
6608 insn != tail;
6609 insn = NEXT_INSN (insn))
6610 if (INSN_P (insn)
6611 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6612 || GET_CODE (PATTERN (insn)) == USE
6613 || GET_CODE (PATTERN (insn)) == CLOBBER)
6614 && GET_MODE (insn) == TImode)
6615 {
6616 PUT_MODE (insn, VOIDmode);
6617 for (next_insn = NEXT_INSN (insn);
6618 next_insn != tail;
6619 next_insn = NEXT_INSN (next_insn))
6620 if (INSN_P (next_insn)
6621 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6622 && GET_CODE (PATTERN (next_insn)) != USE
6623 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6624 {
6625 PUT_MODE (next_insn, TImode);
6626 break;
6627 }
6628 }
6629 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6630 insn != NULL_RTX;
6631 insn = next_insn)
6632 {
6633 if (!INSN_P (insn)
6634 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6635 || GET_CODE (PATTERN (insn)) == USE
6636 || GET_CODE (PATTERN (insn)) == CLOBBER)
6637 abort ();
6638 type = ia64_safe_type (insn);
6639 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6640 insn_num++;
6641 index_to_bundle_states [insn_num] = NULL;
6642 for (curr_state = index_to_bundle_states [insn_num - 1];
6643 curr_state != NULL;
6644 curr_state = next_state)
6645 {
6646 pos = curr_state->accumulated_insns_num % 3;
6647 next_state = curr_state->next;
6648 /* Finish the current bundle in order to start a subsequent
6649 asm insn in a new bundle. */
6650 only_bundle_end_p
6651 = (next_insn != NULL_RTX
6652 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6653 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6654 bundle_end_p
6655 = (only_bundle_end_p || next_insn == NULL_RTX
6656 || (GET_MODE (next_insn) == TImode
6657 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6658 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6659 || type == TYPE_S
6660 /* We need to insert 2 Nops for cases like M_MII. */
6661 || (type == TYPE_M && ia64_tune == PROCESSOR_ITANIUM
6662 && !bundle_end_p && pos == 1))
6663 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6664 only_bundle_end_p);
6665 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6666 only_bundle_end_p);
6667 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6668 only_bundle_end_p);
6669 }
6670 if (index_to_bundle_states [insn_num] == NULL)
6671 abort ();
6672 for (curr_state = index_to_bundle_states [insn_num];
6673 curr_state != NULL;
6674 curr_state = curr_state->next)
6675 if (verbose >= 2 && dump)
6676 {
6677 struct DFA_chip
6678 {
6679 unsigned short one_automaton_state;
6680 unsigned short oneb_automaton_state;
6681 unsigned short two_automaton_state;
6682 unsigned short twob_automaton_state;
6683 };
6684
6685 fprintf
6686 (dump,
6687 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6688 curr_state->unique_num,
6689 (curr_state->originator == NULL
6690 ? -1 : curr_state->originator->unique_num),
6691 curr_state->cost,
6692 curr_state->before_nops_num, curr_state->after_nops_num,
6693 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6694 (ia64_tune == PROCESSOR_ITANIUM
6695 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6696 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6697 INSN_UID (insn));
6698 }
6699 }
6700 if (index_to_bundle_states [insn_num] == NULL)
6701 abort ();
6702 /* Finding state with a minimal cost: */
6703 best_state = NULL;
6704 for (curr_state = index_to_bundle_states [insn_num];
6705 curr_state != NULL;
6706 curr_state = curr_state->next)
6707 if (curr_state->accumulated_insns_num % 3 == 0
6708 && (best_state == NULL || best_state->cost > curr_state->cost
6709 || (best_state->cost == curr_state->cost
6710 && (curr_state->accumulated_insns_num
6711 < best_state->accumulated_insns_num
6712 || (curr_state->accumulated_insns_num
6713 == best_state->accumulated_insns_num
6714 && curr_state->branch_deviation
6715 < best_state->branch_deviation)))))
6716 best_state = curr_state;
6717 /* Second (backward) pass: adding nops and templates: */
6718 insn_num = best_state->before_nops_num;
6719 template0 = template1 = -1;
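/* Walking backward, template0 holds the template of the bundle currently being closed and template1 (if any) the template of the bundle before it; at each bundle boundary a bundle_selector for template0 is emitted and template1 shifts into template0. */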
6720 for (curr_state = best_state;
6721 curr_state->originator != NULL;
6722 curr_state = curr_state->originator)
6723 {
6724 insn = curr_state->insn;
6725 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6726 || asm_noperands (PATTERN (insn)) >= 0);
6727 insn_num++;
6728 if (verbose >= 2 && dump)
6729 {
6730 struct DFA_chip
6731 {
6732 unsigned short one_automaton_state;
6733 unsigned short oneb_automaton_state;
6734 unsigned short two_automaton_state;
6735 unsigned short twob_automaton_state;
6736 };
6737
6738 fprintf
6739 (dump,
6740 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6741 curr_state->unique_num,
6742 (curr_state->originator == NULL
6743 ? -1 : curr_state->originator->unique_num),
6744 curr_state->cost,
6745 curr_state->before_nops_num, curr_state->after_nops_num,
6746 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6747 (ia64_tune == PROCESSOR_ITANIUM
6748 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6749 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6750 INSN_UID (insn));
6751 }
6752 max_pos = get_max_pos (curr_state->dfa_state);
6753 if (max_pos == 6 || (max_pos == 3 && template0 < 0))
6754 {
6755 pos = max_pos;
6756 if (max_pos == 3)
6757 template0 = get_template (curr_state->dfa_state, 3);
6758 else
6759 {
6760 template1 = get_template (curr_state->dfa_state, 3);
6761 template0 = get_template (curr_state->dfa_state, 6);
6762 }
6763 }
6764 if (max_pos > 3 && template1 < 0)
6765 {
6766 if (pos > 3)
6767 abort ();
6768 template1 = get_template (curr_state->dfa_state, 3);
6769 pos += 3;
6770 }
6771 if (!asm_p)
6772 for (i = 0; i < curr_state->after_nops_num; i++)
6773 {
6774 nop = gen_nop ();
6775 emit_insn_after (nop, insn);
6776 pos--;
6777 if (pos < 0)
6778 abort ();
6779 if (pos % 3 == 0)
6780 {
6781 if (template0 < 0)
6782 abort ();
6783 b = gen_bundle_selector (GEN_INT (template0));
6784 ia64_emit_insn_before (b, nop);
6785 template0 = template1;
6786 template1 = -1;
6787 }
6788 }
6789 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6790 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6791 && asm_noperands (PATTERN (insn)) < 0)
6792 pos--;
6793 if (ia64_safe_type (insn) == TYPE_L)
6794 pos--;
6795 if (pos < 0)
6796 abort ();
6797 if (pos % 3 == 0
6798 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6799 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6800 && asm_noperands (PATTERN (insn)) < 0)
6801 {
6802 if (template0 < 0)
6803 abort ();
6804 b = gen_bundle_selector (GEN_INT (template0));
6805 ia64_emit_insn_before (b, insn);
6806 b = PREV_INSN (insn);
6807 insn = b;
6808 template0 = template1;
6809 template1 = -1;
6810 }
6811 for (i = 0; i < curr_state->before_nops_num; i++)
6812 {
6813 nop = gen_nop ();
6814 ia64_emit_insn_before (nop, insn);
6815 nop = PREV_INSN (insn);
6816 insn = nop;
6817 pos--;
6818 if (pos < 0)
6819 abort ();
6820 if (pos % 3 == 0)
6821 {
6822 if (template0 < 0)
6823 abort ();
6824 b = gen_bundle_selector (GEN_INT (template0));
6825 ia64_emit_insn_before (b, insn);
6826 b = PREV_INSN (insn);
6827 insn = b;
6828 template0 = template1;
6829 template1 = -1;
6830 }
6831 }
6832 }
6833 if (ia64_tune == PROCESSOR_ITANIUM)
6834 /* Insert additional cycles for MM-insns: */
6835 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6836 insn != NULL_RTX;
6837 insn = next_insn)
6838 {
6839 if (!INSN_P (insn)
6840 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6841 || GET_CODE (PATTERN (insn)) == USE
6842 || GET_CODE (PATTERN (insn)) == CLOBBER)
6843 abort ();
6844 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6845 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6846 {
6847 rtx last;
6848 int i, j, n;
6849 int pred_stop_p;
6850
6851 last = prev_active_insn (insn);
6852 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6853 if (pred_stop_p)
6854 last = prev_active_insn (last);
6855 n = 0;
6856 for (;; last = prev_active_insn (last))
6857 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6858 {
6859 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6860 if (template0 == 9)
6861 PATTERN (last)
6862 = gen_bundle_selector (GEN_INT (2)); /* -> MFI */
6863 break;
6864 }
6865 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6866 n++;
6867 if ((pred_stop_p && n == 0) || n > 2
6868 || (template0 == 9 && n != 0))
6869 abort ();
6870 for (j = 3 - n; j > 0; j --)
6871 ia64_emit_insn_before (gen_nop (), insn);
6872 add_cycles [INSN_UID (insn)]--;
6873 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6874 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6875 insn);
6876 if (pred_stop_p)
6877 add_cycles [INSN_UID (insn)]--;
6878 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6879 {
6880 /* Insert .MII bundle. */
6881 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (0)),
6882 insn);
6883 ia64_emit_insn_before (gen_nop (), insn);
6884 ia64_emit_insn_before (gen_nop (), insn);
6885 if (i > 1)
6886 {
6887 ia64_emit_insn_before
6888 (gen_insn_group_barrier (GEN_INT (3)), insn);
6889 i--;
6890 }
6891 ia64_emit_insn_before (gen_nop (), insn);
6892 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6893 insn);
6894 }
6895 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6896 insn);
6897 for (j = n; j > 0; j --)
6898 ia64_emit_insn_before (gen_nop (), insn);
6899 if (pred_stop_p)
6900 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6901 insn);
6902 }
6903 }
6904 free (index_to_bundle_states);
6905 finish_bundle_state_table ();
6906 bundling_p = 0;
6907 dfa_clean_insn_cache ();
6908 }
6909
6910 /* The following function is called at the end of scheduling BB or
6911 EBB. After reload, it inserts stop bits and does insn bundling. */
6912
6913 static void
6914 ia64_sched_finish (FILE *dump, int sched_verbose)
6915 {
6916 if (sched_verbose)
6917 fprintf (dump, "// Finishing schedule.\n");
6918 if (!reload_completed)
6919 return;
6920 if (reload_completed)
6921 {
6922 final_emit_insn_group_barriers (dump);
6923 bundling (dump, sched_verbose, current_sched_info->prev_head,
6924 current_sched_info->next_tail);
6925 if (sched_verbose && dump)
6926 fprintf (dump, "// finishing %d-%d\n",
6927 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6928 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
6929
6930 return;
6931 }
6932 }
6933
6934 /* The following function inserts stop bits in scheduled BB or EBB. */
6935
6936 static void
6937 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6938 {
6939 rtx insn;
6940 int need_barrier_p = 0;
6941 rtx prev_insn = NULL_RTX;
6942
6943 init_insn_group_barriers ();
6944
6945 for (insn = NEXT_INSN (current_sched_info->prev_head);
6946 insn != current_sched_info->next_tail;
6947 insn = NEXT_INSN (insn))
6948 {
6949 if (GET_CODE (insn) == BARRIER)
6950 {
6951 rtx last = prev_active_insn (insn);
6952
6953 if (! last)
6954 continue;
6955 if (GET_CODE (last) == JUMP_INSN
6956 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6957 last = prev_active_insn (last);
6958 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6959 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6960
6961 init_insn_group_barriers ();
6962 need_barrier_p = 0;
6963 prev_insn = NULL_RTX;
6964 }
6965 else if (INSN_P (insn))
6966 {
6967 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6968 {
6969 init_insn_group_barriers ();
6970 need_barrier_p = 0;
6971 prev_insn = NULL_RTX;
6972 }
6973 else if (need_barrier_p || group_barrier_needed_p (insn))
6974 {
6975 if (TARGET_EARLY_STOP_BITS)
6976 {
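/* Try to hoist the stop bit to the start of the current instruction group: walk back to the nearest preceding insn that starts a cycle and is marked in stops_p, emit the stop bit before it, then replay the intervening insns through group_barrier_needed_p to rebuild the tracking state. */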
6977 rtx last;
6978
6979 for (last = insn;
6980 last != current_sched_info->prev_head;
6981 last = PREV_INSN (last))
6982 if (INSN_P (last) && GET_MODE (last) == TImode
6983 && stops_p [INSN_UID (last)])
6984 break;
6985 if (last == current_sched_info->prev_head)
6986 last = insn;
6987 last = prev_active_insn (last);
6988 if (last
6989 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6990 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6991 last);
6992 init_insn_group_barriers ();
6993 for (last = NEXT_INSN (last);
6994 last != insn;
6995 last = NEXT_INSN (last))
6996 if (INSN_P (last))
6997 group_barrier_needed_p (last);
6998 }
6999 else
7000 {
7001 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7002 insn);
7003 init_insn_group_barriers ();
7004 }
7005 group_barrier_needed_p (insn);
7006 prev_insn = NULL_RTX;
7007 }
7008 else if (recog_memoized (insn) >= 0)
7009 prev_insn = insn;
7010 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7011 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7012 || asm_noperands (PATTERN (insn)) >= 0);
7013 }
7014 }
7015 }
7016
7017 \f
7018
7019 /* If the following function returns TRUE, we will use the DFA
7020 insn scheduler. */
7021
7022 static int
7023 ia64_use_dfa_pipeline_interface (void)
7024 {
7025 return 1;
7026 }
7027
7028 /* The following function returns how many insns the scheduler should
7029 look ahead during first cycle multipass instruction selection. */
7030
7031 static int
7032 ia64_first_cycle_multipass_dfa_lookahead (void)
7033 {
7034 return (reload_completed ? 6 : 4);
7035 }
7036
7037 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
7038
7039 static void
7040 ia64_init_dfa_pre_cycle_insn (void)
7041 {
7042 if (temp_dfa_state == NULL)
7043 {
7044 dfa_state_size = state_size ();
7045 temp_dfa_state = xmalloc (dfa_state_size);
7046 prev_cycle_state = xmalloc (dfa_state_size);
7047 }
7048 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7049 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7050 recog_memoized (dfa_pre_cycle_insn);
7051 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7052 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7053 recog_memoized (dfa_stop_insn);
7054 }
7055
7056 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7057 used by the DFA insn scheduler. */
7058
7059 static rtx
7060 ia64_dfa_pre_cycle_insn (void)
7061 {
7062 return dfa_pre_cycle_insn;
7063 }
7064
7065 /* The following function returns TRUE if PRODUCER (of type ilog or
7066 ld) produces an address for CONSUMER (of type st or stf). */
7067
7068 int
7069 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7070 {
7071 rtx dest, reg, mem;
7072
7073 if (producer == NULL_RTX || consumer == NULL_RTX)
7074 abort ();
7075 dest = ia64_single_set (producer);
7076 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7077 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7078 abort ();
7079 if (GET_CODE (reg) == SUBREG)
7080 reg = SUBREG_REG (reg);
7081 dest = ia64_single_set (consumer);
7082 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7083 || GET_CODE (mem) != MEM)
7084 abort ();
7085 return reg_mentioned_p (reg, mem);
7086 }
7087
7088 /* The following function returns TRUE if PRODUCER (of type ilog or
7089 ld) produces an address for CONSUMER (of type ld or fld). */
7090
7091 int
7092 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7093 {
7094 rtx dest, src, reg, mem;
7095
7096 if (producer == NULL_RTX || consumer == NULL_RTX)
7097 abort ();
7098 dest = ia64_single_set (producer);
7099 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7100 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7101 abort ();
7102 if (GET_CODE (reg) == SUBREG)
7103 reg = SUBREG_REG (reg);
7104 src = ia64_single_set (consumer);
7105 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7106 abort ();
7107 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7108 mem = XVECEXP (mem, 0, 0);
7109 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7110 mem = XEXP (mem, 0);
7111
7112 /* Note that LO_SUM is used for GOT loads. */
7113 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7114 abort ();
7115
7116 return reg_mentioned_p (reg, mem);
7117 }
7118
7119 /* The following function returns TRUE if INSN produces an address for a
7120 load/store insn.  We will place such insns into an M slot because that
7121 decreases their latency. */
7122
7123 int
7124 ia64_produce_address_p (rtx insn)
7125 {
7126 return insn->call;
7127 }
7128
7129 \f
7130 /* Emit pseudo-ops for the assembler to describe predicate relations.
7131 At present this assumes that we only consider predicate pairs to
7132 be mutex, and that the assembler can deduce proper values from
7133 straight-line code. */
7134
7135 static void
7136 emit_predicate_relation_info (void)
7137 {
7138 basic_block bb;
7139
7140 FOR_EACH_BB_REVERSE (bb)
7141 {
7142 int r;
7143 rtx head = BB_HEAD (bb);
7144
7145 /* We only need such notes at code labels. */
7146 if (GET_CODE (head) != CODE_LABEL)
7147 continue;
7148 if (GET_CODE (NEXT_INSN (head)) == NOTE
7149 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7150 head = NEXT_INSN (head);
7151
7152 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7153 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7154 {
7155 rtx p = gen_rtx_REG (BImode, r);
7156 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7157 if (head == BB_END (bb))
7158 BB_END (bb) = n;
7159 head = n;
7160 }
7161 }
7162
7163 /* Look for conditional calls that do not return, and protect predicate
7164 relations around them. Otherwise the assembler will assume the call
7165 returns, and complain about uses of call-clobbered predicates after
7166 the call. */
7167 FOR_EACH_BB_REVERSE (bb)
7168 {
7169 rtx insn = BB_HEAD (bb);
7170
7171 while (1)
7172 {
7173 if (GET_CODE (insn) == CALL_INSN
7174 && GET_CODE (PATTERN (insn)) == COND_EXEC
7175 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7176 {
7177 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7178 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7179 if (BB_HEAD (bb) == insn)
7180 BB_HEAD (bb) = b;
7181 if (BB_END (bb) == insn)
7182 BB_END (bb) = a;
7183 }
7184
7185 if (insn == BB_END (bb))
7186 break;
7187 insn = NEXT_INSN (insn);
7188 }
7189 }
7190 }
7191
7192 /* Perform machine dependent operations on the rtl chain INSNS. */
7193
7194 static void
7195 ia64_reorg (void)
7196 {
7197 /* We are freeing block_for_insn in the toplev to keep compatibility
7198 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7199 compute_bb_for_insn ();
7200
7201 /* If optimizing, we'll have split before scheduling. */
7202 if (optimize == 0)
7203 split_all_insns (0);
7204
7205 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7206 non-optimizing bootstrap. */
7207 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7208
7209 if (ia64_flag_schedule_insns2)
7210 {
7211 timevar_push (TV_SCHED2);
7212 ia64_final_schedule = 1;
7213
7214 initiate_bundle_states ();
7215 ia64_nop = make_insn_raw (gen_nop ());
7216 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7217 recog_memoized (ia64_nop);
7218 clocks_length = get_max_uid () + 1;
7219 stops_p = xcalloc (1, clocks_length);
7220 if (ia64_tune == PROCESSOR_ITANIUM)
7221 {
7222 clocks = xcalloc (clocks_length, sizeof (int));
7223 add_cycles = xcalloc (clocks_length, sizeof (int));
7224 }
7225 if (ia64_tune == PROCESSOR_ITANIUM2)
7226 {
7227 pos_1 = get_cpu_unit_code ("2_1");
7228 pos_2 = get_cpu_unit_code ("2_2");
7229 pos_3 = get_cpu_unit_code ("2_3");
7230 pos_4 = get_cpu_unit_code ("2_4");
7231 pos_5 = get_cpu_unit_code ("2_5");
7232 pos_6 = get_cpu_unit_code ("2_6");
7233 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7234 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7235 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7236 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7237 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7238 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7239 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7240 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7241 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7242 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7243 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7244 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7245 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7246 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7247 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7248 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7249 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7250 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7251 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7252 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7253 }
7254 else
7255 {
7256 pos_1 = get_cpu_unit_code ("1_1");
7257 pos_2 = get_cpu_unit_code ("1_2");
7258 pos_3 = get_cpu_unit_code ("1_3");
7259 pos_4 = get_cpu_unit_code ("1_4");
7260 pos_5 = get_cpu_unit_code ("1_5");
7261 pos_6 = get_cpu_unit_code ("1_6");
7262 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7263 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7264 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7265 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7266 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7267 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7268 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7269 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7270 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7271 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7272 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7273 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7274 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7275 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7276 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7277 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7278 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7279 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7280 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7281 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7282 }
7283 schedule_ebbs (rtl_dump_file);
7284 finish_bundle_states ();
7285 if (ia64_tune == PROCESSOR_ITANIUM)
7286 {
7287 free (add_cycles);
7288 free (clocks);
7289 }
7290 free (stops_p);
7291 emit_insn_group_barriers (rtl_dump_file);
7292
7293 ia64_final_schedule = 0;
7294 timevar_pop (TV_SCHED2);
7295 }
7296 else
7297 emit_all_insn_group_barriers (rtl_dump_file);
7298
7299 /* A call must not be the last instruction in a function, so that the
7300 return address is still within the function and unwinding works
7301 properly. Note that IA-64 differs from dwarf2 on this point. */
7302 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7303 {
7304 rtx insn;
7305 int saw_stop = 0;
7306
7307 insn = get_last_insn ();
7308 if (! INSN_P (insn))
7309 insn = prev_active_insn (insn);
7310 if (GET_CODE (insn) == INSN
7311 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7312 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7313 {
7314 saw_stop = 1;
7315 insn = prev_active_insn (insn);
7316 }
7317 if (GET_CODE (insn) == CALL_INSN)
7318 {
7319 if (! saw_stop)
7320 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7321 emit_insn (gen_break_f ());
7322 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7323 }
7324 }
7325
7326 fixup_errata ();
7327 emit_predicate_relation_info ();
7328 }
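
/* Illustrative sketch only: when a function would otherwise end in a
   call, the fix-up above appends roughly

        br.call.sptk.many b0 = foo      last real insn of the function
        ;;
        break.f 0
        ;;

   (mnemonics and the called symbol are illustrative), so that the
   return address pushed by the call still lies inside the function and
   unwinding finds the right unwind table entry.  */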
7329 \f
7330 /* Return true if REGNO is used by the epilogue. */
7331
7332 int
7333 ia64_epilogue_uses (int regno)
7334 {
7335 switch (regno)
7336 {
7337 case R_GR (1):
7338 /* With a call to a function in another module, we will write a new
7339 value to "gp". After returning from such a call, we need to make
7340 sure the function restores the original gp-value, even if the
7341 function itself does not use the gp anymore. */
7342 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7343
7344 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7345 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7346 /* For functions defined with the syscall_linkage attribute, all
7347 input registers are marked as live at all function exits. This
7348 prevents the register allocator from using the input registers,
7349 which in turn makes it possible to restart a system call after
7350 an interrupt without having to save/restore the input registers.
7351 This also prevents kernel data from leaking to application code. */
7352 return lookup_attribute ("syscall_linkage",
7353 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7354
7355 case R_BR (0):
7356 /* Conditional return patterns can't represent the use of `b0' as
7357 the return address, so we force the value live this way. */
7358 return 1;
7359
7360 case AR_PFS_REGNUM:
7361 /* Likewise for ar.pfs, which is used by br.ret. */
7362 return 1;
7363
7364 default:
7365 return 0;
7366 }
7367 }
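
/* A hedged usage sketch for the syscall_linkage case above: declaring a
   system call entry point with the attribute keeps in0-in7 live at all
   of its exits, e.g.

        long sys_entry (long a0, long a1, long a2)
             __attribute__ ((syscall_linkage));

   The function name and signature are hypothetical; only the attribute
   name comes from the lookup_attribute call above.  */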
7368
7369 /* Return true if REGNO is used by the frame unwinder. */
7370
7371 int
7372 ia64_eh_uses (int regno)
7373 {
7374 if (! reload_completed)
7375 return 0;
7376
7377 if (current_frame_info.reg_save_b0
7378 && regno == current_frame_info.reg_save_b0)
7379 return 1;
7380 if (current_frame_info.reg_save_pr
7381 && regno == current_frame_info.reg_save_pr)
7382 return 1;
7383 if (current_frame_info.reg_save_ar_pfs
7384 && regno == current_frame_info.reg_save_ar_pfs)
7385 return 1;
7386 if (current_frame_info.reg_save_ar_unat
7387 && regno == current_frame_info.reg_save_ar_unat)
7388 return 1;
7389 if (current_frame_info.reg_save_ar_lc
7390 && regno == current_frame_info.reg_save_ar_lc)
7391 return 1;
7392
7393 return 0;
7394 }
7395 \f
7396 /* Return true if this goes in small data/bss. */
7397
7398 /* ??? We could also support our own long data here, generating movl/add/ld8
7399 instead of addl,ld8/ld8. This makes the code bigger, but should make the
7400 code faster because there is one less load. This would also cover incomplete
7401 types, which can't go in sdata/sbss. */
7402
7403 static bool
7404 ia64_in_small_data_p (tree exp)
7405 {
7406 if (TARGET_NO_SDATA)
7407 return false;
7408
7409 /* We want to merge strings, so we never consider them small data. */
7410 if (TREE_CODE (exp) == STRING_CST)
7411 return false;
7412
7413 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7414 {
7415 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7416 if (strcmp (section, ".sdata") == 0
7417 || strcmp (section, ".sbss") == 0)
7418 return true;
7419 }
7420 else
7421 {
7422 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7423
7424 /* If this is an incomplete type with size 0, then we can't put it
7425 in sdata because it might be too big when completed. */
7426 if (size > 0 && size <= ia64_section_threshold)
7427 return true;
7428 }
7429
7430 return false;
7431 }
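
/* A hedged example of what ends up in small data: an object whose size
   is positive and at most ia64_section_threshold, or one placed there
   explicitly by section name, e.g.

        static int hit_count;
        int stats __attribute__ ((section (".sdata")));

   Both variable names are hypothetical.  Such objects can then be
   reached gp-relative with a short displacement instead of through a
   full address load.  */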
7432 \f
7433 /* Output assembly directives for prologue regions. */
7434
7435 /* True if the current basic block is the last one in the function. */
7436
7437 static bool last_block;
7438
7439 /* True if we need a copy_state command at the start of the next block. */
7440
7441 static bool need_copy_state;
7442
7443 /* The function emits unwind directives for the start of an epilogue. */
7444
7445 static void
7446 process_epilogue (void)
7447 {
7448 /* If this isn't the last block of the function, then we need to label the
7449 current state, and copy it back in at the start of the next block. */
7450
7451 if (!last_block)
7452 {
7453 fprintf (asm_out_file, "\t.label_state 1\n");
7454 need_copy_state = true;
7455 }
7456
7457 fprintf (asm_out_file, "\t.restore sp\n");
7458 }
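
/* Illustrative output: for an epilogue that does not sit in the last
   block (say, an early return), the directives produced here and in
   process_for_unwind_directive bracket it roughly as

        .label_state 1
        .restore sp
          ... epilogue, br.ret ...
        .body
        .copy_state 1

   so that the unwind state recorded before the epilogue is
   re-established at the start of the following block.  The instruction
   lines between the directives are only a sketch.  */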
7459
7460 /* Process a SET pattern, looking for the specific forms that require
7461 emitting an assembler directive needed for unwinding. */
7462
7463 static int
7464 process_set (FILE *asm_out_file, rtx pat)
7465 {
7466 rtx src = SET_SRC (pat);
7467 rtx dest = SET_DEST (pat);
7468 int src_regno, dest_regno;
7469
7470 /* Look for the ALLOC insn. */
7471 if (GET_CODE (src) == UNSPEC_VOLATILE
7472 && XINT (src, 1) == UNSPECV_ALLOC
7473 && GET_CODE (dest) == REG)
7474 {
7475 dest_regno = REGNO (dest);
7476
7477 /* If this isn't the final destination for ar.pfs, the alloc
7478 shouldn't have been marked frame related. */
7479 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7480 abort ();
7481
7482 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7483 ia64_dbx_register_number (dest_regno));
7484 return 1;
7485 }
7486
7487 /* Look for SP = .... */
7488 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7489 {
7490 if (GET_CODE (src) == PLUS)
7491 {
7492 rtx op0 = XEXP (src, 0);
7493 rtx op1 = XEXP (src, 1);
7494 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7495 {
7496 if (INTVAL (op1) < 0)
7497 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7498 -INTVAL (op1));
7499 else
7500 process_epilogue ();
7501 }
7502 else
7503 abort ();
7504 }
7505 else if (GET_CODE (src) == REG
7506 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7507 process_epilogue ();
7508 else
7509 abort ();
7510
7511 return 1;
7512 }
7513
7514 /* Register move we need to look at. */
7515 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7516 {
7517 src_regno = REGNO (src);
7518 dest_regno = REGNO (dest);
7519
7520 switch (src_regno)
7521 {
7522 case BR_REG (0):
7523 /* Saving return address pointer. */
7524 if (dest_regno != current_frame_info.reg_save_b0)
7525 abort ();
7526 fprintf (asm_out_file, "\t.save rp, r%d\n",
7527 ia64_dbx_register_number (dest_regno));
7528 return 1;
7529
7530 case PR_REG (0):
7531 if (dest_regno != current_frame_info.reg_save_pr)
7532 abort ();
7533 fprintf (asm_out_file, "\t.save pr, r%d\n",
7534 ia64_dbx_register_number (dest_regno));
7535 return 1;
7536
7537 case AR_UNAT_REGNUM:
7538 if (dest_regno != current_frame_info.reg_save_ar_unat)
7539 abort ();
7540 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7541 ia64_dbx_register_number (dest_regno));
7542 return 1;
7543
7544 case AR_LC_REGNUM:
7545 if (dest_regno != current_frame_info.reg_save_ar_lc)
7546 abort ();
7547 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7548 ia64_dbx_register_number (dest_regno));
7549 return 1;
7550
7551 case STACK_POINTER_REGNUM:
7552 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7553 || ! frame_pointer_needed)
7554 abort ();
7555 fprintf (asm_out_file, "\t.vframe r%d\n",
7556 ia64_dbx_register_number (dest_regno));
7557 return 1;
7558
7559 default:
7560 /* Everything else should indicate being stored to memory. */
7561 abort ();
7562 }
7563 }
7564
7565 /* Memory store we need to look at. */
7566 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7567 {
7568 long off;
7569 rtx base;
7570 const char *saveop;
7571
7572 if (GET_CODE (XEXP (dest, 0)) == REG)
7573 {
7574 base = XEXP (dest, 0);
7575 off = 0;
7576 }
7577 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7578 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7579 {
7580 base = XEXP (XEXP (dest, 0), 0);
7581 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7582 }
7583 else
7584 abort ();
7585
7586 if (base == hard_frame_pointer_rtx)
7587 {
7588 saveop = ".savepsp";
7589 off = - off;
7590 }
7591 else if (base == stack_pointer_rtx)
7592 saveop = ".savesp";
7593 else
7594 abort ();
7595
7596 src_regno = REGNO (src);
7597 switch (src_regno)
7598 {
7599 case BR_REG (0):
7600 if (current_frame_info.reg_save_b0 != 0)
7601 abort ();
7602 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7603 return 1;
7604
7605 case PR_REG (0):
7606 if (current_frame_info.reg_save_pr != 0)
7607 abort ();
7608 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7609 return 1;
7610
7611 case AR_LC_REGNUM:
7612 if (current_frame_info.reg_save_ar_lc != 0)
7613 abort ();
7614 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7615 return 1;
7616
7617 case AR_PFS_REGNUM:
7618 if (current_frame_info.reg_save_ar_pfs != 0)
7619 abort ();
7620 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7621 return 1;
7622
7623 case AR_UNAT_REGNUM:
7624 if (current_frame_info.reg_save_ar_unat != 0)
7625 abort ();
7626 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7627 return 1;
7628
7629 case GR_REG (4):
7630 case GR_REG (5):
7631 case GR_REG (6):
7632 case GR_REG (7):
7633 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7634 1 << (src_regno - GR_REG (4)));
7635 return 1;
7636
7637 case BR_REG (1):
7638 case BR_REG (2):
7639 case BR_REG (3):
7640 case BR_REG (4):
7641 case BR_REG (5):
7642 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7643 1 << (src_regno - BR_REG (1)));
7644 return 1;
7645
7646 case FR_REG (2):
7647 case FR_REG (3):
7648 case FR_REG (4):
7649 case FR_REG (5):
7650 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7651 1 << (src_regno - FR_REG (2)));
7652 return 1;
7653
7654 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7655 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7656 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7657 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7658 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7659 1 << (src_regno - FR_REG (12)));
7660 return 1;
7661
7662 default:
7663 return 0;
7664 }
7665 }
7666
7667 return 0;
7668 }
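
/* Illustrative output: for a typical frame the SETs handled above might
   yield an annotation sequence such as

        .save ar.pfs, r34
        .fframe 48
        .save rp, r33
        .savesp ar.unat, 16
        .save.g 0x1

   The particular registers, frame size and offsets are hypothetical;
   only the directive spellings come from the fprintf calls above.  */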
7669
7670
7671 /* This function looks at a single insn and emits any directives
7672 required to unwind this insn. */
7673 void
7674 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7675 {
7676 if (flag_unwind_tables
7677 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7678 {
7679 rtx pat;
7680
7681 if (GET_CODE (insn) == NOTE
7682 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7683 {
7684 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7685
7686 /* Restore unwind state from immediately before the epilogue. */
7687 if (need_copy_state)
7688 {
7689 fprintf (asm_out_file, "\t.body\n");
7690 fprintf (asm_out_file, "\t.copy_state 1\n");
7691 need_copy_state = false;
7692 }
7693 }
7694
7695 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7696 return;
7697
7698 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7699 if (pat)
7700 pat = XEXP (pat, 0);
7701 else
7702 pat = PATTERN (insn);
7703
7704 switch (GET_CODE (pat))
7705 {
7706 case SET:
7707 process_set (asm_out_file, pat);
7708 break;
7709
7710 case PARALLEL:
7711 {
7712 int par_index;
7713 int limit = XVECLEN (pat, 0);
7714 for (par_index = 0; par_index < limit; par_index++)
7715 {
7716 rtx x = XVECEXP (pat, 0, par_index);
7717 if (GET_CODE (x) == SET)
7718 process_set (asm_out_file, x);
7719 }
7720 break;
7721 }
7722
7723 default:
7724 abort ();
7725 }
7726 }
7727 }
7728
7729 \f
7730 void
7731 ia64_init_builtins (void)
7732 {
7733 tree psi_type_node = build_pointer_type (integer_type_node);
7734 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7735
7736 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7737 tree si_ftype_psi_si_si
7738 = build_function_type_list (integer_type_node,
7739 psi_type_node, integer_type_node,
7740 integer_type_node, NULL_TREE);
7741
7742 /* __sync_val_compare_and_swap_di */
7743 tree di_ftype_pdi_di_di
7744 = build_function_type_list (long_integer_type_node,
7745 pdi_type_node, long_integer_type_node,
7746 long_integer_type_node, NULL_TREE);
7747 /* __sync_bool_compare_and_swap_di */
7748 tree si_ftype_pdi_di_di
7749 = build_function_type_list (integer_type_node,
7750 pdi_type_node, long_integer_type_node,
7751 long_integer_type_node, NULL_TREE);
7752 /* __sync_synchronize */
7753 tree void_ftype_void
7754 = build_function_type (void_type_node, void_list_node);
7755
7756 /* __sync_lock_test_and_set_si */
7757 tree si_ftype_psi_si
7758 = build_function_type_list (integer_type_node,
7759 psi_type_node, integer_type_node, NULL_TREE);
7760
7761 /* __sync_lock_test_and_set_di */
7762 tree di_ftype_pdi_di
7763 = build_function_type_list (long_integer_type_node,
7764 pdi_type_node, long_integer_type_node,
7765 NULL_TREE);
7766
7767 /* __sync_lock_release_si */
7768 tree void_ftype_psi
7769 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7770
7771 /* __sync_lock_release_di */
7772 tree void_ftype_pdi
7773 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7774
7775 tree fpreg_type;
7776 tree float80_type;
7777
7778 /* The __fpreg type. */
7779 fpreg_type = make_node (REAL_TYPE);
7780 /* ??? The back end should know to load/save __fpreg variables using
7781 the ldf.fill and stf.spill instructions. */
7782 TYPE_PRECISION (fpreg_type) = 96;
7783 layout_type (fpreg_type);
7784 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7785
7786 /* The __float80 type. */
7787 float80_type = make_node (REAL_TYPE);
7788 TYPE_PRECISION (float80_type) = 96;
7789 layout_type (float80_type);
7790 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7791
7792 /* The __float128 type. */
7793 if (!TARGET_HPUX)
7794 {
7795 tree float128_type = make_node (REAL_TYPE);
7796 TYPE_PRECISION (float128_type) = 128;
7797 layout_type (float128_type);
7798 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7799 }
7800 else
7801 /* Under HPUX, this is a synonym for "long double". */
7802 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7803 "__float128");
7804
7805 #define def_builtin(name, type, code) \
7806 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
7807
7808 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7809 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7810 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7811 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7812 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7813 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7814 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7815 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7816
7817 def_builtin ("__sync_synchronize", void_ftype_void,
7818 IA64_BUILTIN_SYNCHRONIZE);
7819
7820 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7821 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7822 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7823 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7824 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7825 IA64_BUILTIN_LOCK_RELEASE_SI);
7826 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7827 IA64_BUILTIN_LOCK_RELEASE_DI);
7828
7829 def_builtin ("__builtin_ia64_bsp",
7830 build_function_type (ptr_type_node, void_list_node),
7831 IA64_BUILTIN_BSP);
7832
7833 def_builtin ("__builtin_ia64_flushrs",
7834 build_function_type (void_type_node, void_list_node),
7835 IA64_BUILTIN_FLUSHRS);
7836
7837 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7838 IA64_BUILTIN_FETCH_AND_ADD_SI);
7839 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7840 IA64_BUILTIN_FETCH_AND_SUB_SI);
7841 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7842 IA64_BUILTIN_FETCH_AND_OR_SI);
7843 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7844 IA64_BUILTIN_FETCH_AND_AND_SI);
7845 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7846 IA64_BUILTIN_FETCH_AND_XOR_SI);
7847 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7848 IA64_BUILTIN_FETCH_AND_NAND_SI);
7849
7850 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7851 IA64_BUILTIN_ADD_AND_FETCH_SI);
7852 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7853 IA64_BUILTIN_SUB_AND_FETCH_SI);
7854 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7855 IA64_BUILTIN_OR_AND_FETCH_SI);
7856 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7857 IA64_BUILTIN_AND_AND_FETCH_SI);
7858 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7859 IA64_BUILTIN_XOR_AND_FETCH_SI);
7860 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7861 IA64_BUILTIN_NAND_AND_FETCH_SI);
7862
7863 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7864 IA64_BUILTIN_FETCH_AND_ADD_DI);
7865 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7866 IA64_BUILTIN_FETCH_AND_SUB_DI);
7867 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7868 IA64_BUILTIN_FETCH_AND_OR_DI);
7869 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7870 IA64_BUILTIN_FETCH_AND_AND_DI);
7871 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7872 IA64_BUILTIN_FETCH_AND_XOR_DI);
7873 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7874 IA64_BUILTIN_FETCH_AND_NAND_DI);
7875
7876 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7877 IA64_BUILTIN_ADD_AND_FETCH_DI);
7878 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7879 IA64_BUILTIN_SUB_AND_FETCH_DI);
7880 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7881 IA64_BUILTIN_OR_AND_FETCH_DI);
7882 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7883 IA64_BUILTIN_AND_AND_FETCH_DI);
7884 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7885 IA64_BUILTIN_XOR_AND_FETCH_DI);
7886 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7887 IA64_BUILTIN_NAND_AND_FETCH_DI);
7888
7889 #undef def_builtin
7890 }
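
/* A minimal usage sketch for the builtins registered above.  The
   wrapper names are hypothetical; the builtin names are the ones passed
   to def_builtin.  The _si forms take a pointer to int, the _di forms a
   pointer to long, matching the function types built above:

        long
        bump_event_count (long *counter)
        {
          return __sync_fetch_and_add_di (counter, 1);
        }

        void
        full_memory_fence (void)
        {
          __sync_synchronize ();
        }

   __sync_fetch_and_add_di hands back the value *counter held before the
   increment, and __sync_synchronize expands to a single mf.  */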
7891
7892 /* Expand fetch_and_op intrinsics. The basic code sequence is:
7893
7894 mf
7895 tmp = [ptr];
7896 do {
7897 ret = tmp;
7898 ar.ccv = tmp;
7899 tmp <op>= value;
7900 cmpxchgsz.acq tmp = [ptr], tmp
7901 } while (tmp != ret)
7902 */
7903
7904 static rtx
7905 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7906 tree arglist, rtx target)
7907 {
7908 rtx ret, label, tmp, ccv, insn, mem, value;
7909 tree arg0, arg1;
7910
7911 arg0 = TREE_VALUE (arglist);
7912 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7913 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7914 #ifdef POINTERS_EXTEND_UNSIGNED
7915 if (GET_MODE(mem) != Pmode)
7916 mem = convert_memory_address (Pmode, mem);
7917 #endif
7918 value = expand_expr (arg1, NULL_RTX, mode, 0);
7919
7920 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7921 MEM_VOLATILE_P (mem) = 1;
7922
7923 if (target && register_operand (target, mode))
7924 ret = target;
7925 else
7926 ret = gen_reg_rtx (mode);
7927
7928 emit_insn (gen_mf ());
7929
7930 /* Special case for fetchadd instructions. */
7931 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7932 {
7933 if (mode == SImode)
7934 insn = gen_fetchadd_acq_si (ret, mem, value);
7935 else
7936 insn = gen_fetchadd_acq_di (ret, mem, value);
7937 emit_insn (insn);
7938 return ret;
7939 }
7940
7941 tmp = gen_reg_rtx (mode);
7942 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7943 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7944 emit_move_insn (tmp, mem);
7945
7946 label = gen_label_rtx ();
7947 emit_label (label);
7948 emit_move_insn (ret, tmp);
7949 convert_move (ccv, tmp, /*unsignedp=*/1);
7950
7951 /* Perform the specific operation. NAND arrives here as one_cmpl_optab;
7952 complement TMP first and then fall through to use AND. */
7953 if (binoptab == one_cmpl_optab)
7954 {
7955 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7956 binoptab = and_optab;
7957 }
7958 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7959
7960 if (mode == SImode)
7961 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7962 else
7963 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7964 emit_insn (insn);
7965
7966 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
7967
7968 return ret;
7969 }
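
/* A hedged example of the old-value semantics implemented above (the
   wrapper name is hypothetical): the builtin returns what the word
   contained before the operation, so the caller can see which flag bits
   had been set:

        int
        clear_flags (int *flags, int mask)
        {
          return __sync_fetch_and_and_si (flags, ~mask);
        }

   For __sync_fetch_and_add with one of the small immediates fetchadd
   accepts (e.g. plus or minus 1, 4, 8 or 16), the special case above
   skips the cmpxchg loop entirely and emits a single fetchadd4.acq or
   fetchadd8.acq.  */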
7970
7971 /* Expand op_and_fetch intrinsics. The basic code sequence is:
7972
7973 mf
7974 tmp = [ptr];
7975 do {
7976 old = tmp;
7977 ar.ccv = tmp;
7978 ret = tmp <op> value;
7979 cmpxchgsz.acq tmp = [ptr], ret
7980 } while (tmp != old)
7981 */
7982
7983 static rtx
7984 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7985 tree arglist, rtx target)
7986 {
7987 rtx old, label, tmp, ret, ccv, insn, mem, value;
7988 tree arg0, arg1;
7989
7990 arg0 = TREE_VALUE (arglist);
7991 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7992 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7993 #ifdef POINTERS_EXTEND_UNSIGNED
7994 if (GET_MODE(mem) != Pmode)
7995 mem = convert_memory_address (Pmode, mem);
7996 #endif
7997
7998 value = expand_expr (arg1, NULL_RTX, mode, 0);
7999
8000 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8001 MEM_VOLATILE_P (mem) = 1;
8002
8003 if (target && ! register_operand (target, mode))
8004 target = NULL_RTX;
8005
8006 emit_insn (gen_mf ());
8007 tmp = gen_reg_rtx (mode);
8008 old = gen_reg_rtx (mode);
8009 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8010 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8011
8012 emit_move_insn (tmp, mem);
8013
8014 label = gen_label_rtx ();
8015 emit_label (label);
8016 emit_move_insn (old, tmp);
8017 convert_move (ccv, tmp, /*unsignedp=*/1);
8018
8019 /* Perform the specific operation. NAND arrives here as one_cmpl_optab;
8020 complement TMP first and then fall through to use AND. */
8021 if (binoptab == one_cmpl_optab)
8022 {
8023 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8024 binoptab = and_optab;
8025 }
8026 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8027
8028 if (mode == SImode)
8029 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8030 else
8031 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8032 emit_insn (insn);
8033
8034 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
8035
8036 return ret;
8037 }
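
/* A hedged example of the new-value semantics implemented above (the
   wrapper name is hypothetical): unlike the fetch_and_op forms, these
   builtins return the result of the operation, so the _add variant acts
   like an atomic pre-increment:

        int
        enter_region (int *nesting)
        {
          return __sync_add_and_fetch_si (nesting, 1);
        }

   The same cmpxchg retry loop is generated; the difference is which
   value ends up in the result register.  */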
8038
8039 /* Expand val_ and bool_compare_and_swap. For val_ we want:
8040
8041 ar.ccv = oldval
8042 mf
8043 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
8044 return ret
8045
8046 For bool_ it's the same except return ret == oldval.
8047 */
8048
8049 static rtx
8050 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8051 int boolp, tree arglist, rtx target)
8052 {
8053 tree arg0, arg1, arg2;
8054 rtx mem, old, new, ccv, tmp, insn;
8055
8056 arg0 = TREE_VALUE (arglist);
8057 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8058 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8059 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8060 old = expand_expr (arg1, NULL_RTX, mode, 0);
8061 new = expand_expr (arg2, NULL_RTX, mode, 0);
8062
8063 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8064 MEM_VOLATILE_P (mem) = 1;
8065
8066 if (GET_MODE (old) != mode)
8067 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8068 if (GET_MODE (new) != mode)
8069 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8070
8071 if (! register_operand (old, mode))
8072 old = copy_to_mode_reg (mode, old);
8073 if (! register_operand (new, mode))
8074 new = copy_to_mode_reg (mode, new);
8075
8076 if (! boolp && target && register_operand (target, mode))
8077 tmp = target;
8078 else
8079 tmp = gen_reg_rtx (mode);
8080
8081 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8082 convert_move (ccv, old, /*unsignedp=*/1);
8083 emit_insn (gen_mf ());
8084 if (mode == SImode)
8085 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8086 else
8087 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8088 emit_insn (insn);
8089
8090 if (boolp)
8091 {
8092 if (! target)
8093 target = gen_reg_rtx (rmode);
8094 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
8095 }
8096 else
8097 return tmp;
8098 }
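
/* A hedged compare-and-swap usage sketch (function and variable names
   hypothetical): the bool_ form returns nonzero exactly when the
   exchange happened, which is the natural shape for a retry loop such
   as an atomic maximum:

        void
        record_maximum (int *max_seen, int sample)
        {
          int cur = *max_seen;
          while (sample > cur
                 && !__sync_bool_compare_and_swap_si (max_seen, cur, sample))
            cur = *max_seen;
        }

   The val_ form instead returns the value the word held before the
   attempt, whether or not the swap succeeded.  */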
8099
8100 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
8101
8102 static rtx
8103 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
8104 rtx target)
8105 {
8106 tree arg0, arg1;
8107 rtx mem, new, ret, insn;
8108
8109 arg0 = TREE_VALUE (arglist);
8110 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8111 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8112 new = expand_expr (arg1, NULL_RTX, mode, 0);
8113
8114 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8115 MEM_VOLATILE_P (mem) = 1;
8116 if (! register_operand (new, mode))
8117 new = copy_to_mode_reg (mode, new);
8118
8119 if (target && register_operand (target, mode))
8120 ret = target;
8121 else
8122 ret = gen_reg_rtx (mode);
8123
8124 if (mode == SImode)
8125 insn = gen_xchgsi (ret, mem, new);
8126 else
8127 insn = gen_xchgdi (ret, mem, new);
8128 emit_insn (insn);
8129
8130 return ret;
8131 }
8132
8133 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
8134
8135 static rtx
8136 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
8137 rtx target ATTRIBUTE_UNUSED)
8138 {
8139 tree arg0;
8140 rtx mem;
8141
8142 arg0 = TREE_VALUE (arglist);
8143 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8144
8145 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8146 MEM_VOLATILE_P (mem) = 1;
8147
8148 emit_move_insn (mem, const0_rtx);
8149
8150 return const0_rtx;
8151 }
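
/* A hedged spinlock sketch built on the two primitives above (the lock
   variable and function names are hypothetical):

        static int lock_word;

        void
        acquire (void)
        {
          while (__sync_lock_test_and_set_si (&lock_word, 1))
            continue;
        }

        void
        release (void)
        {
          __sync_lock_release_si (&lock_word);
        }

   The test-and-set expands to an xchg with acquire semantics and
   returns the previous contents, so a nonzero result means the lock was
   already held; the release is a plain store of zero through a volatile
   MEM, i.e. the st.rel shown in the comment above.  */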
8152
8153 rtx
8154 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8155 enum machine_mode mode ATTRIBUTE_UNUSED,
8156 int ignore ATTRIBUTE_UNUSED)
8157 {
8158 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8159 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8160 tree arglist = TREE_OPERAND (exp, 1);
8161 enum machine_mode rmode = VOIDmode;
8162
8163 switch (fcode)
8164 {
8165 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8166 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8167 mode = SImode;
8168 rmode = SImode;
8169 break;
8170
8171 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8172 case IA64_BUILTIN_LOCK_RELEASE_SI:
8173 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8174 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8175 case IA64_BUILTIN_FETCH_AND_OR_SI:
8176 case IA64_BUILTIN_FETCH_AND_AND_SI:
8177 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8178 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8179 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8180 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8181 case IA64_BUILTIN_OR_AND_FETCH_SI:
8182 case IA64_BUILTIN_AND_AND_FETCH_SI:
8183 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8184 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8185 mode = SImode;
8186 break;
8187
8188 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8189 mode = DImode;
8190 rmode = SImode;
8191 break;
8192
8193 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8194 mode = DImode;
8195 rmode = DImode;
8196 break;
8197
8198 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8199 case IA64_BUILTIN_LOCK_RELEASE_DI:
8200 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8201 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8202 case IA64_BUILTIN_FETCH_AND_OR_DI:
8203 case IA64_BUILTIN_FETCH_AND_AND_DI:
8204 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8205 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8206 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8207 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8208 case IA64_BUILTIN_OR_AND_FETCH_DI:
8209 case IA64_BUILTIN_AND_AND_FETCH_DI:
8210 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8211 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8212 mode = DImode;
8213 break;
8214
8215 default:
8216 break;
8217 }
8218
8219 switch (fcode)
8220 {
8221 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8222 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8223 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8224 target);
8225
8226 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8227 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8228 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8229 target);
8230
8231 case IA64_BUILTIN_SYNCHRONIZE:
8232 emit_insn (gen_mf ());
8233 return const0_rtx;
8234
8235 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8236 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8237 return ia64_expand_lock_test_and_set (mode, arglist, target);
8238
8239 case IA64_BUILTIN_LOCK_RELEASE_SI:
8240 case IA64_BUILTIN_LOCK_RELEASE_DI:
8241 return ia64_expand_lock_release (mode, arglist, target);
8242
8243 case IA64_BUILTIN_BSP:
8244 if (! target || ! register_operand (target, DImode))
8245 target = gen_reg_rtx (DImode);
8246 emit_insn (gen_bsp_value (target));
8247 #ifdef POINTERS_EXTEND_UNSIGNED
8248 target = convert_memory_address (ptr_mode, target);
8249 #endif
8250 return target;
8251
8252 case IA64_BUILTIN_FLUSHRS:
8253 emit_insn (gen_flushrs ());
8254 return const0_rtx;
8255
8256 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8257 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8258 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8259
8260 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8261 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8262 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8263
8264 case IA64_BUILTIN_FETCH_AND_OR_SI:
8265 case IA64_BUILTIN_FETCH_AND_OR_DI:
8266 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8267
8268 case IA64_BUILTIN_FETCH_AND_AND_SI:
8269 case IA64_BUILTIN_FETCH_AND_AND_DI:
8270 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8271
8272 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8273 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8274 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8275
8276 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8277 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8278 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8279
8280 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8281 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8282 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8283
8284 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8285 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8286 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8287
8288 case IA64_BUILTIN_OR_AND_FETCH_SI:
8289 case IA64_BUILTIN_OR_AND_FETCH_DI:
8290 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8291
8292 case IA64_BUILTIN_AND_AND_FETCH_SI:
8293 case IA64_BUILTIN_AND_AND_FETCH_DI:
8294 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8295
8296 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8297 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8298 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8299
8300 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8301 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8302 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8303
8304 default:
8305 break;
8306 }
8307
8308 return NULL_RTX;
8309 }
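
/* A hedged sketch for the two register-stack builtins handled above
   (the wrapper name is hypothetical): flushing the dirty stacked
   registers first makes the value read from ar.bsp usable for walking
   the register backing store, as an unwinder or garbage collector
   might:

        void *
        current_backing_store_top (void)
        {
          __builtin_ia64_flushrs ();
          return __builtin_ia64_bsp ();
        }

   Both builtin names come from ia64_init_builtins above.  */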
8310
8311 /* On HP-UX IA64, aggregate parameters are passed in the most
8312 significant bits of the stack slot. */
8313
8314 enum direction
8315 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8316 {
8317 /* Exception to normal case for structures/unions/etc. */
8318
8319 if (type && AGGREGATE_TYPE_P (type)
8320 && int_size_in_bytes (type) < UNITS_PER_WORD)
8321 return upward;
8322
8323 /* Fall back to the default. */
8324 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8325 }
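
/* Hedged illustration (the type name is hypothetical): an aggregate
   smaller than a word, e.g.

        struct tag { char a; char b; };

   takes the `upward' branch above; larger aggregates and non-aggregate
   arguments fall through to the default padding rule.  */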
8326
8327 /* Linked list of all external functions that are to be emitted by GCC.
8328 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8329 order to avoid putting out names that are never really used. */
8330
8331 struct extern_func_list
8332 {
8333 struct extern_func_list *next; /* next external */
8334 char *name; /* name of the external */
8335 } *extern_func_head = 0;
8336
8337 static void
8338 ia64_hpux_add_extern_decl (const char *name)
8339 {
8340 struct extern_func_list *p;
8341
8342 p = (struct extern_func_list *) xmalloc (sizeof (struct extern_func_list));
8343 p->name = xmalloc (strlen (name) + 1);
8344 strcpy(p->name, name);
8345 p->next = extern_func_head;
8346 extern_func_head = p;
8347 }
8348
8349 /* Print out the list of used global functions. */
8350
8351 static void
8352 ia64_hpux_file_end (void)
8353 {
8354 while (extern_func_head)
8355 {
8356 const char *real_name;
8357 tree decl;
8358
8359 real_name = (* targetm.strip_name_encoding) (extern_func_head->name);
8360 decl = maybe_get_identifier (real_name);
8361
8362 if (!decl
8363 || (! TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (decl)))
8364 {
8365 if (decl)
8366 TREE_ASM_WRITTEN (decl) = 1;
8367 (*targetm.asm_out.globalize_label) (asm_out_file,
8368 extern_func_head->name);
8369 fputs (TYPE_ASM_OP, asm_out_file);
8370 assemble_name (asm_out_file, extern_func_head->name);
8371 putc (',', asm_out_file);
8372 fprintf (asm_out_file, TYPE_OPERAND_FMT, "function");
8373 putc ('\n', asm_out_file);
8374 }
8375 extern_func_head = extern_func_head->next;
8376 }
8377 }
8378
8379 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8380
8381 static void
8382 ia64_hpux_init_libfuncs (void)
8383 {
8384 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8385 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8386 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8387 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8388 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8389 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8390 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8391 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8392
8393 /* ia64_expand_compare uses this. */
8394 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8395
8396 /* These should never be used. */
8397 set_optab_libfunc (eq_optab, TFmode, 0);
8398 set_optab_libfunc (ne_optab, TFmode, 0);
8399 set_optab_libfunc (gt_optab, TFmode, 0);
8400 set_optab_libfunc (ge_optab, TFmode, 0);
8401 set_optab_libfunc (lt_optab, TFmode, 0);
8402 set_optab_libfunc (le_optab, TFmode, 0);
8403
8404 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8405 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8406 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8407 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8408 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8409 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8410
8411 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8412 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8413 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8414 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8415
8416 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8417 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8418 }
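
/* A hedged example of the renaming above: on HP-UX a TFmode (quad
   precision) operation is emitted as a call to the matching _U_Qf*
   routine, so

        long double
        quad_multiply (long double a, long double b)
        {
          return a * b;
        }

   becomes a call to _U_Qfmpy, and quad comparisons go through the
   _U_Qfcmp helper used by ia64_expand_compare.  The wrapper name is
   hypothetical, and the example assumes long double has TFmode on this
   target.  */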
8419
8420 /* Rename the division and modulus functions in VMS. */
8421
8422 static void
8423 ia64_vms_init_libfuncs (void)
8424 {
8425 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8426 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8427 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8428 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8429 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8430 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8431 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8432 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8433 }
8434 \f
8435 /* Switch to the section to which we should output X. The only thing
8436 special we do here is to honor small data. */
8437
8438 static void
8439 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8440 unsigned HOST_WIDE_INT align)
8441 {
8442 if (GET_MODE_SIZE (mode) > 0
8443 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8444 sdata_section ();
8445 else
8446 default_elf_select_rtx_section (mode, x, align);
8447 }
8448
8449 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8450 Pretend flag_pic is always set. */
8451
8452 static void
8453 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8454 {
8455 default_elf_select_section_1 (exp, reloc, align, true);
8456 }
8457
8458 static void
8459 ia64_rwreloc_unique_section (tree decl, int reloc)
8460 {
8461 default_unique_section_1 (decl, reloc, true);
8462 }
8463
8464 static void
8465 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8466 unsigned HOST_WIDE_INT align)
8467 {
8468 int save_pic = flag_pic;
8469 flag_pic = 1;
8470 ia64_select_rtx_section (mode, x, align);
8471 flag_pic = save_pic;
8472 }
8473
8474 static unsigned int
8475 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8476 {
8477 return default_section_type_flags_1 (decl, name, reloc, true);
8478 }
8479
8480
8481 /* Output the assembler code for a thunk function. THUNK is the
8482 declaration for the thunk function itself, FUNCTION is the decl for
8483 the target function. DELTA is an immediate constant offset to be
8484 added to THIS. If VCALL_OFFSET is nonzero, the word at
8485 *(*this + vcall_offset) should be added to THIS. */
8486
8487 static void
8488 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8489 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8490 tree function)
8491 {
8492 rtx this, insn, funexp;
8493
8494 reload_completed = 1;
8495 epilogue_completed = 1;
8496 no_new_pseudos = 1;
8497
8498 /* Set things up as ia64_expand_prologue might. */
8499 last_scratch_gr_reg = 15;
8500
8501 memset (&current_frame_info, 0, sizeof (current_frame_info));
8502 current_frame_info.spill_cfa_off = -16;
8503 current_frame_info.n_input_regs = 1;
8504 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8505
8506 if (!TARGET_REG_NAMES)
8507 reg_names[IN_REG (0)] = ia64_reg_numbers[0];
8508
8509 /* Mark the end of the (empty) prologue. */
8510 emit_note (NOTE_INSN_PROLOGUE_END);
8511
8512 this = gen_rtx_REG (Pmode, IN_REG (0));
8513 if (TARGET_ILP32)
8514 {
8515 rtx tmp = gen_rtx_REG (ptr_mode, IN_REG (0));
8516 REG_POINTER (tmp) = 1;
8517 if (delta && CONST_OK_FOR_I (delta))
8518 {
8519 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8520 delta = 0;
8521 }
8522 else
8523 emit_insn (gen_ptr_extend (this, tmp));
8524 }
8525
8526 /* Apply the constant offset, if required. */
8527 if (delta)
8528 {
8529 rtx delta_rtx = GEN_INT (delta);
8530
8531 if (!CONST_OK_FOR_I (delta))
8532 {
8533 rtx tmp = gen_rtx_REG (Pmode, 2);
8534 emit_move_insn (tmp, delta_rtx);
8535 delta_rtx = tmp;
8536 }
8537 emit_insn (gen_adddi3 (this, this, delta_rtx));
8538 }
8539
8540 /* Apply the offset from the vtable, if required. */
8541 if (vcall_offset)
8542 {
8543 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8544 rtx tmp = gen_rtx_REG (Pmode, 2);
8545
8546 if (TARGET_ILP32)
8547 {
8548 rtx t = gen_rtx_REG (ptr_mode, 2);
8549 REG_POINTER (t) = 1;
8550 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8551 if (CONST_OK_FOR_I (vcall_offset))
8552 {
8553 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8554 vcall_offset_rtx));
8555 vcall_offset = 0;
8556 }
8557 else
8558 emit_insn (gen_ptr_extend (tmp, t));
8559 }
8560 else
8561 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8562
8563 if (vcall_offset)
8564 {
8565 if (!CONST_OK_FOR_J (vcall_offset))
8566 {
8567 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8568 emit_move_insn (tmp2, vcall_offset_rtx);
8569 vcall_offset_rtx = tmp2;
8570 }
8571 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8572 }
8573
8574 if (TARGET_ILP32)
8575 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8576 gen_rtx_MEM (ptr_mode, tmp));
8577 else
8578 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8579
8580 emit_insn (gen_adddi3 (this, this, tmp));
8581 }
8582
8583 /* Generate a tail call to the target function. */
8584 if (! TREE_USED (function))
8585 {
8586 assemble_external (function);
8587 TREE_USED (function) = 1;
8588 }
8589 funexp = XEXP (DECL_RTL (function), 0);
8590 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8591 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8592 insn = get_last_insn ();
8593 SIBLING_CALL_P (insn) = 1;
8594
8595 /* Code generation for calls relies on splitting. */
8596 reload_completed = 1;
8597 epilogue_completed = 1;
8598 try_split (PATTERN (insn), insn, 0);
8599
8600 emit_barrier ();
8601
8602 /* Run just enough of rest_of_compilation to get the insns emitted.
8603 There's not really enough bulk here to make other passes such as
8604 instruction scheduling worthwhile. Note that use_thunk calls
8605 assemble_start_function and assemble_end_function. */
8606
8607 insn_locators_initialize ();
8608 emit_all_insn_group_barriers (NULL);
8609 insn = get_insns ();
8610 shorten_branches (insn);
8611 final_start_function (insn, file, 1);
8612 final (insn, file, 1, 0);
8613 final_end_function ();
8614
8615 reload_completed = 0;
8616 epilogue_completed = 0;
8617 no_new_pseudos = 0;
8618 }
8619
8620 #include "gt-ia64.h"