1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "df.h"
32 #include "tm_p.h"
33 #include "stringpool.h"
34 #include "attribs.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "insn-attr.h"
41 #include "alias.h"
42 #include "fold-const.h"
43 #include "stor-layout.h"
44 #include "varasm.h"
45 #include "calls.h"
46 #include "output.h"
47 #include "except.h"
48 #include "explow.h"
49 #include "expr.h"
50 #include "reload.h"
51 #include "common/common-target.h"
52 #include "langhooks.h"
53 #include "cfgrtl.h"
54 #include "opts.h"
55 #include "builtins.h"
56
57 /* This file should be included last. */
58 #include "target-def.h"
59
60 /* Return nonzero if there is a bypass for the output of
61 OUT_INSN and the fp store IN_INSN. */
62 int
63 pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
64 {
65 machine_mode store_mode;
66 machine_mode other_mode;
67 rtx set;
68
69 if (recog_memoized (in_insn) < 0
70 || (get_attr_type (in_insn) != TYPE_FPSTORE
71 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
72 || recog_memoized (out_insn) < 0)
73 return 0;
74
75 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
76
77 set = single_set (out_insn);
78 if (!set)
79 return 0;
80
81 other_mode = GET_MODE (SET_SRC (set));
82
83 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
84 }
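
/* Illustrative note (not in the original source): the bypass applies
   only when producer and consumer have the same width.  For example,
   an SImode result feeding an SFmode fpstore qualifies (4 == 4),
   while a DFmode result feeding an SFmode store does not (8 != 4),
   so no bypass is granted to the scheduler in that case.  */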
85
86
87 #ifndef DO_FRAME_NOTES
88 #ifdef INCOMING_RETURN_ADDR_RTX
89 #define DO_FRAME_NOTES 1
90 #else
91 #define DO_FRAME_NOTES 0
92 #endif
93 #endif
94
95 static void pa_option_override (void);
96 static void copy_reg_pointer (rtx, rtx);
97 static void fix_range (const char *);
98 static int hppa_register_move_cost (machine_mode mode, reg_class_t,
99 reg_class_t);
100 static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
101 static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
102 static inline rtx force_mode (machine_mode, rtx);
103 static void pa_reorg (void);
104 static void pa_combine_instructions (void);
105 static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
106 rtx, rtx);
107 static bool forward_branch_p (rtx_insn *);
108 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
109 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
110 static int compute_movmem_length (rtx_insn *);
111 static int compute_clrmem_length (rtx_insn *);
112 static bool pa_assemble_integer (rtx, unsigned int, int);
113 static void remove_useless_addtr_insns (int);
114 static void store_reg (int, HOST_WIDE_INT, int);
115 static void store_reg_modify (int, int, HOST_WIDE_INT);
116 static void load_reg (int, HOST_WIDE_INT, int);
117 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
118 static rtx pa_function_value (const_tree, const_tree, bool);
119 static rtx pa_libcall_value (machine_mode, const_rtx);
120 static bool pa_function_value_regno_p (const unsigned int);
121 static void pa_output_function_prologue (FILE *);
122 static void update_total_code_bytes (unsigned int);
123 static void pa_output_function_epilogue (FILE *);
124 static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
125 static int pa_adjust_priority (rtx_insn *, int);
126 static int pa_issue_rate (void);
127 static int pa_reloc_rw_mask (void);
128 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
129 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
130 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
131 ATTRIBUTE_UNUSED;
132 static void pa_encode_section_info (tree, rtx, int);
133 static const char *pa_strip_name_encoding (const char *);
134 static bool pa_function_ok_for_sibcall (tree, tree);
135 static void pa_globalize_label (FILE *, const char *)
136 ATTRIBUTE_UNUSED;
137 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
138 HOST_WIDE_INT, tree);
139 #if !defined(USE_COLLECT2)
140 static void pa_asm_out_constructor (rtx, int);
141 static void pa_asm_out_destructor (rtx, int);
142 #endif
143 static void pa_init_builtins (void);
144 static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
145 static rtx hppa_builtin_saveregs (void);
146 static void hppa_va_start (tree, rtx);
147 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
148 static bool pa_scalar_mode_supported_p (scalar_mode);
149 static bool pa_commutative_p (const_rtx x, int outer_code);
150 static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
151 static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
152 static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
153 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
154 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
155 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
156 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
157 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
158 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
159 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
160 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
161 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
162 static void output_deferred_plabels (void);
163 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
164 static void pa_file_end (void);
165 static void pa_init_libfuncs (void);
166 static rtx pa_struct_value_rtx (tree, int);
167 static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
168 const_tree, bool);
169 static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
170 tree, bool);
171 static void pa_function_arg_advance (cumulative_args_t, machine_mode,
172 const_tree, bool);
173 static rtx pa_function_arg (cumulative_args_t, machine_mode,
174 const_tree, bool);
175 static pad_direction pa_function_arg_padding (machine_mode, const_tree);
176 static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
177 static struct machine_function * pa_init_machine_status (void);
178 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
179 machine_mode,
180 secondary_reload_info *);
181 static bool pa_secondary_memory_needed (machine_mode,
182 reg_class_t, reg_class_t);
183 static void pa_extra_live_on_entry (bitmap);
184 static machine_mode pa_promote_function_mode (const_tree,
185 machine_mode, int *,
186 const_tree, int);
187
188 static void pa_asm_trampoline_template (FILE *);
189 static void pa_trampoline_init (rtx, tree, rtx);
190 static rtx pa_trampoline_adjust_address (rtx);
191 static rtx pa_delegitimize_address (rtx);
192 static bool pa_print_operand_punct_valid_p (unsigned char);
193 static rtx pa_internal_arg_pointer (void);
194 static bool pa_can_eliminate (const int, const int);
195 static void pa_conditional_register_usage (void);
196 static machine_mode pa_c_mode_for_suffix (char);
197 static section *pa_function_section (tree, enum node_frequency, bool, bool);
198 static bool pa_cannot_force_const_mem (machine_mode, rtx);
199 static bool pa_legitimate_constant_p (machine_mode, rtx);
200 static unsigned int pa_section_type_flags (tree, const char *, int);
201 static bool pa_legitimate_address_p (machine_mode, rtx, bool);
202 static bool pa_callee_copies (cumulative_args_t, machine_mode,
203 const_tree, bool);
204 static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
205 static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
206 static bool pa_modes_tieable_p (machine_mode, machine_mode);
207 static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
208 static HOST_WIDE_INT pa_starting_frame_offset (void);
209
210 /* The following extra sections are only used for SOM. */
211 static GTY(()) section *som_readonly_data_section;
212 static GTY(()) section *som_one_only_readonly_data_section;
213 static GTY(()) section *som_one_only_data_section;
214 static GTY(()) section *som_tm_clone_table_section;
215
216 /* Counts for the number of callee-saved general and floating point
217 registers which were saved by the current function's prologue. */
218 static int gr_saved, fr_saved;
219
220 /* Boolean indicating whether the return pointer was saved by the
221 current function's prologue. */
222 static bool rp_saved;
223
224 static rtx find_addr_reg (rtx);
225
226 /* Keep track of the number of bytes we have output in the CODE subspace
227 during this compilation so we'll know when to emit inline long-calls. */
228 unsigned long total_code_bytes;
229
230 /* The last address of the previous function plus the number of bytes in
231 associated thunks that have been output. This is used to determine if
232 a thunk can use an IA-relative branch to reach its target function. */
233 static unsigned int last_address;
234
235 /* Variables to handle plabels that we discover are necessary at assembly
236 output time. They are output after the current function. */
237 struct GTY(()) deferred_plabel
238 {
239 rtx internal_label;
240 rtx symbol;
241 };
242 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
243 deferred_plabels;
244 static size_t n_deferred_plabels = 0;
245 \f
246 /* Initialize the GCC target structure. */
247
248 #undef TARGET_OPTION_OVERRIDE
249 #define TARGET_OPTION_OVERRIDE pa_option_override
250
251 #undef TARGET_ASM_ALIGNED_HI_OP
252 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
253 #undef TARGET_ASM_ALIGNED_SI_OP
254 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
255 #undef TARGET_ASM_ALIGNED_DI_OP
256 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
257 #undef TARGET_ASM_UNALIGNED_HI_OP
258 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
259 #undef TARGET_ASM_UNALIGNED_SI_OP
260 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
261 #undef TARGET_ASM_UNALIGNED_DI_OP
262 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
263 #undef TARGET_ASM_INTEGER
264 #define TARGET_ASM_INTEGER pa_assemble_integer
265
266 #undef TARGET_ASM_FUNCTION_PROLOGUE
267 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
268 #undef TARGET_ASM_FUNCTION_EPILOGUE
269 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
270
271 #undef TARGET_FUNCTION_VALUE
272 #define TARGET_FUNCTION_VALUE pa_function_value
273 #undef TARGET_LIBCALL_VALUE
274 #define TARGET_LIBCALL_VALUE pa_libcall_value
275 #undef TARGET_FUNCTION_VALUE_REGNO_P
276 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
277
278 #undef TARGET_LEGITIMIZE_ADDRESS
279 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
280
281 #undef TARGET_SCHED_ADJUST_COST
282 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
283 #undef TARGET_SCHED_ADJUST_PRIORITY
284 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
285 #undef TARGET_SCHED_ISSUE_RATE
286 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
287
288 #undef TARGET_ENCODE_SECTION_INFO
289 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
290 #undef TARGET_STRIP_NAME_ENCODING
291 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
292
293 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
294 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
295
296 #undef TARGET_COMMUTATIVE_P
297 #define TARGET_COMMUTATIVE_P pa_commutative_p
298
299 #undef TARGET_ASM_OUTPUT_MI_THUNK
300 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
301 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
302 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
303
304 #undef TARGET_ASM_FILE_END
305 #define TARGET_ASM_FILE_END pa_file_end
306
307 #undef TARGET_ASM_RELOC_RW_MASK
308 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
309
310 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
311 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
312
313 #if !defined(USE_COLLECT2)
314 #undef TARGET_ASM_CONSTRUCTOR
315 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
316 #undef TARGET_ASM_DESTRUCTOR
317 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
318 #endif
319
320 #undef TARGET_INIT_BUILTINS
321 #define TARGET_INIT_BUILTINS pa_init_builtins
322
323 #undef TARGET_EXPAND_BUILTIN
324 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
325
326 #undef TARGET_REGISTER_MOVE_COST
327 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
328 #undef TARGET_RTX_COSTS
329 #define TARGET_RTX_COSTS hppa_rtx_costs
330 #undef TARGET_ADDRESS_COST
331 #define TARGET_ADDRESS_COST hppa_address_cost
332
333 #undef TARGET_MACHINE_DEPENDENT_REORG
334 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
335
336 #undef TARGET_INIT_LIBFUNCS
337 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
338
339 #undef TARGET_PROMOTE_FUNCTION_MODE
340 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
341 #undef TARGET_PROMOTE_PROTOTYPES
342 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
343
344 #undef TARGET_STRUCT_VALUE_RTX
345 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
346 #undef TARGET_RETURN_IN_MEMORY
347 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
348 #undef TARGET_MUST_PASS_IN_STACK
349 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
350 #undef TARGET_PASS_BY_REFERENCE
351 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
352 #undef TARGET_CALLEE_COPIES
353 #define TARGET_CALLEE_COPIES pa_callee_copies
354 #undef TARGET_ARG_PARTIAL_BYTES
355 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
356 #undef TARGET_FUNCTION_ARG
357 #define TARGET_FUNCTION_ARG pa_function_arg
358 #undef TARGET_FUNCTION_ARG_ADVANCE
359 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
360 #undef TARGET_FUNCTION_ARG_PADDING
361 #define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
362 #undef TARGET_FUNCTION_ARG_BOUNDARY
363 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
364
365 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
366 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
367 #undef TARGET_EXPAND_BUILTIN_VA_START
368 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
369 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
370 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
371
372 #undef TARGET_SCALAR_MODE_SUPPORTED_P
373 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
374
375 #undef TARGET_CANNOT_FORCE_CONST_MEM
376 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
377
378 #undef TARGET_SECONDARY_RELOAD
379 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
380 #undef TARGET_SECONDARY_MEMORY_NEEDED
381 #define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed
382
383 #undef TARGET_EXTRA_LIVE_ON_ENTRY
384 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
385
386 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
387 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
388 #undef TARGET_TRAMPOLINE_INIT
389 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
390 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
391 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
392 #undef TARGET_DELEGITIMIZE_ADDRESS
393 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
394 #undef TARGET_INTERNAL_ARG_POINTER
395 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
396 #undef TARGET_CAN_ELIMINATE
397 #define TARGET_CAN_ELIMINATE pa_can_eliminate
398 #undef TARGET_CONDITIONAL_REGISTER_USAGE
399 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
400 #undef TARGET_C_MODE_FOR_SUFFIX
401 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
402 #undef TARGET_ASM_FUNCTION_SECTION
403 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
404
405 #undef TARGET_LEGITIMATE_CONSTANT_P
406 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
407 #undef TARGET_SECTION_TYPE_FLAGS
408 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
409 #undef TARGET_LEGITIMATE_ADDRESS_P
410 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
411
412 #undef TARGET_LRA_P
413 #define TARGET_LRA_P hook_bool_void_false
414
415 #undef TARGET_HARD_REGNO_NREGS
416 #define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
417 #undef TARGET_HARD_REGNO_MODE_OK
418 #define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
419 #undef TARGET_MODES_TIEABLE_P
420 #define TARGET_MODES_TIEABLE_P pa_modes_tieable_p
421
422 #undef TARGET_CAN_CHANGE_MODE_CLASS
423 #define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class
424
425 #undef TARGET_CONSTANT_ALIGNMENT
426 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
427
428 #undef TARGET_STARTING_FRAME_OFFSET
429 #define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset
430
431 struct gcc_target targetm = TARGET_INITIALIZER;
432 \f
433 /* Parse the -mfixed-range= option string. */
434
435 static void
436 fix_range (const char *const_str)
437 {
438 int i, first, last;
439 char *str, *dash, *comma;
440
441   /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
442 REG2 are either register names or register numbers. The effect
443 of this option is to mark the registers in the range from REG1 to
444 REG2 as ``fixed'' so they won't be used by the compiler. This is
445 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
446
447 i = strlen (const_str);
448 str = (char *) alloca (i + 1);
449 memcpy (str, const_str, i + 1);
450
451 while (1)
452 {
453 dash = strchr (str, '-');
454 if (!dash)
455 {
456 warning (0, "value of -mfixed-range must have form REG1-REG2");
457 return;
458 }
459 *dash = '\0';
460
461 comma = strchr (dash + 1, ',');
462 if (comma)
463 *comma = '\0';
464
465 first = decode_reg_name (str);
466 if (first < 0)
467 {
468 warning (0, "unknown register name: %s", str);
469 return;
470 }
471
472 last = decode_reg_name (dash + 1);
473 if (last < 0)
474 {
475 warning (0, "unknown register name: %s", dash + 1);
476 return;
477 }
478
479 *dash = '-';
480
481 if (first > last)
482 {
483 warning (0, "%s-%s is an empty range", str, dash + 1);
484 return;
485 }
486
487 for (i = first; i <= last; ++i)
488 fixed_regs[i] = call_used_regs[i] = 1;
489
490 if (!comma)
491 break;
492
493 *comma = ',';
494 str = comma + 1;
495 }
496
497 /* Check if all floating point registers have been fixed. */
498 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
499 if (!fixed_regs[i])
500 break;
501
502 if (i > FP_REG_LAST)
503 target_flags |= MASK_DISABLE_FPREGS;
504 }
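
/* Illustrative note (not in the original source): a typical use is

       -mfixed-range=fr4-fr31

   which marks fr4 through fr31 as fixed and call-used.  If that range
   covers every allocatable FP register (as it does on this port, where
   fr0-fr3 are status/exception registers), the check above also turns
   on MASK_DISABLE_FPREGS.  Comma-separated ranges such as
   fr4-fr15,fr20-fr31 are handled one range per loop iteration.  */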
505
506 /* Implement the TARGET_OPTION_OVERRIDE hook. */
507
508 static void
509 pa_option_override (void)
510 {
511 unsigned int i;
512 cl_deferred_option *opt;
513 vec<cl_deferred_option> *v
514 = (vec<cl_deferred_option> *) pa_deferred_options;
515
516 if (v)
517 FOR_EACH_VEC_ELT (*v, i, opt)
518 {
519 switch (opt->opt_index)
520 {
521 case OPT_mfixed_range_:
522 fix_range (opt->arg);
523 break;
524
525 default:
526 gcc_unreachable ();
527 }
528 }
529
530 if (flag_pic && TARGET_PORTABLE_RUNTIME)
531 {
532 warning (0, "PIC code generation is not supported in the portable runtime model");
533 }
534
535 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
536 {
537 warning (0, "PIC code generation is not compatible with fast indirect calls");
538 }
539
540 if (! TARGET_GAS && write_symbols != NO_DEBUG)
541 {
542 warning (0, "-g is only supported when using GAS on this processor,");
543 warning (0, "-g option disabled");
544 write_symbols = NO_DEBUG;
545 }
546
547 /* We only support the "big PIC" model now. And we always generate PIC
548 code when in 64bit mode. */
549 if (flag_pic == 1 || TARGET_64BIT)
550 flag_pic = 2;
551
552 /* Disable -freorder-blocks-and-partition as we don't support hot and
553 cold partitioning. */
554 if (flag_reorder_blocks_and_partition)
555 {
556 inform (input_location,
557 "-freorder-blocks-and-partition does not work "
558 "on this architecture");
559 flag_reorder_blocks_and_partition = 0;
560 flag_reorder_blocks = 1;
561 }
562
563 /* We can't guarantee that .dword is available for 32-bit targets. */
564 if (UNITS_PER_WORD == 4)
565 targetm.asm_out.aligned_op.di = NULL;
566
567 /* The unaligned ops are only available when using GAS. */
568 if (!TARGET_GAS)
569 {
570 targetm.asm_out.unaligned_op.hi = NULL;
571 targetm.asm_out.unaligned_op.si = NULL;
572 targetm.asm_out.unaligned_op.di = NULL;
573 }
574
575 init_machine_status = pa_init_machine_status;
576 }
577
578 enum pa_builtins
579 {
580 PA_BUILTIN_COPYSIGNQ,
581 PA_BUILTIN_FABSQ,
582 PA_BUILTIN_INFQ,
583 PA_BUILTIN_HUGE_VALQ,
584 PA_BUILTIN_max
585 };
586
587 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
588
589 static void
590 pa_init_builtins (void)
591 {
592 #ifdef DONT_HAVE_FPUTC_UNLOCKED
593 {
594 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
595 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
596 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
597 }
598 #endif
599 #if TARGET_HPUX_11
600 {
601 tree decl;
602
603 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
604 set_user_assembler_name (decl, "_Isfinite");
605 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
606 set_user_assembler_name (decl, "_Isfinitef");
607 }
608 #endif
609
610 if (HPUX_LONG_DOUBLE_LIBRARY)
611 {
612 tree decl, ftype;
613
614 /* Under HPUX, the __float128 type is a synonym for "long double". */
615 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
616 "__float128");
617
618 /* TFmode support builtins. */
619 ftype = build_function_type_list (long_double_type_node,
620 long_double_type_node,
621 NULL_TREE);
622 decl = add_builtin_function ("__builtin_fabsq", ftype,
623 PA_BUILTIN_FABSQ, BUILT_IN_MD,
624 "_U_Qfabs", NULL_TREE);
625 TREE_READONLY (decl) = 1;
626 pa_builtins[PA_BUILTIN_FABSQ] = decl;
627
628 ftype = build_function_type_list (long_double_type_node,
629 long_double_type_node,
630 long_double_type_node,
631 NULL_TREE);
632 decl = add_builtin_function ("__builtin_copysignq", ftype,
633 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
634 "_U_Qfcopysign", NULL_TREE);
635 TREE_READONLY (decl) = 1;
636 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
637
638 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
639 decl = add_builtin_function ("__builtin_infq", ftype,
640 PA_BUILTIN_INFQ, BUILT_IN_MD,
641 NULL, NULL_TREE);
642 pa_builtins[PA_BUILTIN_INFQ] = decl;
643
644 decl = add_builtin_function ("__builtin_huge_valq", ftype,
645 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
646 NULL, NULL_TREE);
647 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
648 }
649 }
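
/* Illustrative note (not in the original source): with an HP-UX long
   double library, user code such as

       long double y = __builtin_fabsq (x);
       long double z = __builtin_copysignq (x, y);

   compiles to calls to _U_Qfabs and _U_Qfcopysign respectively, while
   __builtin_infq and __builtin_huge_valq expand inline (see the
   PA_BUILTIN_INFQ handling in pa_expand_builtin below).  */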
650
651 static rtx
652 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
653 machine_mode mode ATTRIBUTE_UNUSED,
654 int ignore ATTRIBUTE_UNUSED)
655 {
656 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
657 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
658
659 switch (fcode)
660 {
661 case PA_BUILTIN_FABSQ:
662 case PA_BUILTIN_COPYSIGNQ:
663 return expand_call (exp, target, ignore);
664
665 case PA_BUILTIN_INFQ:
666 case PA_BUILTIN_HUGE_VALQ:
667 {
668 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
669 REAL_VALUE_TYPE inf;
670 rtx tmp;
671
672 real_inf (&inf);
673 tmp = const_double_from_real_value (inf, target_mode);
674
675 tmp = validize_mem (force_const_mem (target_mode, tmp));
676
677 if (target == 0)
678 target = gen_reg_rtx (target_mode);
679
680 emit_move_insn (target, tmp);
681 return target;
682 }
683
684 default:
685 gcc_unreachable ();
686 }
687
688 return NULL_RTX;
689 }
690
691 /* Function to init struct machine_function.
692 This will be called, via a pointer variable,
693 from push_function_context. */
694
695 static struct machine_function *
696 pa_init_machine_status (void)
697 {
698 return ggc_cleared_alloc<machine_function> ();
699 }
700
701 /* If FROM is a probable pointer register, mark TO as a probable
702 pointer register with the same pointer alignment as FROM. */
703
704 static void
705 copy_reg_pointer (rtx to, rtx from)
706 {
707 if (REG_POINTER (from))
708 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
709 }
710
711 /* Return 1 if X contains a symbolic expression. We know these
712 expressions will have one of a few well defined forms, so
713 we need only check those forms. */
714 int
715 pa_symbolic_expression_p (rtx x)
716 {
717
718 /* Strip off any HIGH. */
719 if (GET_CODE (x) == HIGH)
720 x = XEXP (x, 0);
721
722 return symbolic_operand (x, VOIDmode);
723 }
724
725 /* Accept any constant that can be moved in one instruction into a
726 general register. */
727 int
728 pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
729 {
730 /* OK if ldo, ldil, or zdepi, can be used. */
731 return (VAL_14_BITS_P (ival)
732 || pa_ldil_cint_p (ival)
733 || pa_zdepi_cint_p (ival));
734 }
735 \f
736 /* True iff ldil can be used to load this CONST_INT. The least
737 significant 11 bits of the value must be zero and the value must
738 not change sign when extended from 32 to 64 bits. */
739 int
740 pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
741 {
742 unsigned HOST_WIDE_INT x;
743
744 x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
745 return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
746 }
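
/* Illustrative worked example (not in the original source), assuming a
   64-bit HOST_WIDE_INT:

       ival = 0x12345000:  ival & (0xffffffff80000000 | 0x7ff) == 0,
                           so ldil can load it (low 11 bits clear,
                           bit 31 clear).
       ival = 0x12345678:  the masked value is 0x678, which is neither
                           0 nor the sign block, so ldil alone cannot
                           load it.

   ldil sets the left 21 bits of a register, hence the low-11-bit and
   sign-stability requirements described above.  */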
747
748 /* True iff zdepi can be used to generate this CONST_INT.
749 zdepi first sign extends a 5-bit signed number to a given field
750 length, then places this field anywhere in a zero. */
751 int
752 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
753 {
754 unsigned HOST_WIDE_INT lsb_mask, t;
755
756 /* This might not be obvious, but it's at least fast.
757 This function is critical; we don't have the time loops would take. */
758 lsb_mask = x & -x;
759 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
760 /* Return true iff t is a power of two. */
761 return ((t & (t - 1)) == 0);
762 }
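
/* Illustrative worked example (not in the original source):

       x = 0x7f0 (0111 1111 0000), i.e. the 5-bit value -1 sign
       extended to a 7-bit field and deposited at bit 4:
         lsb_mask = x & -x                        = 0x010
         t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1)
           = (0x7f + 0x10) & ~0xf                 = 0x80
       t is a power of two, so the constant is accepted.

       x = 0x210 (10 0001 0000) would need the 6-bit field 100001,
       which no sign-extended 5-bit value produces:
         t = (0x21 + 0x10) & ~0xf                 = 0x30
       t is not a power of two, so the constant is rejected.  */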
763
764 /* True iff depi or extru can be used to compute (reg & mask).
765 Accept bit pattern like these:
766 0....01....1
767 1....10....0
768 1..10..01..1 */
769 int
770 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
771 {
772 mask = ~mask;
773 mask += mask & -mask;
774 return (mask & (mask - 1)) == 0;
775 }
776
777 /* True iff depi can be used to compute (reg | MASK). */
778 int
779 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
780 {
781 mask += mask & -mask;
782 return (mask & (mask - 1)) == 0;
783 }
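
/* Illustrative worked examples (not in the original source):

   pa_and_mask_p: for a mask of the form 1..10..01..1, ~mask is a
       single run of ones; adding its lowest set bit collapses the run
       to a single bit, so the power-of-two test passes.

   pa_ior_mask_p: mask = 0x01f0, a single run of ones:
       0x01f0 + 0x0010 = 0x0200, a power of two, so depi can deposit
       the run.  mask = 0x0110 fails, since 0x0110 + 0x0010 = 0x0120
       is not a power of two (the set bits are not contiguous).  */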
784 \f
785 /* Legitimize PIC addresses. If the address is already
786 position-independent, we return ORIG. Newly generated
787 position-independent addresses go to REG. If we need more
788 than one register, we lose. */
789
790 static rtx
791 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
792 {
793 rtx pic_ref = orig;
794
795 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
796
797 /* Labels need special handling. */
798 if (pic_label_operand (orig, mode))
799 {
800 rtx_insn *insn;
801
802 /* We do not want to go through the movXX expanders here since that
803 would create recursion.
804
805 Nor do we really want to call a generator for a named pattern
806 since that requires multiple patterns if we want to support
807 multiple word sizes.
808
809 So instead we just emit the raw set, which avoids the movXX
810 expanders completely. */
811 mark_reg_pointer (reg, BITS_PER_UNIT);
812 insn = emit_insn (gen_rtx_SET (reg, orig));
813
814 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
815 add_reg_note (insn, REG_EQUAL, orig);
816
817 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
818 and update LABEL_NUSES because this is not done automatically. */
819 if (reload_in_progress || reload_completed)
820 {
821 /* Extract LABEL_REF. */
822 if (GET_CODE (orig) == CONST)
823 orig = XEXP (XEXP (orig, 0), 0);
824 /* Extract CODE_LABEL. */
825 orig = XEXP (orig, 0);
826 add_reg_note (insn, REG_LABEL_OPERAND, orig);
827 /* Make sure we have label and not a note. */
828 if (LABEL_P (orig))
829 LABEL_NUSES (orig)++;
830 }
831 crtl->uses_pic_offset_table = 1;
832 return reg;
833 }
834 if (GET_CODE (orig) == SYMBOL_REF)
835 {
836 rtx_insn *insn;
837 rtx tmp_reg;
838
839 gcc_assert (reg);
840
841 /* Before reload, allocate a temporary register for the intermediate
842 result. This allows the sequence to be deleted when the final
843 result is unused and the insns are trivially dead. */
844 tmp_reg = ((reload_in_progress || reload_completed)
845 ? reg : gen_reg_rtx (Pmode));
846
847 if (function_label_operand (orig, VOIDmode))
848 {
849 /* Force function label into memory in word mode. */
850 orig = XEXP (force_const_mem (word_mode, orig), 0);
851 /* Load plabel address from DLT. */
852 emit_move_insn (tmp_reg,
853 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
854 gen_rtx_HIGH (word_mode, orig)));
855 pic_ref
856 = gen_const_mem (Pmode,
857 gen_rtx_LO_SUM (Pmode, tmp_reg,
858 gen_rtx_UNSPEC (Pmode,
859 gen_rtvec (1, orig),
860 UNSPEC_DLTIND14R)));
861 emit_move_insn (reg, pic_ref);
862 /* Now load address of function descriptor. */
863 pic_ref = gen_rtx_MEM (Pmode, reg);
864 }
865 else
866 {
867 /* Load symbol reference from DLT. */
868 emit_move_insn (tmp_reg,
869 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
870 gen_rtx_HIGH (word_mode, orig)));
871 pic_ref
872 = gen_const_mem (Pmode,
873 gen_rtx_LO_SUM (Pmode, tmp_reg,
874 gen_rtx_UNSPEC (Pmode,
875 gen_rtvec (1, orig),
876 UNSPEC_DLTIND14R)));
877 }
878
879 crtl->uses_pic_offset_table = 1;
880 mark_reg_pointer (reg, BITS_PER_UNIT);
881 insn = emit_move_insn (reg, pic_ref);
882
883 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
884 set_unique_reg_note (insn, REG_EQUAL, orig);
885
886 return reg;
887 }
888 else if (GET_CODE (orig) == CONST)
889 {
890 rtx base;
891
892 if (GET_CODE (XEXP (orig, 0)) == PLUS
893 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
894 return orig;
895
896 gcc_assert (reg);
897 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
898
899 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
900 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
901 base == reg ? 0 : reg);
902
903 if (GET_CODE (orig) == CONST_INT)
904 {
905 if (INT_14_BITS (orig))
906 return plus_constant (Pmode, base, INTVAL (orig));
907 orig = force_reg (Pmode, orig);
908 }
909 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
910 /* Likewise, should we set special REG_NOTEs here? */
911 }
912
913 return pic_ref;
914 }
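
/* Illustrative note (not in the original source): for a plain data
   SYMBOL_REF the code above emits, in effect,

       (set tmp_reg (plus pic_offset_table_rtx (high sym)))
       (set reg (mem (lo_sum tmp_reg (unspec [sym] UNSPEC_DLTIND14R))))

   i.e. the symbol's address is loaded from its DLT slot, addressed
   relative to the PIC register.  Function labels get one further
   dereference to reach the procedure descriptor (plabel).  */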
915
916 static GTY(()) rtx gen_tls_tga;
917
918 static rtx
919 gen_tls_get_addr (void)
920 {
921 if (!gen_tls_tga)
922 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
923 return gen_tls_tga;
924 }
925
926 static rtx
927 hppa_tls_call (rtx arg)
928 {
929 rtx ret;
930
931 ret = gen_reg_rtx (Pmode);
932 emit_library_call_value (gen_tls_get_addr (), ret,
933 LCT_CONST, Pmode, arg, Pmode);
934
935 return ret;
936 }
937
938 static rtx
939 legitimize_tls_address (rtx addr)
940 {
941 rtx ret, tmp, t1, t2, tp;
942 rtx_insn *insn;
943
944 /* Currently, we can't handle anything but a SYMBOL_REF. */
945 if (GET_CODE (addr) != SYMBOL_REF)
946 return addr;
947
948 switch (SYMBOL_REF_TLS_MODEL (addr))
949 {
950 case TLS_MODEL_GLOBAL_DYNAMIC:
951 tmp = gen_reg_rtx (Pmode);
952 if (flag_pic)
953 emit_insn (gen_tgd_load_pic (tmp, addr));
954 else
955 emit_insn (gen_tgd_load (tmp, addr));
956 ret = hppa_tls_call (tmp);
957 break;
958
959 case TLS_MODEL_LOCAL_DYNAMIC:
960 ret = gen_reg_rtx (Pmode);
961 tmp = gen_reg_rtx (Pmode);
962 start_sequence ();
963 if (flag_pic)
964 emit_insn (gen_tld_load_pic (tmp, addr));
965 else
966 emit_insn (gen_tld_load (tmp, addr));
967 t1 = hppa_tls_call (tmp);
968 insn = get_insns ();
969 end_sequence ();
970 t2 = gen_reg_rtx (Pmode);
971 emit_libcall_block (insn, t2, t1,
972 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
973 UNSPEC_TLSLDBASE));
974 emit_insn (gen_tld_offset_load (ret, addr, t2));
975 break;
976
977 case TLS_MODEL_INITIAL_EXEC:
978 tp = gen_reg_rtx (Pmode);
979 tmp = gen_reg_rtx (Pmode);
980 ret = gen_reg_rtx (Pmode);
981 emit_insn (gen_tp_load (tp));
982 if (flag_pic)
983 emit_insn (gen_tie_load_pic (tmp, addr));
984 else
985 emit_insn (gen_tie_load (tmp, addr));
986 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
987 break;
988
989 case TLS_MODEL_LOCAL_EXEC:
990 tp = gen_reg_rtx (Pmode);
991 ret = gen_reg_rtx (Pmode);
992 emit_insn (gen_tp_load (tp));
993 emit_insn (gen_tle_load (ret, addr, tp));
994 break;
995
996 default:
997 gcc_unreachable ();
998 }
999
1000 return ret;
1001 }
1002
1003 /* Helper for hppa_legitimize_address. Given X, return true if it
1004 is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
1005
1006    These respectively represent canonical shift-add rtxs or scaled
1007 memory addresses. */
1008 static bool
1009 mem_shadd_or_shadd_rtx_p (rtx x)
1010 {
1011 return ((GET_CODE (x) == ASHIFT
1012 || GET_CODE (x) == MULT)
1013 && GET_CODE (XEXP (x, 1)) == CONST_INT
1014 && ((GET_CODE (x) == ASHIFT
1015 && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
1016 || (GET_CODE (x) == MULT
1017 && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
1018 }
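
/* Illustrative note (not in the original source): both
   (mult (reg) (const_int 4)) and (ashift (reg) (const_int 2)) satisfy
   this predicate.  The MULT form is what appears inside a MEM, while
   ASHIFT is the canonical shift-add form outside a MEM, which is why
   the constant must be converted with exact_log2 whenever a MULT is
   rewritten into ASHIFT form below.  */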
1019
1020 /* Try machine-dependent ways of modifying an illegitimate address
1021 to be legitimate. If we find one, return the new, valid address.
1022 This macro is used in only one place: `memory_address' in explow.c.
1023
1024 OLDX is the address as it was before break_out_memory_refs was called.
1025 In some cases it is useful to look at this to decide what needs to be done.
1026
1027 It is always safe for this macro to do nothing. It exists to recognize
1028 opportunities to optimize the output.
1029
1030 For the PA, transform:
1031
1032 memory(X + <large int>)
1033
1034 into:
1035
1036 if (<large int> & mask) >= 16
1037 Y = (<large int> & ~mask) + mask + 1 Round up.
1038 else
1039 Y = (<large int> & ~mask) Round down.
1040 Z = X + Y
1041 memory (Z + (<large int> - Y));
1042
1043 This is for CSE to find several similar references, and only use one Z.
1044
1045 X can either be a SYMBOL_REF or REG, but because combine cannot
1046 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1047 D will not fit in 14 bits.
1048
1049 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1050 0x1f as the mask.
1051
1052 MODE_INT references allow displacements which fit in 14 bits, so use
1053 0x3fff as the mask.
1054
1055 This relies on the fact that most mode MODE_FLOAT references will use FP
1056 registers and most mode MODE_INT references will use integer registers.
1057 (In the rare case of an FP register used in an integer MODE, we depend
1058 on secondary reloads to clean things up.)
1059
1060
1061 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1062 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1063 addressing modes to be used).
1064
1065 Note that the addresses passed into hppa_legitimize_address always
1066 come from a MEM, so we only have to match the MULT form on incoming
1067 addresses. But to be future proof we also match the ASHIFT form.
1068
1069 However, this routine always places those shift-add sequences into
1070 registers, so we have to generate the ASHIFT form as our output.
1071
1072 Put X and Z into registers. Then put the entire expression into
1073 a register. */
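
/* Illustrative worked example (not in the original source): for a
   MODE_INT reference to X + 100000 the mask is 0x3fff:

       100000 & 0x3fff = 1696, which is < 8192, so round down:
       Y = 100000 & ~0x3fff = 98304
       Z = X + 98304, and the residual 100000 - 98304 = 1696 fits in
       the 14-bit displacement of an ldo/ldw.

   For X + 110000 the low part 11696 is >= 8192, so Y rounds up to
   114688 and the residual is -4688, again a valid 14-bit value.  */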
1074
1075 rtx
1076 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1077 machine_mode mode)
1078 {
1079 rtx orig = x;
1080
1081 /* We need to canonicalize the order of operands in unscaled indexed
1082 addresses since the code that checks if an address is valid doesn't
1083 always try both orders. */
1084 if (!TARGET_NO_SPACE_REGS
1085 && GET_CODE (x) == PLUS
1086 && GET_MODE (x) == Pmode
1087 && REG_P (XEXP (x, 0))
1088 && REG_P (XEXP (x, 1))
1089 && REG_POINTER (XEXP (x, 0))
1090 && !REG_POINTER (XEXP (x, 1)))
1091 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1092
1093 if (tls_referenced_p (x))
1094 return legitimize_tls_address (x);
1095 else if (flag_pic)
1096 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1097
1098 /* Strip off CONST. */
1099 if (GET_CODE (x) == CONST)
1100 x = XEXP (x, 0);
1101
1102 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1103 That should always be safe. */
1104 if (GET_CODE (x) == PLUS
1105 && GET_CODE (XEXP (x, 0)) == REG
1106 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1107 {
1108 rtx reg = force_reg (Pmode, XEXP (x, 1));
1109 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1110 }
1111
1112 /* Note we must reject symbols which represent function addresses
1113 since the assembler/linker can't handle arithmetic on plabels. */
1114 if (GET_CODE (x) == PLUS
1115 && GET_CODE (XEXP (x, 1)) == CONST_INT
1116 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1117 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1118 || GET_CODE (XEXP (x, 0)) == REG))
1119 {
1120 rtx int_part, ptr_reg;
1121 int newoffset;
1122 int offset = INTVAL (XEXP (x, 1));
1123 int mask;
1124
1125 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1126 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1127
1128 /* Choose which way to round the offset. Round up if we
1129 are >= halfway to the next boundary. */
1130 if ((offset & mask) >= ((mask + 1) / 2))
1131 newoffset = (offset & ~ mask) + mask + 1;
1132 else
1133 newoffset = (offset & ~ mask);
1134
1135 /* If the newoffset will not fit in 14 bits (ldo), then
1136 handling this would take 4 or 5 instructions (2 to load
1137 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1138 add the new offset and the SYMBOL_REF.) Combine can
1139 not handle 4->2 or 5->2 combinations, so do not create
1140 them. */
1141 if (! VAL_14_BITS_P (newoffset)
1142 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1143 {
1144 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1145 rtx tmp_reg
1146 = force_reg (Pmode,
1147 gen_rtx_HIGH (Pmode, const_part));
1148 ptr_reg
1149 = force_reg (Pmode,
1150 gen_rtx_LO_SUM (Pmode,
1151 tmp_reg, const_part));
1152 }
1153 else
1154 {
1155 if (! VAL_14_BITS_P (newoffset))
1156 int_part = force_reg (Pmode, GEN_INT (newoffset));
1157 else
1158 int_part = GEN_INT (newoffset);
1159
1160 ptr_reg = force_reg (Pmode,
1161 gen_rtx_PLUS (Pmode,
1162 force_reg (Pmode, XEXP (x, 0)),
1163 int_part));
1164 }
1165 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1166 }
1167
1168 /* Handle (plus (mult (a) (mem_shadd_constant)) (b)). */
1169
1170 if (GET_CODE (x) == PLUS
1171 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1172 && (OBJECT_P (XEXP (x, 1))
1173 || GET_CODE (XEXP (x, 1)) == SUBREG)
1174 && GET_CODE (XEXP (x, 1)) != CONST)
1175 {
1176 /* If we were given a MULT, we must fix the constant
1177 as we're going to create the ASHIFT form. */
1178 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1179 if (GET_CODE (XEXP (x, 0)) == MULT)
1180 shift_val = exact_log2 (shift_val);
1181
1182 rtx reg1, reg2;
1183 reg1 = XEXP (x, 1);
1184 if (GET_CODE (reg1) != REG)
1185 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1186
1187 reg2 = XEXP (XEXP (x, 0), 0);
1188 if (GET_CODE (reg2) != REG)
1189 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1190
1191 return force_reg (Pmode,
1192 gen_rtx_PLUS (Pmode,
1193 gen_rtx_ASHIFT (Pmode, reg2,
1194 GEN_INT (shift_val)),
1195 reg1));
1196 }
1197
1198 /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).
1199
1200 Only do so for floating point modes since this is more speculative
1201 and we lose if it's an integer store. */
1202 if (GET_CODE (x) == PLUS
1203 && GET_CODE (XEXP (x, 0)) == PLUS
1204 && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
1205 && (mode == SFmode || mode == DFmode))
1206 {
1207 int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
1208
1209 /* If we were given a MULT, we must fix the constant
1210 as we're going to create the ASHIFT form. */
1211 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
1212 shift_val = exact_log2 (shift_val);
1213
1214 /* Try and figure out what to use as a base register. */
1215 rtx reg1, reg2, base, idx;
1216
1217 reg1 = XEXP (XEXP (x, 0), 1);
1218 reg2 = XEXP (x, 1);
1219 base = NULL_RTX;
1220 idx = NULL_RTX;
1221
1222 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1223 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1224 it's a base register below. */
1225 if (GET_CODE (reg1) != REG)
1226 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1227
1228 if (GET_CODE (reg2) != REG)
1229 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1230
1231 /* Figure out what the base and index are. */
1232
1233 if (GET_CODE (reg1) == REG
1234 && REG_POINTER (reg1))
1235 {
1236 base = reg1;
1237 idx = gen_rtx_PLUS (Pmode,
1238 gen_rtx_ASHIFT (Pmode,
1239 XEXP (XEXP (XEXP (x, 0), 0), 0),
1240 GEN_INT (shift_val)),
1241 XEXP (x, 1));
1242 }
1243 else if (GET_CODE (reg2) == REG
1244 && REG_POINTER (reg2))
1245 {
1246 base = reg2;
1247 idx = XEXP (x, 0);
1248 }
1249
1250 if (base == 0)
1251 return orig;
1252
1253 /* If the index adds a large constant, try to scale the
1254 constant so that it can be loaded with only one insn. */
1255 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1256 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1257 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1258 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1259 {
1260 /* Divide the CONST_INT by the scale factor, then add it to A. */
1261 int val = INTVAL (XEXP (idx, 1));
1262 val /= (1 << shift_val);
1263
1264 reg1 = XEXP (XEXP (idx, 0), 0);
1265 if (GET_CODE (reg1) != REG)
1266 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1267
1268 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1269
1270 /* We can now generate a simple scaled indexed address. */
1271 return
1272 force_reg
1273 (Pmode, gen_rtx_PLUS (Pmode,
1274 gen_rtx_ASHIFT (Pmode, reg1,
1275 GEN_INT (shift_val)),
1276 base));
1277 }
1278
1279 /* If B + C is still a valid base register, then add them. */
1280 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1281 && INTVAL (XEXP (idx, 1)) <= 4096
1282 && INTVAL (XEXP (idx, 1)) >= -4096)
1283 {
1284 rtx reg1, reg2;
1285
1286 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1287
1288 reg2 = XEXP (XEXP (idx, 0), 0);
1289 if (GET_CODE (reg2) != CONST_INT)
1290 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1291
1292 return force_reg (Pmode,
1293 gen_rtx_PLUS (Pmode,
1294 gen_rtx_ASHIFT (Pmode, reg2,
1295 GEN_INT (shift_val)),
1296 reg1));
1297 }
1298
1299 /* Get the index into a register, then add the base + index and
1300 return a register holding the result. */
1301
1302 /* First get A into a register. */
1303 reg1 = XEXP (XEXP (idx, 0), 0);
1304 if (GET_CODE (reg1) != REG)
1305 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1306
1307 /* And get B into a register. */
1308 reg2 = XEXP (idx, 1);
1309 if (GET_CODE (reg2) != REG)
1310 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1311
1312 reg1 = force_reg (Pmode,
1313 gen_rtx_PLUS (Pmode,
1314 gen_rtx_ASHIFT (Pmode, reg1,
1315 GEN_INT (shift_val)),
1316 reg2));
1317
1318 /* Add the result to our base register and return. */
1319 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1320
1321 }
1322
1323 /* Uh-oh. We might have an address for x[n-100000]. This needs
1324 special handling to avoid creating an indexed memory address
1325 with x-100000 as the base.
1326
1327 If the constant part is small enough, then it's still safe because
1328 there is a guard page at the beginning and end of the data segment.
1329
1330 Scaled references are common enough that we want to try and rearrange the
1331 terms so that we can use indexing for these addresses too. Only
1332      do the optimization for floating point modes.  */
1333
1334 if (GET_CODE (x) == PLUS
1335 && pa_symbolic_expression_p (XEXP (x, 1)))
1336 {
1337 /* Ugly. We modify things here so that the address offset specified
1338 by the index expression is computed first, then added to x to form
1339 the entire address. */
1340
1341 rtx regx1, regx2, regy1, regy2, y;
1342
1343 /* Strip off any CONST. */
1344 y = XEXP (x, 1);
1345 if (GET_CODE (y) == CONST)
1346 y = XEXP (y, 0);
1347
1348 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1349 {
1350 /* See if this looks like
1351 (plus (mult (reg) (mem_shadd_const))
1352 (const (plus (symbol_ref) (const_int))))
1353
1354 Where const_int is small. In that case the const
1355 expression is a valid pointer for indexing.
1356
1357 If const_int is big, but can be divided evenly by shadd_const
1358 and added to (reg). This allows more scaled indexed addresses. */
1359 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1360 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1361 && GET_CODE (XEXP (y, 1)) == CONST_INT
1362 && INTVAL (XEXP (y, 1)) >= -4096
1363 && INTVAL (XEXP (y, 1)) <= 4095)
1364 {
1365 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1366
1367 /* If we were given a MULT, we must fix the constant
1368 as we're going to create the ASHIFT form. */
1369 if (GET_CODE (XEXP (x, 0)) == MULT)
1370 shift_val = exact_log2 (shift_val);
1371
1372 rtx reg1, reg2;
1373
1374 reg1 = XEXP (x, 1);
1375 if (GET_CODE (reg1) != REG)
1376 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1377
1378 reg2 = XEXP (XEXP (x, 0), 0);
1379 if (GET_CODE (reg2) != REG)
1380 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1381
1382 return
1383 force_reg (Pmode,
1384 gen_rtx_PLUS (Pmode,
1385 gen_rtx_ASHIFT (Pmode,
1386 reg2,
1387 GEN_INT (shift_val)),
1388 reg1));
1389 }
1390 else if ((mode == DFmode || mode == SFmode)
1391 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1392 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1393 && GET_CODE (XEXP (y, 1)) == CONST_INT
1394 && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
1395 {
1396 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1397
1398 /* If we were given a MULT, we must fix the constant
1399 as we're going to create the ASHIFT form. */
1400 if (GET_CODE (XEXP (x, 0)) == MULT)
1401 shift_val = exact_log2 (shift_val);
1402
1403 regx1
1404 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1405 / INTVAL (XEXP (XEXP (x, 0), 1))));
1406 regx2 = XEXP (XEXP (x, 0), 0);
1407 if (GET_CODE (regx2) != REG)
1408 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1409 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1410 regx2, regx1));
1411 return
1412 force_reg (Pmode,
1413 gen_rtx_PLUS (Pmode,
1414 gen_rtx_ASHIFT (Pmode, regx2,
1415 GEN_INT (shift_val)),
1416 force_reg (Pmode, XEXP (y, 0))));
1417 }
1418 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1419 && INTVAL (XEXP (y, 1)) >= -4096
1420 && INTVAL (XEXP (y, 1)) <= 4095)
1421 {
1422 /* This is safe because of the guard page at the
1423 beginning and end of the data space. Just
1424 return the original address. */
1425 return orig;
1426 }
1427 else
1428 {
1429 /* Doesn't look like one we can optimize. */
1430 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1431 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1432 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1433 regx1 = force_reg (Pmode,
1434 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1435 regx1, regy2));
1436 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1437 }
1438 }
1439 }
1440
1441 return orig;
1442 }
1443
1444 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1445
1446 Compute extra cost of moving data between one register class
1447 and another.
1448
1449 Make moves from SAR so expensive they should never happen. We used to
1450 have 0xffff here, but that generates overflow in rare cases.
1451
1452 Copies involving a FP register and a non-FP register are relatively
1453 expensive because they must go through memory.
1454
1455 Other copies are reasonably cheap. */
1456
1457 static int
1458 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1459 reg_class_t from, reg_class_t to)
1460 {
1461 if (from == SHIFT_REGS)
1462 return 0x100;
1463 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1464 return 18;
1465 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1466 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1467 return 16;
1468 else
1469 return 2;
1470 }
1471
1472 /* For the HPPA, REG and REG+CONST is cost 0
1473 and addresses involving symbolic constants are cost 2.
1474
1475 PIC addresses are very expensive.
1476
1477 It is no coincidence that this has the same structure
1478 as pa_legitimate_address_p. */
1479
1480 static int
1481 hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
1482 addr_space_t as ATTRIBUTE_UNUSED,
1483 bool speed ATTRIBUTE_UNUSED)
1484 {
1485 switch (GET_CODE (X))
1486 {
1487 case REG:
1488 case PLUS:
1489 case LO_SUM:
1490 return 1;
1491 case HIGH:
1492 return 2;
1493 default:
1494 return 4;
1495 }
1496 }
1497
1498 /* Compute a (partial) cost for rtx X. Return true if the complete
1499 cost has been computed, and false if subexpressions should be
1500 scanned. In either case, *TOTAL contains the cost result. */
1501
1502 static bool
1503 hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
1504 int opno ATTRIBUTE_UNUSED,
1505 int *total, bool speed ATTRIBUTE_UNUSED)
1506 {
1507 int factor;
1508 int code = GET_CODE (x);
1509
1510 switch (code)
1511 {
1512 case CONST_INT:
1513 if (INTVAL (x) == 0)
1514 *total = 0;
1515 else if (INT_14_BITS (x))
1516 *total = 1;
1517 else
1518 *total = 2;
1519 return true;
1520
1521 case HIGH:
1522 *total = 2;
1523 return true;
1524
1525 case CONST:
1526 case LABEL_REF:
1527 case SYMBOL_REF:
1528 *total = 4;
1529 return true;
1530
1531 case CONST_DOUBLE:
1532 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1533 && outer_code != SET)
1534 *total = 0;
1535 else
1536 *total = 8;
1537 return true;
1538
1539 case MULT:
1540 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1541 {
1542 *total = COSTS_N_INSNS (3);
1543 return true;
1544 }
1545
1546 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1547 factor = GET_MODE_SIZE (mode) / 4;
1548 if (factor == 0)
1549 factor = 1;
1550
1551 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1552 *total = factor * factor * COSTS_N_INSNS (8);
1553 else
1554 *total = factor * factor * COSTS_N_INSNS (20);
1555 return true;
1556
1557 case DIV:
1558 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1559 {
1560 *total = COSTS_N_INSNS (14);
1561 return true;
1562 }
1563 /* FALLTHRU */
1564
1565 case UDIV:
1566 case MOD:
1567 case UMOD:
1568 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1569 factor = GET_MODE_SIZE (mode) / 4;
1570 if (factor == 0)
1571 factor = 1;
1572
1573 *total = factor * factor * COSTS_N_INSNS (60);
1574 return true;
1575
1576 case PLUS: /* this includes shNadd insns */
1577 case MINUS:
1578 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1579 {
1580 *total = COSTS_N_INSNS (3);
1581 return true;
1582 }
1583
1584 /* A size N times larger than UNITS_PER_WORD needs N times as
1585 many insns, taking N times as long. */
1586 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
1587 if (factor == 0)
1588 factor = 1;
1589 *total = factor * COSTS_N_INSNS (1);
1590 return true;
1591
1592 case ASHIFT:
1593 case ASHIFTRT:
1594 case LSHIFTRT:
1595 *total = COSTS_N_INSNS (1);
1596 return true;
1597
1598 default:
1599 return false;
1600 }
1601 }
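
/* Illustrative worked example (not in the original source): a DImode
   MULT on a 32-bit target gives factor = 8 / 4 = 2, so the cost is
   2 * 2 * COSTS_N_INSNS (8) when a PA 1.1 FPU is available, or
   2 * 2 * COSTS_N_INSNS (20) when the multiply must be done in
   software.  */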
1602
1603 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1604 new rtx with the correct mode. */
1605 static inline rtx
1606 force_mode (machine_mode mode, rtx orig)
1607 {
1608 if (mode == GET_MODE (orig))
1609 return orig;
1610
1611 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1612
1613 return gen_rtx_REG (mode, REGNO (orig));
1614 }
1615
1616 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1617
1618 static bool
1619 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1620 {
1621 return tls_referenced_p (x);
1622 }
1623
1624 /* Emit insns to move operands[1] into operands[0].
1625
1626 Return 1 if we have written out everything that needs to be done to
1627 do the move. Otherwise, return 0 and the caller will emit the move
1628 normally.
1629
1630 Note SCRATCH_REG may not be in the proper mode depending on how it
1631 will be used. This routine is responsible for creating a new copy
1632 of SCRATCH_REG in the proper mode. */
1633
1634 int
1635 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1636 {
1637 register rtx operand0 = operands[0];
1638 register rtx operand1 = operands[1];
1639 register rtx tem;
1640
1641 /* We can only handle indexed addresses in the destination operand
1642 of floating point stores. Thus, we need to break out indexed
1643 addresses from the destination operand. */
1644 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1645 {
1646 gcc_assert (can_create_pseudo_p ());
1647
1648 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1649 operand0 = replace_equiv_address (operand0, tem);
1650 }
1651
1652 /* On targets with non-equivalent space registers, break out unscaled
1653 indexed addresses from the source operand before the final CSE.
1654 We have to do this because the REG_POINTER flag is not correctly
1655 carried through various optimization passes and CSE may substitute
1656 a pseudo without the pointer set for one with the pointer set. As
1657      a result, we lose various opportunities to create insns with
1658 unscaled indexed addresses. */
1659 if (!TARGET_NO_SPACE_REGS
1660 && !cse_not_expected
1661 && GET_CODE (operand1) == MEM
1662 && GET_CODE (XEXP (operand1, 0)) == PLUS
1663 && REG_P (XEXP (XEXP (operand1, 0), 0))
1664 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1665 operand1
1666 = replace_equiv_address (operand1,
1667 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1668
1669 if (scratch_reg
1670 && reload_in_progress && GET_CODE (operand0) == REG
1671 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1672 operand0 = reg_equiv_mem (REGNO (operand0));
1673 else if (scratch_reg
1674 && reload_in_progress && GET_CODE (operand0) == SUBREG
1675 && GET_CODE (SUBREG_REG (operand0)) == REG
1676 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1677 {
1678 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1679 the code which tracks sets/uses for delete_output_reload. */
1680 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1681 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1682 SUBREG_BYTE (operand0));
1683 operand0 = alter_subreg (&temp, true);
1684 }
1685
1686 if (scratch_reg
1687 && reload_in_progress && GET_CODE (operand1) == REG
1688 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1689 operand1 = reg_equiv_mem (REGNO (operand1));
1690 else if (scratch_reg
1691 && reload_in_progress && GET_CODE (operand1) == SUBREG
1692 && GET_CODE (SUBREG_REG (operand1)) == REG
1693 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1694 {
1695 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1696 the code which tracks sets/uses for delete_output_reload. */
1697 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1698 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1699 SUBREG_BYTE (operand1));
1700 operand1 = alter_subreg (&temp, true);
1701 }
1702
1703 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1704 && ((tem = find_replacement (&XEXP (operand0, 0)))
1705 != XEXP (operand0, 0)))
1706 operand0 = replace_equiv_address (operand0, tem);
1707
1708 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1709 && ((tem = find_replacement (&XEXP (operand1, 0)))
1710 != XEXP (operand1, 0)))
1711 operand1 = replace_equiv_address (operand1, tem);
1712
1713 /* Handle secondary reloads for loads/stores of FP registers from
1714 REG+D addresses where D does not fit in 5 or 14 bits, including
1715 (subreg (mem (addr))) cases, and reloads for other unsupported
1716 memory operands. */
1717 if (scratch_reg
1718 && FP_REG_P (operand0)
1719 && (MEM_P (operand1)
1720 || (GET_CODE (operand1) == SUBREG
1721 && MEM_P (XEXP (operand1, 0)))))
1722 {
1723 rtx op1 = operand1;
1724
1725 if (GET_CODE (op1) == SUBREG)
1726 op1 = XEXP (op1, 0);
1727
1728 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1729 {
1730 if (!(TARGET_PA_20
1731 && !TARGET_ELF32
1732 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1733 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1734 {
1735 /* SCRATCH_REG will hold an address and maybe the actual data.
1736 We want it in WORD_MODE regardless of what mode it was
1737 originally given to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1739
1740 /* D might not fit in 14 bits either; for such cases load D
1741 into scratch reg. */
1742 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1743 {
1744 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1745 emit_move_insn (scratch_reg,
1746 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1747 Pmode,
1748 XEXP (XEXP (op1, 0), 0),
1749 scratch_reg));
1750 }
1751 else
1752 emit_move_insn (scratch_reg, XEXP (op1, 0));
1753 op1 = replace_equiv_address (op1, scratch_reg);
1754 }
1755 }
1756 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1757 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1758 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1759 {
1760 /* Load memory address into SCRATCH_REG. */
1761 scratch_reg = force_mode (word_mode, scratch_reg);
1762 emit_move_insn (scratch_reg, XEXP (op1, 0));
1763 op1 = replace_equiv_address (op1, scratch_reg);
1764 }
1765 emit_insn (gen_rtx_SET (operand0, op1));
1766 return 1;
1767 }
1768 else if (scratch_reg
1769 && FP_REG_P (operand1)
1770 && (MEM_P (operand0)
1771 || (GET_CODE (operand0) == SUBREG
1772 && MEM_P (XEXP (operand0, 0)))))
1773 {
1774 rtx op0 = operand0;
1775
1776 if (GET_CODE (op0) == SUBREG)
1777 op0 = XEXP (op0, 0);
1778
1779 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1780 {
1781 if (!(TARGET_PA_20
1782 && !TARGET_ELF32
1783 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1784 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1785 {
1786 /* SCRATCH_REG will hold an address and maybe the actual data.
1787 We want it in WORD_MODE regardless of what mode it was
1788 originally given to us. */
1789 scratch_reg = force_mode (word_mode, scratch_reg);
1790
1791 /* D might not fit in 14 bits either; for such cases load D
1792 into scratch reg. */
1793 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1794 {
1795 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1796 emit_move_insn (scratch_reg,
1797 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1798 Pmode,
1799 XEXP (XEXP (op0, 0), 0),
1800 scratch_reg));
1801 }
1802 else
1803 emit_move_insn (scratch_reg, XEXP (op0, 0));
1804 op0 = replace_equiv_address (op0, scratch_reg);
1805 }
1806 }
1807 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1808 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1809 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1810 {
1811 /* Load memory address into SCRATCH_REG. */
1812 scratch_reg = force_mode (word_mode, scratch_reg);
1813 emit_move_insn (scratch_reg, XEXP (op0, 0));
1814 op0 = replace_equiv_address (op0, scratch_reg);
1815 }
1816 emit_insn (gen_rtx_SET (op0, operand1));
1817 return 1;
1818 }
1819 /* Handle secondary reloads for loads of FP registers from constant
1820 expressions by forcing the constant into memory. For the most part,
1821 this is only necessary for SImode and DImode.
1822
1823 Use scratch_reg to hold the address of the memory location. */
1824 else if (scratch_reg
1825 && CONSTANT_P (operand1)
1826 && FP_REG_P (operand0))
1827 {
1828 rtx const_mem, xoperands[2];
1829
1830 if (operand1 == CONST0_RTX (mode))
1831 {
1832 emit_insn (gen_rtx_SET (operand0, operand1));
1833 return 1;
1834 }
1835
1836 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1837 it in WORD_MODE regardless of what mode it was originally given
1838 to us. */
1839 scratch_reg = force_mode (word_mode, scratch_reg);
1840
1841 /* Force the constant into memory and put the address of the
1842 memory location into scratch_reg. */
1843 const_mem = force_const_mem (mode, operand1);
1844 xoperands[0] = scratch_reg;
1845 xoperands[1] = XEXP (const_mem, 0);
1846 pa_emit_move_sequence (xoperands, Pmode, 0);
1847
1848 /* Now load the destination register. */
1849 emit_insn (gen_rtx_SET (operand0,
1850 replace_equiv_address (const_mem, scratch_reg)));
1851 return 1;
1852 }
1853 /* Handle secondary reloads for SAR. These occur when trying to load
1854 the SAR from memory or a constant. */
1855 else if (scratch_reg
1856 && GET_CODE (operand0) == REG
1857 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1858 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1859 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1860 {
1861 /* D might not fit in 14 bits either; for such cases load D into
1862 scratch reg. */
1863 if (GET_CODE (operand1) == MEM
1864 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1865 {
1866 /* We are reloading the address into the scratch register, so we
1867 want to make sure the scratch register is a full register. */
1868 scratch_reg = force_mode (word_mode, scratch_reg);
1869
1870 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1871 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1872 0)),
1873 Pmode,
1874 XEXP (XEXP (operand1, 0),
1875 0),
1876 scratch_reg));
1877
1878 /* Now we are going to load the scratch register from memory,
1879 we want to load it in the same width as the original MEM,
1880 which must be the same as the width of the ultimate destination,
1881 OPERAND0. */
1882 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1883
1884 emit_move_insn (scratch_reg,
1885 replace_equiv_address (operand1, scratch_reg));
1886 }
1887 else
1888 {
1889 /* We want to load the scratch register using the same mode as
1890 the ultimate destination. */
1891 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1892
1893 emit_move_insn (scratch_reg, operand1);
1894 }
1895
1896 /* And emit the insn to set the ultimate destination. We know that
1897 the scratch register has the same mode as the destination at this
1898 point. */
1899 emit_move_insn (operand0, scratch_reg);
1900 return 1;
1901 }
1902
1903 /* Handle the most common case: storing into a register. */
1904 if (register_operand (operand0, mode))
1905 {
1906 /* Legitimize TLS symbol references. This happens for references
1907 that aren't a legitimate constant. */
1908 if (PA_SYMBOL_REF_TLS_P (operand1))
1909 operand1 = legitimize_tls_address (operand1);
1910
1911 if (register_operand (operand1, mode)
1912 || (GET_CODE (operand1) == CONST_INT
1913 && pa_cint_ok_for_move (UINTVAL (operand1)))
1914 || (operand1 == CONST0_RTX (mode))
1915 || (GET_CODE (operand1) == HIGH
1916 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1917 /* Only `general_operands' can come here, so MEM is ok. */
1918 || GET_CODE (operand1) == MEM)
1919 {
1920 /* Various sets are created during RTL generation which don't
1921 have the REG_POINTER flag correctly set. After the CSE pass,
1922 instruction recognition can fail if we don't consistently
1923 set this flag when performing register copies. This should
1924 also improve the opportunities for creating insns that use
1925 unscaled indexing. */
1926 if (REG_P (operand0) && REG_P (operand1))
1927 {
1928 if (REG_POINTER (operand1)
1929 && !REG_POINTER (operand0)
1930 && !HARD_REGISTER_P (operand0))
1931 copy_reg_pointer (operand0, operand1);
1932 }
1933
1934 /* When MEMs are broken out, the REG_POINTER flag doesn't
1935 get set. In some cases, we can set the REG_POINTER flag
1936 from the declaration for the MEM. */
1937 if (REG_P (operand0)
1938 && GET_CODE (operand1) == MEM
1939 && !REG_POINTER (operand0))
1940 {
1941 tree decl = MEM_EXPR (operand1);
1942
1943 /* Set the register pointer flag and register alignment
1944 if the declaration for this memory reference is a
1945 pointer type. */
1946 if (decl)
1947 {
1948 tree type;
1949
1950 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1951 tree operand 1. */
1952 if (TREE_CODE (decl) == COMPONENT_REF)
1953 decl = TREE_OPERAND (decl, 1);
1954
1955 type = TREE_TYPE (decl);
1956 type = strip_array_types (type);
1957
1958 if (POINTER_TYPE_P (type))
1959 mark_reg_pointer (operand0, BITS_PER_UNIT);
1960 }
1961 }
1962
1963 emit_insn (gen_rtx_SET (operand0, operand1));
1964 return 1;
1965 }
1966 }
1967 else if (GET_CODE (operand0) == MEM)
1968 {
1969 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1970 && !(reload_in_progress || reload_completed))
1971 {
1972 rtx temp = gen_reg_rtx (DFmode);
1973
1974 emit_insn (gen_rtx_SET (temp, operand1));
1975 emit_insn (gen_rtx_SET (operand0, temp));
1976 return 1;
1977 }
1978 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1979 {
1980 /* Run this case quickly. */
1981 emit_insn (gen_rtx_SET (operand0, operand1));
1982 return 1;
1983 }
1984 if (! (reload_in_progress || reload_completed))
1985 {
1986 operands[0] = validize_mem (operand0);
1987 operands[1] = operand1 = force_reg (mode, operand1);
1988 }
1989 }
1990
1991 /* Simplify the source if we need to.
1992 Note we do have to handle function labels here, even though we do
1993 not consider them legitimate constants. Loop optimizations can
1994 call emit_move_xxx with one as a source. */
1995 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1996 || (GET_CODE (operand1) == HIGH
1997 && symbolic_operand (XEXP (operand1, 0), mode))
1998 || function_label_operand (operand1, VOIDmode)
1999 || tls_referenced_p (operand1))
2000 {
2001 int ishighonly = 0;
2002
2003 if (GET_CODE (operand1) == HIGH)
2004 {
2005 ishighonly = 1;
2006 operand1 = XEXP (operand1, 0);
2007 }
2008 if (symbolic_operand (operand1, mode))
2009 {
2010 /* Argh. The assembler and linker can't handle arithmetic
2011 involving plabels.
2012
2013 So we force the plabel into memory, load operand0 from
2014 the memory location, then add in the constant part. */
2015 if ((GET_CODE (operand1) == CONST
2016 && GET_CODE (XEXP (operand1, 0)) == PLUS
2017 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2018 VOIDmode))
2019 || function_label_operand (operand1, VOIDmode))
2020 {
2021 rtx temp, const_part;
2022
2023 /* Figure out what (if any) scratch register to use. */
2024 if (reload_in_progress || reload_completed)
2025 {
2026 scratch_reg = scratch_reg ? scratch_reg : operand0;
2027 /* SCRATCH_REG will hold an address and maybe the actual
2028 data. We want it in WORD_MODE regardless of what mode it
2029 was originally given to us. */
2030 scratch_reg = force_mode (word_mode, scratch_reg);
2031 }
2032 else if (flag_pic)
2033 scratch_reg = gen_reg_rtx (Pmode);
2034
2035 if (GET_CODE (operand1) == CONST)
2036 {
2037 /* Save away the constant part of the expression. */
2038 const_part = XEXP (XEXP (operand1, 0), 1);
2039 gcc_assert (GET_CODE (const_part) == CONST_INT);
2040
2041 /* Force the function label into memory. */
2042 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2043 }
2044 else
2045 {
2046 /* No constant part. */
2047 const_part = NULL_RTX;
2048
2049 /* Force the function label into memory. */
2050 temp = force_const_mem (mode, operand1);
2051 }
2052
2053
2054 /* Get the address of the memory location. PIC-ify it if
2055 necessary. */
2056 temp = XEXP (temp, 0);
2057 if (flag_pic)
2058 temp = legitimize_pic_address (temp, mode, scratch_reg);
2059
2060 /* Put the address of the memory location into our destination
2061 register. */
2062 operands[1] = temp;
2063 pa_emit_move_sequence (operands, mode, scratch_reg);
2064
2065 /* Now load from the memory location into our destination
2066 register. */
2067 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2068 pa_emit_move_sequence (operands, mode, scratch_reg);
2069
2070 /* And add back in the constant part. */
2071 if (const_part != NULL_RTX)
2072 expand_inc (operand0, const_part);
2073
2074 return 1;
2075 }
2076
2077 if (flag_pic)
2078 {
2079 rtx_insn *insn;
2080 rtx temp;
2081
2082 if (reload_in_progress || reload_completed)
2083 {
2084 temp = scratch_reg ? scratch_reg : operand0;
2085 /* TEMP will hold an address and maybe the actual
2086 data. We want it in WORD_MODE regardless of what mode it
2087 was originally given to us. */
2088 temp = force_mode (word_mode, temp);
2089 }
2090 else
2091 temp = gen_reg_rtx (Pmode);
2092
2093 /* Force (const (plus (symbol) (const_int))) to memory
2094 if the const_int will not fit in 14 bits. Although
2095 this requires a relocation, the instruction sequence
2096 needed to load the value is shorter. */
2097 if (GET_CODE (operand1) == CONST
2098 && GET_CODE (XEXP (operand1, 0)) == PLUS
2099 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2100 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2101 {
2102 rtx x, m = force_const_mem (mode, operand1);
2103
2104 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2105 x = replace_equiv_address (m, x);
2106 insn = emit_move_insn (operand0, x);
2107 }
2108 else
2109 {
2110 operands[1] = legitimize_pic_address (operand1, mode, temp);
2111 if (REG_P (operand0) && REG_P (operands[1]))
2112 copy_reg_pointer (operand0, operands[1]);
2113 insn = emit_move_insn (operand0, operands[1]);
2114 }
2115
2116 /* Put a REG_EQUAL note on this insn. */
2117 set_unique_reg_note (insn, REG_EQUAL, operand1);
2118 }
2119 /* On the HPPA, references to data space are supposed to use dp,
2120 register 27, but showing it in the RTL inhibits various cse
2121 and loop optimizations. */
2122 else
2123 {
2124 rtx temp, set;
2125
2126 if (reload_in_progress || reload_completed)
2127 {
2128 temp = scratch_reg ? scratch_reg : operand0;
2129 /* TEMP will hold an address and maybe the actual
2130 data. We want it in WORD_MODE regardless of what mode it
2131 was originally given to us. */
2132 temp = force_mode (word_mode, temp);
2133 }
2134 else
2135 temp = gen_reg_rtx (mode);
2136
2137 /* Loading a SYMBOL_REF into a register makes that register
2138 safe to be used as the base in an indexed address.
2139
2140 Don't mark hard registers though. That loses. */
2141 if (GET_CODE (operand0) == REG
2142 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2143 mark_reg_pointer (operand0, BITS_PER_UNIT);
2144 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2145 mark_reg_pointer (temp, BITS_PER_UNIT);
2146
2147 if (ishighonly)
2148 set = gen_rtx_SET (operand0, temp);
2149 else
2150 set = gen_rtx_SET (operand0,
2151 gen_rtx_LO_SUM (mode, temp, operand1));
2152
2153 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2154 emit_insn (set);
2155
2156 }
2157 return 1;
2158 }
2159 else if (tls_referenced_p (operand1))
2160 {
2161 rtx tmp = operand1;
2162 rtx addend = NULL;
2163
2164 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2165 {
2166 addend = XEXP (XEXP (tmp, 0), 1);
2167 tmp = XEXP (XEXP (tmp, 0), 0);
2168 }
2169
2170 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2171 tmp = legitimize_tls_address (tmp);
2172 if (addend)
2173 {
2174 tmp = gen_rtx_PLUS (mode, tmp, addend);
2175 tmp = force_operand (tmp, operands[0]);
2176 }
2177 operands[1] = tmp;
2178 }
2179 else if (GET_CODE (operand1) != CONST_INT
2180 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2181 {
2182 rtx temp;
2183 rtx_insn *insn;
2184 rtx op1 = operand1;
2185 HOST_WIDE_INT value = 0;
2186 HOST_WIDE_INT insv = 0;
2187 int insert = 0;
2188
2189 if (GET_CODE (operand1) == CONST_INT)
2190 value = INTVAL (operand1);
2191
2192 if (TARGET_64BIT
2193 && GET_CODE (operand1) == CONST_INT
2194 && HOST_BITS_PER_WIDE_INT > 32
2195 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2196 {
2197 HOST_WIDE_INT nval;
2198
2199 /* Extract the low order 32 bits of the value and sign extend.
2200 If the new value is the same as the original value, we can
2201 use the original value as-is. If the new value is
2202 different, we use it and insert the most-significant 32-bits
2203 of the original value into the final result. */
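/* Illustrative example (not from the original sources): for
   VALUE = 0x100000000, NVAL (the sign-extended low word) is 0, which
   differs from VALUE, so the code loads 0 first and records INSV = 1
   to be deposited into the upper 32 bits below.  */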
2204 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2205 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2206 if (value != nval)
2207 {
2208 #if HOST_BITS_PER_WIDE_INT > 32
2209 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2210 #endif
2211 insert = 1;
2212 value = nval;
2213 operand1 = GEN_INT (nval);
2214 }
2215 }
2216
2217 if (reload_in_progress || reload_completed)
2218 temp = scratch_reg ? scratch_reg : operand0;
2219 else
2220 temp = gen_reg_rtx (mode);
2221
2222 /* We don't directly split DImode constants on 32-bit targets
2223 because PLUS uses an 11-bit immediate and the insn sequence
2224 generated is not as efficient as the one using HIGH/LO_SUM. */
2225 if (GET_CODE (operand1) == CONST_INT
2226 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2227 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2228 && !insert)
2229 {
2230 /* Directly break constant into high and low parts. This
2231 provides better optimization opportunities because various
2232 passes recognize constants split with PLUS but not LO_SUM.
2233 We use a 14-bit signed low part except when the addition
2234 of 0x4000 to the high part might change the sign of the
2235 high part. */
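/* Worked example (illustrative, not from the original sources): for
   VALUE = 0x3000, LOW = 0x3000 >= 0x2000, so HIGH becomes
   0x0 + 0x4000 = 0x4000 and LOW becomes 0x3000 - 0x4000 = -0x1000;
   the sequence "load 0x4000 into TEMP; add -4096" keeps the low part
   within the signed 14-bit range.  */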
2236 HOST_WIDE_INT low = value & 0x3fff;
2237 HOST_WIDE_INT high = value & ~ 0x3fff;
2238
2239 if (low >= 0x2000)
2240 {
2241 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2242 high += 0x2000;
2243 else
2244 high += 0x4000;
2245 }
2246
2247 low = value - high;
2248
2249 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2250 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2251 }
2252 else
2253 {
2254 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2255 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2256 }
2257
2258 insn = emit_move_insn (operands[0], operands[1]);
2259
2260 /* Now insert the most significant 32 bits of the value
2261 into the register. When we don't have a second register
2262 available, it could take up to nine instructions to load
2263 a 64-bit integer constant. Prior to reload, we force
2264 constants that would take more than three instructions
2265 to load to the constant pool. During and after reload,
2266 we have to handle all possible values. */
2267 if (insert)
2268 {
2269 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2270 register and the value to be inserted is outside the
2271 range that can be loaded with three depdi instructions. */
2272 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2273 {
2274 operand1 = GEN_INT (insv);
2275
2276 emit_insn (gen_rtx_SET (temp,
2277 gen_rtx_HIGH (mode, operand1)));
2278 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2279 if (mode == DImode)
2280 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2281 const0_rtx, temp));
2282 else
2283 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2284 const0_rtx, temp));
2285 }
2286 else
2287 {
2288 int len = 5, pos = 27;
2289
2290 /* Insert the bits using the depdi instruction. */
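/* Illustrative trace (a sketch, not from the original sources): for
   INSV = 1 (constant 1 << 32), the first 5-bit group gives v5 = 1
   with sign 0; the left-extension loop then absorbs the remaining
   zero bits, growing LEN from 5 to 32 while POS drops from 27 to 0,
   so a single depdi deposits the whole upper word.  */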
2291 while (pos >= 0)
2292 {
2293 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2294 HOST_WIDE_INT sign = v5 < 0;
2295
2296 /* Left extend the insertion. */
2297 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2298 while (pos > 0 && (insv & 1) == sign)
2299 {
2300 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2301 len += 1;
2302 pos -= 1;
2303 }
2304
2305 if (mode == DImode)
2306 insn = emit_insn (gen_insvdi (operand0,
2307 GEN_INT (len),
2308 GEN_INT (pos),
2309 GEN_INT (v5)));
2310 else
2311 insn = emit_insn (gen_insvsi (operand0,
2312 GEN_INT (len),
2313 GEN_INT (pos),
2314 GEN_INT (v5)));
2315
2316 len = pos > 0 && pos < 5 ? pos : 5;
2317 pos -= len;
2318 }
2319 }
2320 }
2321
2322 set_unique_reg_note (insn, REG_EQUAL, op1);
2323
2324 return 1;
2325 }
2326 }
2327 /* Now have insn-emit do whatever it normally does. */
2328 return 0;
2329 }
2330
2331 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2332 it will need a link/runtime reloc). */
2333
2334 int
2335 pa_reloc_needed (tree exp)
2336 {
2337 int reloc = 0;
2338
2339 switch (TREE_CODE (exp))
2340 {
2341 case ADDR_EXPR:
2342 return 1;
2343
2344 case POINTER_PLUS_EXPR:
2345 case PLUS_EXPR:
2346 case MINUS_EXPR:
2347 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2348 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2349 break;
2350
2351 CASE_CONVERT:
2352 case NON_LVALUE_EXPR:
2353 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2354 break;
2355
2356 case CONSTRUCTOR:
2357 {
2358 tree value;
2359 unsigned HOST_WIDE_INT ix;
2360
2361 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2362 if (value)
2363 reloc |= pa_reloc_needed (value);
2364 }
2365 break;
2366
2367 case ERROR_MARK:
2368 break;
2369
2370 default:
2371 break;
2372 }
2373 return reloc;
2374 }
2375
2376 \f
2377 /* Return the best assembler insn template
2378 for moving operands[1] into operands[0] as a fullword. */
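/* Illustrative examples of the template selection below (a sketch,
   not from the original sources): 42 fits in 14 bits and yields
   "ldi 42,%0"; 0x12345000 has its low 11 bits clear and yields
   "ldil L'0x12345000,%0"; 0x12345678 needs the two-insn
   ldil/ldo sequence.  */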
2379 const char *
2380 pa_singlemove_string (rtx *operands)
2381 {
2382 HOST_WIDE_INT intval;
2383
2384 if (GET_CODE (operands[0]) == MEM)
2385 return "stw %r1,%0";
2386 if (GET_CODE (operands[1]) == MEM)
2387 return "ldw %1,%0";
2388 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2389 {
2390 long i;
2391
2392 gcc_assert (GET_MODE (operands[1]) == SFmode);
2393
2394 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2395 bit pattern. */
2396 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2397
2398 operands[1] = GEN_INT (i);
2399 /* Fall through to CONST_INT case. */
2400 }
2401 if (GET_CODE (operands[1]) == CONST_INT)
2402 {
2403 intval = INTVAL (operands[1]);
2404
2405 if (VAL_14_BITS_P (intval))
2406 return "ldi %1,%0";
2407 else if ((intval & 0x7ff) == 0)
2408 return "ldil L'%1,%0";
2409 else if (pa_zdepi_cint_p (intval))
2410 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2411 else
2412 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2413 }
2414 return "copy %1,%0";
2415 }
2416 \f
2417
2418 /* Compute position (in OP[1]) and width (in OP[2])
2419 useful for copying IMM to a register using the zdepi
2420 instructions. Store the immediate value to insert in OP[0]. */
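/* Illustrative example (not from the original sources): for
   IMM = 0x380 (ones in bits 7..9), LSB = 7 and the shifted value 0x7
   has bit 4 clear, so OP[0] = 7, OP[1] = 31 - 7 = 24 and OP[2] = 4;
   "zdepi 7,24,4,%r" then rebuilds 7 << 7 = 0x380.  */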
2421 static void
2422 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2423 {
2424 int lsb, len;
2425
2426 /* Find the least significant set bit in IMM. */
2427 for (lsb = 0; lsb < 32; lsb++)
2428 {
2429 if ((imm & 1) != 0)
2430 break;
2431 imm >>= 1;
2432 }
2433
2434 /* Choose variants based on *sign* of the 5-bit field. */
2435 if ((imm & 0x10) == 0)
2436 len = (lsb <= 28) ? 4 : 32 - lsb;
2437 else
2438 {
2439 /* Find the width of the bitstring in IMM. */
2440 for (len = 5; len < 32 - lsb; len++)
2441 {
2442 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2443 break;
2444 }
2445
2446 /* Sign extend IMM as a 5-bit value. */
2447 imm = (imm & 0xf) - 0x10;
2448 }
2449
2450 op[0] = imm;
2451 op[1] = 31 - lsb;
2452 op[2] = len;
2453 }
2454
2455 /* Compute position (in OP[1]) and width (in OP[2])
2456 useful for copying IMM to a register using the depdi,z
2457 instructions. Store the immediate value to insert in OP[0]. */
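/* Illustrative example (not from the original sources): for
   IMM = 0xf0, LSB = 4 and the shifted value 0xf has bit 4 clear, so
   OP[0] = 0xf, OP[1] = 63 - 4 = 59 and OP[2] = 4; "depdi,z 15,59,4,%r"
   then rebuilds 0xf << 4 = 0xf0.  */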
2458
2459 static void
2460 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2461 {
2462 int lsb, len, maxlen;
2463
2464 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2465
2466 /* Find the least significant set bit in IMM. */
2467 for (lsb = 0; lsb < maxlen; lsb++)
2468 {
2469 if ((imm & 1) != 0)
2470 break;
2471 imm >>= 1;
2472 }
2473
2474 /* Choose variants based on *sign* of the 5-bit field. */
2475 if ((imm & 0x10) == 0)
2476 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2477 else
2478 {
2479 /* Find the width of the bitstring in IMM. */
2480 for (len = 5; len < maxlen - lsb; len++)
2481 {
2482 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2483 break;
2484 }
2485
2486 /* Extend length if host is narrow and IMM is negative. */
2487 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2488 len += 32;
2489
2490 /* Sign extend IMM as a 5-bit value. */
2491 imm = (imm & 0xf) - 0x10;
2492 }
2493
2494 op[0] = imm;
2495 op[1] = 63 - lsb;
2496 op[2] = len;
2497 }
2498
2499 /* Output assembler code to perform a doubleword move insn
2500 with operands OPERANDS. */
2501
2502 const char *
2503 pa_output_move_double (rtx *operands)
2504 {
2505 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2506 rtx latehalf[2];
2507 rtx addreg0 = 0, addreg1 = 0;
2508 int highonly = 0;
2509
2510 /* First classify both operands. */
2511
2512 if (REG_P (operands[0]))
2513 optype0 = REGOP;
2514 else if (offsettable_memref_p (operands[0]))
2515 optype0 = OFFSOP;
2516 else if (GET_CODE (operands[0]) == MEM)
2517 optype0 = MEMOP;
2518 else
2519 optype0 = RNDOP;
2520
2521 if (REG_P (operands[1]))
2522 optype1 = REGOP;
2523 else if (CONSTANT_P (operands[1]))
2524 optype1 = CNSTOP;
2525 else if (offsettable_memref_p (operands[1]))
2526 optype1 = OFFSOP;
2527 else if (GET_CODE (operands[1]) == MEM)
2528 optype1 = MEMOP;
2529 else
2530 optype1 = RNDOP;
2531
2532 /* Check for the cases that the operand constraints are not
2533 supposed to let happen. */
2534 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2535
2536 /* Handle copies between general and floating registers. */
2537
2538 if (optype0 == REGOP && optype1 == REGOP
2539 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2540 {
2541 if (FP_REG_P (operands[0]))
2542 {
2543 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2544 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2545 return "{fldds|fldd} -16(%%sp),%0";
2546 }
2547 else
2548 {
2549 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2550 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2551 return "{ldws|ldw} -12(%%sp),%R0";
2552 }
2553 }
2554
2555 /* Handle auto decrementing and incrementing loads and stores
2556 specifically, since the structure of the function doesn't work
2557 for them without major modification. Do this better once we teach
2558 this port about the general inc/dec addressing of the PA.
2559 (This was written by tege. Chide him if it doesn't work.) */
2560
2561 if (optype0 == MEMOP)
2562 {
2563 /* We have to output the address syntax ourselves, since print_operand
2564 doesn't deal with the addresses we want to use. Fix this later. */
2565
2566 rtx addr = XEXP (operands[0], 0);
2567 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2568 {
2569 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2570
2571 operands[0] = XEXP (addr, 0);
2572 gcc_assert (GET_CODE (operands[1]) == REG
2573 && GET_CODE (operands[0]) == REG);
2574
2575 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2576
2577 /* No overlap between high target register and address
2578 register. (We do this in a non-obvious way to
2579 save a register file writeback) */
2580 if (GET_CODE (addr) == POST_INC)
2581 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2582 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2583 }
2584 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2585 {
2586 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2587
2588 operands[0] = XEXP (addr, 0);
2589 gcc_assert (GET_CODE (operands[1]) == REG
2590 && GET_CODE (operands[0]) == REG);
2591
2592 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2593 /* No overlap between high target register and address
2594 register. (We do this in a non-obvious way to save a
2595 register file writeback) */
2596 if (GET_CODE (addr) == PRE_INC)
2597 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2598 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2599 }
2600 }
2601 if (optype1 == MEMOP)
2602 {
2603 /* We have to output the address syntax ourselves, since print_operand
2604 doesn't deal with the addresses we want to use. Fix this later. */
2605
2606 rtx addr = XEXP (operands[1], 0);
2607 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2608 {
2609 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2610
2611 operands[1] = XEXP (addr, 0);
2612 gcc_assert (GET_CODE (operands[0]) == REG
2613 && GET_CODE (operands[1]) == REG);
2614
2615 if (!reg_overlap_mentioned_p (high_reg, addr))
2616 {
2617 /* No overlap between high target register and address
2618 register. (We do this in a non-obvious way to
2619 save a register file writeback) */
2620 if (GET_CODE (addr) == POST_INC)
2621 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2622 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2623 }
2624 else
2625 {
2626 /* This is an undefined situation. We should load into the
2627 address register *and* update that register. Probably
2628 we don't need to handle this at all. */
2629 if (GET_CODE (addr) == POST_INC)
2630 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2631 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2632 }
2633 }
2634 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2635 {
2636 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2637
2638 operands[1] = XEXP (addr, 0);
2639 gcc_assert (GET_CODE (operands[0]) == REG
2640 && GET_CODE (operands[1]) == REG);
2641
2642 if (!reg_overlap_mentioned_p (high_reg, addr))
2643 {
2644 /* No overlap between high target register and address
2645 register. (We do this in a non-obvious way to
2646 save a register file writeback) */
2647 if (GET_CODE (addr) == PRE_INC)
2648 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2649 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2650 }
2651 else
2652 {
2653 /* This is an undefined situation. We should load into the
2654 address register *and* update that register. Probably
2655 we don't need to handle this at all. */
2656 if (GET_CODE (addr) == PRE_INC)
2657 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2658 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2659 }
2660 }
2661 else if (GET_CODE (addr) == PLUS
2662 && GET_CODE (XEXP (addr, 0)) == MULT)
2663 {
2664 rtx xoperands[4];
2665
2666 /* Load address into left half of destination register. */
2667 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2668 xoperands[1] = XEXP (addr, 1);
2669 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2670 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2671 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2672 xoperands);
2673 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2674 }
2675 else if (GET_CODE (addr) == PLUS
2676 && REG_P (XEXP (addr, 0))
2677 && REG_P (XEXP (addr, 1)))
2678 {
2679 rtx xoperands[3];
2680
2681 /* Load address into left half of destination register. */
2682 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2683 xoperands[1] = XEXP (addr, 0);
2684 xoperands[2] = XEXP (addr, 1);
2685 output_asm_insn ("{addl|add,l} %1,%2,%0",
2686 xoperands);
2687 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2688 }
2689 }
2690
2691 /* If an operand is an unoffsettable memory ref, find a register
2692 we can increment temporarily to make it refer to the second word. */
2693
2694 if (optype0 == MEMOP)
2695 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2696
2697 if (optype1 == MEMOP)
2698 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2699
2700 /* Ok, we can do one word at a time.
2701 Normally we do the low-numbered word first.
2702
2703 In either case, set up in LATEHALF the operands to use
2704 for the high-numbered word and in some cases alter the
2705 operands in OPERANDS to be suitable for the low-numbered word. */
2706
2707 if (optype0 == REGOP)
2708 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2709 else if (optype0 == OFFSOP)
2710 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2711 else
2712 latehalf[0] = operands[0];
2713
2714 if (optype1 == REGOP)
2715 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2716 else if (optype1 == OFFSOP)
2717 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2718 else if (optype1 == CNSTOP)
2719 {
2720 if (GET_CODE (operands[1]) == HIGH)
2721 {
2722 operands[1] = XEXP (operands[1], 0);
2723 highonly = 1;
2724 }
2725 split_double (operands[1], &operands[1], &latehalf[1]);
2726 }
2727 else
2728 latehalf[1] = operands[1];
2729
2730 /* If the first move would clobber the source of the second one,
2731 do them in the other order.
2732
2733 This can happen in two cases:
2734
2735 mem -> register where the first half of the destination register
2736 is the same register used in the memory's address. Reload
2737 can create such insns.
2738
2739 mem in this case will be either register indirect or register
2740 indirect plus a valid offset.
2741
2742 register -> register move where REGNO(dst) == REGNO(src + 1)
2743 someone (Tim/Tege?) claimed this can happen for parameter loads.
2744
2745 Handle mem -> register case first. */
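/* Illustrative example (not from the original sources): loading the
   pair %r4:%r5 from (mem (reg %r4)) would clobber the address if the
   low word were moved first (%r4 <- [%r4]), so the late half
   (%r5 <- [%r4 + 4]) is emitted first below, then the low word.  */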
2746 if (optype0 == REGOP
2747 && (optype1 == MEMOP || optype1 == OFFSOP)
2748 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2749 {
2750 /* Do the late half first. */
2751 if (addreg1)
2752 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2753 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2754
2755 /* Then clobber. */
2756 if (addreg1)
2757 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2758 return pa_singlemove_string (operands);
2759 }
2760
2761 /* Now handle register -> register case. */
2762 if (optype0 == REGOP && optype1 == REGOP
2763 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2764 {
2765 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2766 return pa_singlemove_string (operands);
2767 }
2768
2769 /* Normal case: do the two words, low-numbered first. */
2770
2771 output_asm_insn (pa_singlemove_string (operands), operands);
2772
2773 /* Make any unoffsettable addresses point at high-numbered word. */
2774 if (addreg0)
2775 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2776 if (addreg1)
2777 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2778
2779 /* Do high-numbered word. */
2780 if (highonly)
2781 output_asm_insn ("ldil L'%1,%0", latehalf);
2782 else
2783 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2784
2785 /* Undo the adds we just did. */
2786 if (addreg0)
2787 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2788 if (addreg1)
2789 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2790
2791 return "";
2792 }
2793 \f
2794 const char *
2795 pa_output_fp_move_double (rtx *operands)
2796 {
2797 if (FP_REG_P (operands[0]))
2798 {
2799 if (FP_REG_P (operands[1])
2800 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2801 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2802 else
2803 output_asm_insn ("fldd%F1 %1,%0", operands);
2804 }
2805 else if (FP_REG_P (operands[1]))
2806 {
2807 output_asm_insn ("fstd%F0 %1,%0", operands);
2808 }
2809 else
2810 {
2811 rtx xoperands[2];
2812
2813 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2814
2815 /* This is a pain. You have to be prepared to deal with an
2816 arbitrary address here including pre/post increment/decrement.
2817
2818 So avoid this in the MD. */
2819 gcc_assert (GET_CODE (operands[0]) == REG);
2820
2821 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2822 xoperands[0] = operands[0];
2823 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2824 }
2825 return "";
2826 }
2827 \f
2828 /* Return a REG that occurs in ADDR with coefficient 1.
2829 ADDR can be effectively incremented by incrementing REG. */
2830
2831 static rtx
2832 find_addr_reg (rtx addr)
2833 {
2834 while (GET_CODE (addr) == PLUS)
2835 {
2836 if (GET_CODE (XEXP (addr, 0)) == REG)
2837 addr = XEXP (addr, 0);
2838 else if (GET_CODE (XEXP (addr, 1)) == REG)
2839 addr = XEXP (addr, 1);
2840 else if (CONSTANT_P (XEXP (addr, 0)))
2841 addr = XEXP (addr, 1);
2842 else if (CONSTANT_P (XEXP (addr, 1)))
2843 addr = XEXP (addr, 0);
2844 else
2845 gcc_unreachable ();
2846 }
2847 gcc_assert (GET_CODE (addr) == REG);
2848 return addr;
2849 }
2850
2851 /* Emit code to perform a block move.
2852
2853 OPERANDS[0] is the destination pointer as a REG, clobbered.
2854 OPERANDS[1] is the source pointer as a REG, clobbered.
2855 OPERANDS[2] is a register for temporary storage.
2856 OPERANDS[3] is a register for temporary storage.
2857 OPERANDS[4] is the size as a CONST_INT
2858 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2859 OPERANDS[6] is another temporary register. */
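/* Illustrative trace (a sketch, not from the original sources): with
   n_bytes = 20 and align = 4, the loop counter is preset to 12; the
   word-copy loop runs twice (12 -> 4 -> -4), moving 16 bytes, and the
   residual code moves the final word, for 20 bytes in total.  */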
2860
2861 const char *
2862 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2863 {
2864 int align = INTVAL (operands[5]);
2865 unsigned long n_bytes = INTVAL (operands[4]);
2866
2867 /* We can't move more than a word at a time because the PA
2868 has no integer move insns wider than a word. (Could use fp mem ops?) */
2869 if (align > (TARGET_64BIT ? 8 : 4))
2870 align = (TARGET_64BIT ? 8 : 4);
2871
2872 /* Note that we know each loop below will execute at least twice
2873 (else we would have open-coded the copy). */
2874 switch (align)
2875 {
2876 case 8:
2877 /* Pre-adjust the loop counter. */
2878 operands[4] = GEN_INT (n_bytes - 16);
2879 output_asm_insn ("ldi %4,%2", operands);
2880
2881 /* Copying loop. */
2882 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2883 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2884 output_asm_insn ("std,ma %3,8(%0)", operands);
2885 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2886 output_asm_insn ("std,ma %6,8(%0)", operands);
2887
2888 /* Handle the residual. There could be up to 15 bytes of
2889 residual to copy! */
2890 if (n_bytes % 16 != 0)
2891 {
2892 operands[4] = GEN_INT (n_bytes % 8);
2893 if (n_bytes % 16 >= 8)
2894 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2895 if (n_bytes % 8 != 0)
2896 output_asm_insn ("ldd 0(%1),%6", operands);
2897 if (n_bytes % 16 >= 8)
2898 output_asm_insn ("std,ma %3,8(%0)", operands);
2899 if (n_bytes % 8 != 0)
2900 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2901 }
2902 return "";
2903
2904 case 4:
2905 /* Pre-adjust the loop counter. */
2906 operands[4] = GEN_INT (n_bytes - 8);
2907 output_asm_insn ("ldi %4,%2", operands);
2908
2909 /* Copying loop. */
2910 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2911 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2912 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2913 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2914 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2915
2916 /* Handle the residual. There could be up to 7 bytes of
2917 residual to copy! */
2918 if (n_bytes % 8 != 0)
2919 {
2920 operands[4] = GEN_INT (n_bytes % 4);
2921 if (n_bytes % 8 >= 4)
2922 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2923 if (n_bytes % 4 != 0)
2924 output_asm_insn ("ldw 0(%1),%6", operands);
2925 if (n_bytes % 8 >= 4)
2926 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2927 if (n_bytes % 4 != 0)
2928 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2929 }
2930 return "";
2931
2932 case 2:
2933 /* Pre-adjust the loop counter. */
2934 operands[4] = GEN_INT (n_bytes - 4);
2935 output_asm_insn ("ldi %4,%2", operands);
2936
2937 /* Copying loop. */
2938 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2939 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2940 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2941 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2942 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2943
2944 /* Handle the residual. */
2945 if (n_bytes % 4 != 0)
2946 {
2947 if (n_bytes % 4 >= 2)
2948 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2949 if (n_bytes % 2 != 0)
2950 output_asm_insn ("ldb 0(%1),%6", operands);
2951 if (n_bytes % 4 >= 2)
2952 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2953 if (n_bytes % 2 != 0)
2954 output_asm_insn ("stb %6,0(%0)", operands);
2955 }
2956 return "";
2957
2958 case 1:
2959 /* Pre-adjust the loop counter. */
2960 operands[4] = GEN_INT (n_bytes - 2);
2961 output_asm_insn ("ldi %4,%2", operands);
2962
2963 /* Copying loop. */
2964 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2965 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2966 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2967 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2968 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2969
2970 /* Handle the residual. */
2971 if (n_bytes % 2 != 0)
2972 {
2973 output_asm_insn ("ldb 0(%1),%3", operands);
2974 output_asm_insn ("stb %3,0(%0)", operands);
2975 }
2976 return "";
2977
2978 default:
2979 gcc_unreachable ();
2980 }
2981 }
2982
2983 /* Count the number of insns necessary to handle this block move.
2984
2985 Basic structure is the same as emit_block_move, except that we
2986 count insns rather than emit them. */
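/* Illustrative arithmetic (not from the original sources): for
   align = 4 and n_bytes = 20, the basic loop costs 6 insns and the
   residual (20 % 8 = 4 >= align) adds 2 more, so the routine returns
   8 * 4 = 32 bytes.  */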
2987
2988 static int
2989 compute_movmem_length (rtx_insn *insn)
2990 {
2991 rtx pat = PATTERN (insn);
2992 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2993 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2994 unsigned int n_insns = 0;
2995
2996 /* We can't move more than a word at a time because the PA
2997 has no integer move insns wider than a word. (Could use fp mem ops?) */
2998 if (align > (TARGET_64BIT ? 8 : 4))
2999 align = (TARGET_64BIT ? 8 : 4);
3000
3001 /* The basic copying loop. */
3002 n_insns = 6;
3003
3004 /* Residuals. */
3005 if (n_bytes % (2 * align) != 0)
3006 {
3007 if ((n_bytes % (2 * align)) >= align)
3008 n_insns += 2;
3009
3010 if ((n_bytes % align) != 0)
3011 n_insns += 2;
3012 }
3013
3014 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3015 return n_insns * 4;
3016 }
3017
3018 /* Emit code to perform a block clear.
3019
3020 OPERANDS[0] is the destination pointer as a REG, clobbered.
3021 OPERANDS[1] is a register for temporary storage.
3022 OPERANDS[2] is the size as a CONST_INT
3023 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3024
3025 const char *
3026 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3027 {
3028 int align = INTVAL (operands[3]);
3029 unsigned long n_bytes = INTVAL (operands[2]);
3030
3031 /* We can't clear more than a word at a time because the PA
3032 has no integer move insns wider than a word. */
3033 if (align > (TARGET_64BIT ? 8 : 4))
3034 align = (TARGET_64BIT ? 8 : 4);
3035
3036 /* Note that we know each loop below will execute at least twice
3037 (else we would have open-coded the clear). */
3038 switch (align)
3039 {
3040 case 8:
3041 /* Pre-adjust the loop counter. */
3042 operands[2] = GEN_INT (n_bytes - 16);
3043 output_asm_insn ("ldi %2,%1", operands);
3044
3045 /* Loop. */
3046 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3047 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3048 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3049
3050 /* Handle the residual. There could be up to 15 bytes of
3051 residual to clear! */
3052 if (n_bytes % 16 != 0)
3053 {
3054 operands[2] = GEN_INT (n_bytes % 8);
3055 if (n_bytes % 16 >= 8)
3056 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3057 if (n_bytes % 8 != 0)
3058 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3059 }
3060 return "";
3061
3062 case 4:
3063 /* Pre-adjust the loop counter. */
3064 operands[2] = GEN_INT (n_bytes - 8);
3065 output_asm_insn ("ldi %2,%1", operands);
3066
3067 /* Loop. */
3068 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3069 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3070 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3071
3072 /* Handle the residual. There could be up to 7 bytes of
3073 residual to clear! */
3074 if (n_bytes % 8 != 0)
3075 {
3076 operands[2] = GEN_INT (n_bytes % 4);
3077 if (n_bytes % 8 >= 4)
3078 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3079 if (n_bytes % 4 != 0)
3080 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3081 }
3082 return "";
3083
3084 case 2:
3085 /* Pre-adjust the loop counter. */
3086 operands[2] = GEN_INT (n_bytes - 4);
3087 output_asm_insn ("ldi %2,%1", operands);
3088
3089 /* Loop. */
3090 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3091 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3092 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3093
3094 /* Handle the residual. */
3095 if (n_bytes % 4 != 0)
3096 {
3097 if (n_bytes % 4 >= 2)
3098 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3099 if (n_bytes % 2 != 0)
3100 output_asm_insn ("stb %%r0,0(%0)", operands);
3101 }
3102 return "";
3103
3104 case 1:
3105 /* Pre-adjust the loop counter. */
3106 operands[2] = GEN_INT (n_bytes - 2);
3107 output_asm_insn ("ldi %2,%1", operands);
3108
3109 /* Loop. */
3110 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3111 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3112 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3113
3114 /* Handle the residual. */
3115 if (n_bytes % 2 != 0)
3116 output_asm_insn ("stb %%r0,0(%0)", operands);
3117
3118 return "";
3119
3120 default:
3121 gcc_unreachable ();
3122 }
3123 }
3124
3125 /* Count the number of insns necessary to handle this block clear.
3126
3127 Basic structure is the same as pa_output_block_clear, except that we
3128 count insns rather than emit them. */
3129
3130 static int
3131 compute_clrmem_length (rtx_insn *insn)
3132 {
3133 rtx pat = PATTERN (insn);
3134 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3135 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3136 unsigned int n_insns = 0;
3137
3138 /* We can't clear more than a word at a time because the PA
3139 has no integer move insns wider than a word. */
3140 if (align > (TARGET_64BIT ? 8 : 4))
3141 align = (TARGET_64BIT ? 8 : 4);
3142
3143 /* The basic loop. */
3144 n_insns = 4;
3145
3146 /* Residuals. */
3147 if (n_bytes % (2 * align) != 0)
3148 {
3149 if ((n_bytes % (2 * align)) >= align)
3150 n_insns++;
3151
3152 if ((n_bytes % align) != 0)
3153 n_insns++;
3154 }
3155
3156 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3157 return n_insns * 4;
3158 }
3159 \f
3160
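/* Return the assembler template for ANDing operands[1] with
   operands[2] into operands[0].  Illustrative example (not from the
   original sources): the mask 0xffffff0f has a single hole of zeros
   at bits 4..7, so ls0 = 4, ls1 = 8, p = 27 and len = 4, and
   "depwi 0,27,4,%0" clears exactly that field.  */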
3161 const char *
3162 pa_output_and (rtx *operands)
3163 {
3164 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3165 {
3166 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3167 int ls0, ls1, ms0, p, len;
3168
3169 for (ls0 = 0; ls0 < 32; ls0++)
3170 if ((mask & (1 << ls0)) == 0)
3171 break;
3172
3173 for (ls1 = ls0; ls1 < 32; ls1++)
3174 if ((mask & (1 << ls1)) != 0)
3175 break;
3176
3177 for (ms0 = ls1; ms0 < 32; ms0++)
3178 if ((mask & (1 << ms0)) == 0)
3179 break;
3180
3181 gcc_assert (ms0 == 32);
3182
3183 if (ls1 == 32)
3184 {
3185 len = ls0;
3186
3187 gcc_assert (len);
3188
3189 operands[2] = GEN_INT (len);
3190 return "{extru|extrw,u} %1,31,%2,%0";
3191 }
3192 else
3193 {
3194 /* We could use this `depi' for the case above as well, but `depi'
3195 requires one more register file access than an `extru'. */
3196
3197 p = 31 - ls0;
3198 len = ls1 - ls0;
3199
3200 operands[2] = GEN_INT (p);
3201 operands[3] = GEN_INT (len);
3202 return "{depi|depwi} 0,%2,%3,%0";
3203 }
3204 }
3205 else
3206 return "and %1,%2,%0";
3207 }
3208
3209 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3210 storing the result in operands[0]. */
3211 const char *
3212 pa_output_64bit_and (rtx *operands)
3213 {
3214 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3215 {
3216 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3217 int ls0, ls1, ms0, p, len;
3218
3219 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3220 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3221 break;
3222
3223 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3224 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3225 break;
3226
3227 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3228 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3229 break;
3230
3231 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3232
3233 if (ls1 == HOST_BITS_PER_WIDE_INT)
3234 {
3235 len = ls0;
3236
3237 gcc_assert (len);
3238
3239 operands[2] = GEN_INT (len);
3240 return "extrd,u %1,63,%2,%0";
3241 }
3242 else
3243 {
3244 /* We could use this `depdi' for the case above as well, but `depdi'
3245 requires one more register file access than an `extrd'. */
3246
3247 p = 63 - ls0;
3248 len = ls1 - ls0;
3249
3250 operands[2] = GEN_INT (p);
3251 operands[3] = GEN_INT (len);
3252 return "depdi 0,%2,%3,%0";
3253 }
3254 }
3255 else
3256 return "and %1,%2,%0";
3257 }
3258
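/* Return the assembler template for ORing the constant operands[2]
   into operands[0].  Illustrative example (not from the original
   sources): the mask 0x00000ff0 is a run of ones at bits 4..11, so
   bs0 = 4, bs1 = 12, p = 27 and len = 8, and "depwi -1,27,8,%0" sets
   exactly that field.  */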
3259 const char *
3260 pa_output_ior (rtx *operands)
3261 {
3262 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3263 int bs0, bs1, p, len;
3264
3265 if (INTVAL (operands[2]) == 0)
3266 return "copy %1,%0";
3267
3268 for (bs0 = 0; bs0 < 32; bs0++)
3269 if ((mask & (1 << bs0)) != 0)
3270 break;
3271
3272 for (bs1 = bs0; bs1 < 32; bs1++)
3273 if ((mask & (1 << bs1)) == 0)
3274 break;
3275
3276 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3277
3278 p = 31 - bs0;
3279 len = bs1 - bs0;
3280
3281 operands[2] = GEN_INT (p);
3282 operands[3] = GEN_INT (len);
3283 return "{depi|depwi} -1,%2,%3,%0";
3284 }
3285
3286 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3287 storing the result in operands[0]. */
3288 const char *
3289 pa_output_64bit_ior (rtx *operands)
3290 {
3291 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3292 int bs0, bs1, p, len;
3293
3294 if (INTVAL (operands[2]) == 0)
3295 return "copy %1,%0";
3296
3297 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3298 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3299 break;
3300
3301 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3302 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3303 break;
3304
3305 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3306 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3307
3308 p = 63 - bs0;
3309 len = bs1 - bs0;
3310
3311 operands[2] = GEN_INT (p);
3312 operands[3] = GEN_INT (len);
3313 return "depdi -1,%2,%3,%0";
3314 }
3315 \f
3316 /* Target hook for assembling integer objects. This code handles
3317 aligned SI and DI integers specially since function references
3318 must be preceded by P%. */
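/* Illustrative example (not from the original sources): an aligned
   word-size reference to a function "foo" is emitted as
   "\t.word\tP%foo" (".dword" on 64-bit targets); the P% prefix
   requests a function pointer (plabel) rather than the raw code
   address.  */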
3319
3320 static bool
3321 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3322 {
3323 bool result;
3324 tree decl = NULL;
3325
3326 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to call
3327 assemble_external and set the SYMBOL_REF_DECL to NULL before
3328 calling output_addr_const. Otherwise, it may call assemble_external
3329 in the midst of outputting the assembler code for the SYMBOL_REF.
3330 We restore the SYMBOL_REF_DECL after the output is done. */
3331 if (GET_CODE (x) == SYMBOL_REF)
3332 {
3333 decl = SYMBOL_REF_DECL (x);
3334 if (decl)
3335 {
3336 assemble_external (decl);
3337 SET_SYMBOL_REF_DECL (x, NULL);
3338 }
3339 }
3340
3341 if (size == UNITS_PER_WORD
3342 && aligned_p
3343 && function_label_operand (x, VOIDmode))
3344 {
3345 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3346
3347 /* We don't want an OPD when generating fast indirect calls. */
3348 if (!TARGET_FAST_INDIRECT_CALLS)
3349 fputs ("P%", asm_out_file);
3350
3351 output_addr_const (asm_out_file, x);
3352 fputc ('\n', asm_out_file);
3353 result = true;
3354 }
3355 else
3356 result = default_assemble_integer (x, size, aligned_p);
3357
3358 if (decl)
3359 SET_SYMBOL_REF_DECL (x, decl);
3360
3361 return result;
3362 }
3363 \f
3364 /* Output an ascii string. */
3365 void
3366 pa_output_ascii (FILE *file, const char *p, int size)
3367 {
3368 int i;
3369 int chars_output;
3370 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3371
3372 /* The HP assembler can only take strings of 256 characters at one
3373 time. This is a limitation on input line length, *not* the
3374 length of the string. Sigh. Even worse, it seems that the
3375 restriction is in number of input characters (see \xnn &
3376 \whatever). So we have to do this very carefully. */
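/* Illustrative example (not from the original sources): the bytes
   'a', '"', '\n' are emitted as a\"\x0a -- printable characters pass
   through, quote and backslash gain a leading backslash, and all
   other bytes become two-digit \xNN hex escapes.  */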
3377
3378 fputs ("\t.STRING \"", file);
3379
3380 chars_output = 0;
3381 for (i = 0; i < size; i += 4)
3382 {
3383 int co = 0;
3384 int io = 0;
3385 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3386 {
3387 register unsigned int c = (unsigned char) p[i + io];
3388
3389 if (c == '\"' || c == '\\')
3390 partial_output[co++] = '\\';
3391 if (c >= ' ' && c < 0177)
3392 partial_output[co++] = c;
3393 else
3394 {
3395 unsigned int hexd;
3396 partial_output[co++] = '\\';
3397 partial_output[co++] = 'x';
3398 hexd = c / 16 - 0 + '0';
3399 if (hexd > '9')
3400 hexd -= '9' - 'a' + 1;
3401 partial_output[co++] = hexd;
3402 hexd = c % 16 - 0 + '0';
3403 if (hexd > '9')
3404 hexd -= '9' - 'a' + 1;
3405 partial_output[co++] = hexd;
3406 }
3407 }
3408 if (chars_output + co > 243)
3409 {
3410 fputs ("\"\n\t.STRING \"", file);
3411 chars_output = 0;
3412 }
3413 fwrite (partial_output, 1, (size_t) co, file);
3414 chars_output += co;
3415 co = 0;
3416 }
3417 fputs ("\"\n", file);
3418 }
3419
3420 /* Try to rewrite floating point comparisons & branches to avoid
3421 useless add,tr insns.
3422
3423 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3424 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3425 first attempt to remove useless add,tr insns. It is zero
3426 for the second pass as reorg sometimes leaves bogus REG_DEAD
3427 notes lying around.
3428
3429 When CHECK_NOTES is zero we can only eliminate add,tr insns
3430 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3431 instructions. */
3432 static void
3433 remove_useless_addtr_insns (int check_notes)
3434 {
3435 rtx_insn *insn;
3436 static int pass = 0;
3437
3438 /* This is fairly cheap, so always run it when optimizing. */
3439 if (optimize > 0)
3440 {
3441 int fcmp_count = 0;
3442 int fbranch_count = 0;
3443
3444 /* Walk all the insns in this function looking for fcmp & fbranch
3445 instructions. Keep track of how many of each we find. */
3446 for (insn = get_insns (); insn; insn = next_insn (insn))
3447 {
3448 rtx tmp;
3449
3450 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3451 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3452 continue;
3453
3454 tmp = PATTERN (insn);
3455
3456 /* It must be a set. */
3457 if (GET_CODE (tmp) != SET)
3458 continue;
3459
3460 /* If the destination is CCFP, then we've found an fcmp insn. */
3461 tmp = SET_DEST (tmp);
3462 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3463 {
3464 fcmp_count++;
3465 continue;
3466 }
3467
3468 tmp = PATTERN (insn);
3469 /* If this is an fbranch instruction, bump the fbranch counter. */
3470 if (GET_CODE (tmp) == SET
3471 && SET_DEST (tmp) == pc_rtx
3472 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3473 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3474 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3475 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3476 {
3477 fbranch_count++;
3478 continue;
3479 }
3480 }
3481
3482
3483 /* Find all floating point compare + branch insns. If possible,
3484 reverse the comparison & the branch to avoid add,tr insns. */
3485 for (insn = get_insns (); insn; insn = next_insn (insn))
3486 {
3487 rtx tmp;
3488 rtx_insn *next;
3489
3490 /* Ignore anything that isn't an INSN. */
3491 if (! NONJUMP_INSN_P (insn))
3492 continue;
3493
3494 tmp = PATTERN (insn);
3495
3496 /* It must be a set. */
3497 if (GET_CODE (tmp) != SET)
3498 continue;
3499
3500 /* The destination must be CCFP, which is register zero. */
3501 tmp = SET_DEST (tmp);
3502 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3503 continue;
3504
3505 /* INSN should be a set of CCFP.
3506
3507 See if the result of this insn is used in a reversed FP
3508 conditional branch. If so, reverse our condition and
3509 the branch. Doing so avoids useless add,tr insns. */
3510 next = next_insn (insn);
3511 while (next)
3512 {
3513 /* Jumps, calls and labels stop our search. */
3514 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3515 break;
3516
3517 /* As does another fcmp insn. */
3518 if (NONJUMP_INSN_P (next)
3519 && GET_CODE (PATTERN (next)) == SET
3520 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3521 && REGNO (SET_DEST (PATTERN (next))) == 0)
3522 break;
3523
3524 next = next_insn (next);
3525 }
3526
3527 /* Is NEXT_INSN a branch? */
3528 if (next && JUMP_P (next))
3529 {
3530 rtx pattern = PATTERN (next);
3531
3532 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3533 and CCFP dies, then reverse our conditional and the branch
3534 to avoid the add,tr. */
3535 if (GET_CODE (pattern) == SET
3536 && SET_DEST (pattern) == pc_rtx
3537 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3538 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3539 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3540 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3541 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3542 && (fcmp_count == fbranch_count
3543 || (check_notes
3544 && find_regno_note (next, REG_DEAD, 0))))
3545 {
3546 /* Reverse the branch. */
3547 tmp = XEXP (SET_SRC (pattern), 1);
3548 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3549 XEXP (SET_SRC (pattern), 2) = tmp;
3550 INSN_CODE (next) = -1;
3551
3552 /* Reverse our condition. */
3553 tmp = PATTERN (insn);
3554 PUT_CODE (XEXP (tmp, 1),
3555 (reverse_condition_maybe_unordered
3556 (GET_CODE (XEXP (tmp, 1)))));
3557 }
3558 }
3559 }
3560 }
3561
3562 pass = !pass;
3563
3564 }
3565 \f
3566 /* You may have trouble believing this, but this is the 32 bit HP-PA
3567 stack layout. Wow.
3568
3569 Offset Contents
3570
3571 Variable arguments (optional; any number may be allocated)
3572
3573 SP-(4*(N+9)) arg word N
3574 : :
3575 SP-56 arg word 5
3576 SP-52 arg word 4
3577
3578 Fixed arguments (must be allocated; may remain unused)
3579
3580 SP-48 arg word 3
3581 SP-44 arg word 2
3582 SP-40 arg word 1
3583 SP-36 arg word 0
3584
3585 Frame Marker
3586
3587 SP-32 External Data Pointer (DP)
3588 SP-28 External sr4
3589 SP-24 External/stub RP (RP')
3590 SP-20 Current RP
3591 SP-16 Static Link
3592 SP-12 Clean up
3593 SP-8 Calling Stub RP (RP'')
3594 SP-4 Previous SP
3595
3596 Top of Frame
3597
3598 SP-0 Stack Pointer (points to next available address)
3599
3600 */
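/* For example, reading the table above: a call passing six argument
   words stores words 0-3 in the fixed area at SP-36 through SP-48 and
   words 4-5 in the variable area at SP-52 and SP-56, while the frame
   marker always occupies SP-4 through SP-32.  */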
3601
3602 /* This function saves registers as follows. Registers marked with ' are
3603 this function's registers (as opposed to the previous function's).
3604    If a frame_pointer isn't needed, r3 is saved as a general register;
3605 the space for the frame pointer is still allocated, though, to keep
3606 things simple.
3607
3608
3609 Top of Frame
3610
3611 SP (FP') Previous FP
3612 SP + 4 Alignment filler (sigh)
3613 SP + 8 Space for locals reserved here.
3614 .
3615 .
3616 .
3617    SP + n       All call saved registers used.
3618 .
3619 .
3620 .
3621 SP + o All call saved fp registers used.
3622 .
3623 .
3624 .
3625 SP + p (SP') points to next available address.
3626
3627 */
3628
3629 /* Global variables set by pa_expand_prologue().  */
3630 /* Size of frame. Need to know this to emit return insns from
3631 leaf procedures. */
3632 static HOST_WIDE_INT actual_fsize, local_fsize;
3633 static int save_fregs;
3634
3635 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3636 Handle case where DISP > 8k by using the add_high_const patterns.
3637
3638 Note in DISP > 8k case, we will leave the high part of the address
3639    in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */
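/* Illustrative sketch of the generated code (32-bit case; the exact
   sequence depends on the displacement and target options):

       stw %reg,disp(%base)        ; VAL_14_BITS_P (disp)

       addil L'disp,%base          ; otherwise: %r1 = %base + high part,
       stw %reg,R'disp(%r1)        ; then store via the low part.  */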
3640
3641 static void
3642 store_reg (int reg, HOST_WIDE_INT disp, int base)
3643 {
3644 rtx dest, src, basereg;
3645 rtx_insn *insn;
3646
3647 src = gen_rtx_REG (word_mode, reg);
3648 basereg = gen_rtx_REG (Pmode, base);
3649 if (VAL_14_BITS_P (disp))
3650 {
3651 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3652 insn = emit_move_insn (dest, src);
3653 }
3654 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3655 {
3656 rtx delta = GEN_INT (disp);
3657 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3658
3659 emit_move_insn (tmpreg, delta);
3660 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3661 if (DO_FRAME_NOTES)
3662 {
3663 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3664 gen_rtx_SET (tmpreg,
3665 gen_rtx_PLUS (Pmode, basereg, delta)));
3666 RTX_FRAME_RELATED_P (insn) = 1;
3667 }
3668 dest = gen_rtx_MEM (word_mode, tmpreg);
3669 insn = emit_move_insn (dest, src);
3670 }
3671 else
3672 {
3673 rtx delta = GEN_INT (disp);
3674 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3675 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3676
3677 emit_move_insn (tmpreg, high);
3678 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3679 insn = emit_move_insn (dest, src);
3680 if (DO_FRAME_NOTES)
3681 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3682 gen_rtx_SET (gen_rtx_MEM (word_mode,
3683 gen_rtx_PLUS (word_mode,
3684 basereg,
3685 delta)),
3686 src));
3687 }
3688
3689 if (DO_FRAME_NOTES)
3690 RTX_FRAME_RELATED_P (insn) = 1;
3691 }
3692
3693 /* Emit RTL to store REG at the memory location specified by BASE and then
3694 add MOD to BASE. MOD must be <= 8k. */
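/* On 32-bit targets this is typically a single post-modify store,
   e.g. "stwm %reg,mod(%base)" ("stw,ma" in PA 2.0 syntax), which
   stores *BASE and bumps BASE in one instruction (illustrative).  */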
3695
3696 static void
3697 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3698 {
3699 rtx basereg, srcreg, delta;
3700 rtx_insn *insn;
3701
3702 gcc_assert (VAL_14_BITS_P (mod));
3703
3704 basereg = gen_rtx_REG (Pmode, base);
3705 srcreg = gen_rtx_REG (word_mode, reg);
3706 delta = GEN_INT (mod);
3707
3708 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3709 if (DO_FRAME_NOTES)
3710 {
3711 RTX_FRAME_RELATED_P (insn) = 1;
3712
3713 /* RTX_FRAME_RELATED_P must be set on each frame related set
3714 in a parallel with more than one element. */
3715 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3716 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3717 }
3718 }
3719
3720 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3721 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3722 whether to add a frame note or not.
3723
3724 In the DISP > 8k case, we leave the high part of the address in %r1.
3725 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
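/* Illustratively, the small-displacement case is a single
   "ldo disp(%base),%reg", while the large 32-bit displacement case
   becomes "addil L'disp,%base" followed by "ldo R'disp(%r1),%reg".  */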
3726
3727 static void
3728 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3729 {
3730 rtx_insn *insn;
3731
3732 if (VAL_14_BITS_P (disp))
3733 {
3734 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3735 plus_constant (Pmode,
3736 gen_rtx_REG (Pmode, base), disp));
3737 }
3738 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3739 {
3740 rtx basereg = gen_rtx_REG (Pmode, base);
3741 rtx delta = GEN_INT (disp);
3742 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3743
3744 emit_move_insn (tmpreg, delta);
3745 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3746 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3747 if (DO_FRAME_NOTES)
3748 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3749 gen_rtx_SET (tmpreg,
3750 gen_rtx_PLUS (Pmode, basereg, delta)));
3751 }
3752 else
3753 {
3754 rtx basereg = gen_rtx_REG (Pmode, base);
3755 rtx delta = GEN_INT (disp);
3756 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3757
3758 emit_move_insn (tmpreg,
3759 gen_rtx_PLUS (Pmode, basereg,
3760 gen_rtx_HIGH (Pmode, delta)));
3761 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3762 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3763 }
3764
3765 if (DO_FRAME_NOTES && note)
3766 RTX_FRAME_RELATED_P (insn) = 1;
3767 }
3768
3769 HOST_WIDE_INT
3770 pa_compute_frame_size (poly_int64 size, int *fregs_live)
3771 {
3772 int freg_saved = 0;
3773 int i, j;
3774
3775 /* The code in pa_expand_prologue and pa_expand_epilogue must
3776 be consistent with the rounding and size calculation done here.
3777 Change them at the same time. */
3778
3779 /* We do our own stack alignment. First, round the size of the
3780 stack locals up to a word boundary. */
3781 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3782
3783 /* Space for previous frame pointer + filler. If any frame is
3784 allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
3785 waste some space here for the sake of HP compatibility. The
3786 first slot is only used when the frame pointer is needed. */
3787 if (size || frame_pointer_needed)
3788 size += pa_starting_frame_offset ();
3789
3790 /* If the current function calls __builtin_eh_return, then we need
3791 to allocate stack space for registers that will hold data for
3792 the exception handler. */
3793 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3794 {
3795 unsigned int i;
3796
3797 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3798 continue;
3799 size += i * UNITS_PER_WORD;
3800 }
3801
3802 /* Account for space used by the callee general register saves. */
3803 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3804 if (df_regs_ever_live_p (i))
3805 size += UNITS_PER_WORD;
3806
3807 /* Account for space used by the callee floating point register saves. */
3808 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3809 if (df_regs_ever_live_p (i)
3810 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3811 {
3812 freg_saved = 1;
3813
3814 /* We always save both halves of the FP register, so always
3815 increment the frame size by 8 bytes. */
3816 size += 8;
3817 }
3818
3819 /* If any of the floating registers are saved, account for the
3820 alignment needed for the floating point register save block. */
3821 if (freg_saved)
3822 {
3823 size = (size + 7) & ~7;
3824 if (fregs_live)
3825 *fregs_live = 1;
3826 }
3827
3828 /* The various ABIs include space for the outgoing parameters in the
3829 size of the current function's stack frame. We don't need to align
3830 for the outgoing arguments as their alignment is set by the final
3831 rounding for the frame as a whole. */
3832 size += crtl->outgoing_args_size;
3833
3834 /* Allocate space for the fixed frame marker. This space must be
3835 allocated for any function that makes calls or allocates
3836 stack space. */
3837 if (!crtl->is_leaf || size)
3838 size += TARGET_64BIT ? 48 : 32;
3839
3840 /* Finally, round to the preferred stack boundary. */
3841 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3842 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3843 }
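/* A worked example under the 32-bit ABI, assuming no register saves,
   no outgoing arguments, and the 64-byte preferred stack boundary:
   101 bytes of locals round up to 104; the frame-pointer slot and
   filler add pa_starting_frame_offset () = 8, giving 112; the fixed
   frame marker adds 32, giving 144; and the final rounding yields a
   192-byte frame.  */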
3844
3845 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3846 of memory. If any fpu reg is used in the function, we allocate
3847 such a block here, at the bottom of the frame, just in case it's needed.
3848
3849 If this function is a leaf procedure, then we may choose not
3850 to do a "save" insn. The decision about whether or not
3851 to do this is made in regclass.c. */
3852
3853 static void
3854 pa_output_function_prologue (FILE *file)
3855 {
3856 /* The function's label and associated .PROC must never be
3857 separated and must be output *after* any profiling declarations
3858 to avoid changing spaces/subspaces within a procedure. */
3859 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3860 fputs ("\t.PROC\n", file);
3861
3862 /* pa_expand_prologue does the dirty work now. We just need
3863 to output the assembler directives which denote the start
3864 of a function. */
3865 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3866 if (crtl->is_leaf)
3867 fputs (",NO_CALLS", file);
3868 else
3869 fputs (",CALLS", file);
3870 if (rp_saved)
3871 fputs (",SAVE_RP", file);
3872
3873 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3874 at the beginning of the frame and that it is used as the frame
3875 pointer for the frame. We do this because our current frame
3876 layout doesn't conform to that specified in the HP runtime
3877 documentation and we need a way to indicate to programs such as
3878 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3879 isn't used by HP compilers but is supported by the assembler.
3880 However, SAVE_SP is supposed to indicate that the previous stack
3881 pointer has been saved in the frame marker. */
3882 if (frame_pointer_needed)
3883 fputs (",SAVE_SP", file);
3884
3885 /* Pass on information about the number of callee register saves
3886 performed in the prologue.
3887
3888 The compiler is supposed to pass the highest register number
3889 saved, the assembler then has to adjust that number before
3890 entering it into the unwind descriptor (to account for any
3891 caller saved registers with lower register numbers than the
3892 first callee saved register). */
3893 if (gr_saved)
3894 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3895
3896 if (fr_saved)
3897 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3898
3899 fputs ("\n\t.ENTRY\n", file);
3900
3901 remove_useless_addtr_insns (0);
3902 }
3903
3904 void
3905 pa_expand_prologue (void)
3906 {
3907 int merge_sp_adjust_with_store = 0;
3908 HOST_WIDE_INT size = get_frame_size ();
3909 HOST_WIDE_INT offset;
3910 int i;
3911 rtx tmpreg;
3912 rtx_insn *insn;
3913
3914 gr_saved = 0;
3915 fr_saved = 0;
3916 save_fregs = 0;
3917
3918 /* Compute total size for frame pointer, filler, locals and rounding to
3919 the next word boundary. Similar code appears in pa_compute_frame_size
3920 and must be changed in tandem with this code. */
3921 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3922 if (local_fsize || frame_pointer_needed)
3923 local_fsize += pa_starting_frame_offset ();
3924
3925 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3926 if (flag_stack_usage_info)
3927 current_function_static_stack_size = actual_fsize;
3928
3929 /* Compute a few things we will use often. */
3930 tmpreg = gen_rtx_REG (word_mode, 1);
3931
3932 /* Save RP first. The calling conventions manual states RP will
3933 always be stored into the caller's frame at sp - 20 or sp - 16
3934 depending on which ABI is in use. */
3935 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3936 {
3937 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3938 rp_saved = true;
3939 }
3940 else
3941 rp_saved = false;
3942
3943 /* Allocate the local frame and set up the frame pointer if needed. */
3944 if (actual_fsize != 0)
3945 {
3946 if (frame_pointer_needed)
3947 {
3948 /* Copy the old frame pointer temporarily into %r1. Set up the
3949 new stack pointer, then store away the saved old frame pointer
3950 into the stack at sp and at the same time update the stack
3951 pointer by actual_fsize bytes. Two versions, first
3952 handles small (<8k) frames. The second handles large (>=8k)
3953 frames. */
3954 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3955 if (DO_FRAME_NOTES)
3956 RTX_FRAME_RELATED_P (insn) = 1;
3957
3958 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3959 if (DO_FRAME_NOTES)
3960 RTX_FRAME_RELATED_P (insn) = 1;
3961
3962 if (VAL_14_BITS_P (actual_fsize))
3963 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3964 else
3965 {
3966 /* It is incorrect to store the saved frame pointer at *sp,
3967 then increment sp (writes beyond the current stack boundary).
3968
3969 So instead use stwm to store at *sp and post-increment the
3970 stack pointer as an atomic operation. Then increment sp to
3971 finish allocating the new frame. */
3972 HOST_WIDE_INT adjust1 = 8192 - 64;
3973 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3974
3975 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3976 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3977 adjust2, 1);
3978 }
3979
3980 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3981 we need to store the previous stack pointer (frame pointer)
3982 into the frame marker on targets that use the HP unwind
3983 library. This allows the HP unwind library to be used to
3984 unwind GCC frames. However, we are not fully compatible
3985 with the HP library because our frame layout differs from
3986 that specified in the HP runtime specification.
3987
3988 We don't want a frame note on this instruction as the frame
3989 marker moves during dynamic stack allocation.
3990
3991 This instruction also serves as a blockage to prevent
3992 register spills from being scheduled before the stack
3993 pointer is raised. This is necessary as we store
3994 registers using the frame pointer as a base register,
3995 and the frame pointer is set before sp is raised. */
3996 if (TARGET_HPUX_UNWIND_LIBRARY)
3997 {
3998 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3999 GEN_INT (TARGET_64BIT ? -8 : -4));
4000
4001 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4002 hard_frame_pointer_rtx);
4003 }
4004 else
4005 emit_insn (gen_blockage ());
4006 }
4007       /* No frame pointer needed.  */
4008 else
4009 {
4010 /* In some cases we can perform the first callee register save
4011 	 and allocate the stack frame at the same time.  If so, just
4012 make a note of it and defer allocating the frame until saving
4013 the callee registers. */
4014 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4015 merge_sp_adjust_with_store = 1;
4016 	 /* Cannot optimize.  Adjust the stack frame by actual_fsize
4017 bytes. */
4018 else
4019 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4020 actual_fsize, 1);
4021 }
4022 }
4023
4024 /* Normal register save.
4025
4026 Do not save the frame pointer in the frame_pointer_needed case. It
4027 was done earlier. */
4028 if (frame_pointer_needed)
4029 {
4030 offset = local_fsize;
4031
4032 /* Saving the EH return data registers in the frame is the simplest
4033 way to get the frame unwind information emitted. We put them
4034 just before the general registers. */
4035 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4036 {
4037 unsigned int i, regno;
4038
4039 for (i = 0; ; ++i)
4040 {
4041 regno = EH_RETURN_DATA_REGNO (i);
4042 if (regno == INVALID_REGNUM)
4043 break;
4044
4045 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4046 offset += UNITS_PER_WORD;
4047 }
4048 }
4049
4050 for (i = 18; i >= 4; i--)
4051 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4052 {
4053 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4054 offset += UNITS_PER_WORD;
4055 gr_saved++;
4056 }
4057 /* Account for %r3 which is saved in a special place. */
4058 gr_saved++;
4059 }
4060 /* No frame pointer needed. */
4061 else
4062 {
4063 offset = local_fsize - actual_fsize;
4064
4065 /* Saving the EH return data registers in the frame is the simplest
4066 way to get the frame unwind information emitted. */
4067 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4068 {
4069 unsigned int i, regno;
4070
4071 for (i = 0; ; ++i)
4072 {
4073 regno = EH_RETURN_DATA_REGNO (i);
4074 if (regno == INVALID_REGNUM)
4075 break;
4076
4077 /* If merge_sp_adjust_with_store is nonzero, then we can
4078 optimize the first save. */
4079 if (merge_sp_adjust_with_store)
4080 {
4081 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4082 merge_sp_adjust_with_store = 0;
4083 }
4084 else
4085 store_reg (regno, offset, STACK_POINTER_REGNUM);
4086 offset += UNITS_PER_WORD;
4087 }
4088 }
4089
4090 for (i = 18; i >= 3; i--)
4091 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4092 {
4093 /* If merge_sp_adjust_with_store is nonzero, then we can
4094 optimize the first GR save. */
4095 if (merge_sp_adjust_with_store)
4096 {
4097 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4098 merge_sp_adjust_with_store = 0;
4099 }
4100 else
4101 store_reg (i, offset, STACK_POINTER_REGNUM);
4102 offset += UNITS_PER_WORD;
4103 gr_saved++;
4104 }
4105
4106 /* If we wanted to merge the SP adjustment with a GR save, but we never
4107 did any GR saves, then just emit the adjustment here. */
4108 if (merge_sp_adjust_with_store)
4109 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4110 actual_fsize, 1);
4111 }
4112
4113 /* The hppa calling conventions say that %r19, the pic offset
4114 register, is saved at sp - 32 (in this function's frame)
4115 when generating PIC code. FIXME: What is the correct thing
4116 to do for functions which make no calls and allocate no
4117 frame? Do we need to allocate a frame, or can we just omit
4118 the save? For now we'll just omit the save.
4119
4120 We don't want a note on this insn as the frame marker can
4121 move if there is a dynamic stack allocation. */
4122 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4123 {
4124 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4125
4126 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4127
4128 }
4129
4130 /* Align pointer properly (doubleword boundary). */
4131 offset = (offset + 7) & ~7;
4132
4133 /* Floating point register store. */
4134 if (save_fregs)
4135 {
4136 rtx base;
4137
4138 /* First get the frame or stack pointer to the start of the FP register
4139 save area. */
4140 if (frame_pointer_needed)
4141 {
4142 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4143 base = hard_frame_pointer_rtx;
4144 }
4145 else
4146 {
4147 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4148 base = stack_pointer_rtx;
4149 }
4150
4151 /* Now actually save the FP registers. */
4152 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4153 {
4154 if (df_regs_ever_live_p (i)
4155 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4156 {
4157 rtx addr, reg;
4158 rtx_insn *insn;
4159 addr = gen_rtx_MEM (DFmode,
4160 gen_rtx_POST_INC (word_mode, tmpreg));
4161 reg = gen_rtx_REG (DFmode, i);
4162 insn = emit_move_insn (addr, reg);
4163 if (DO_FRAME_NOTES)
4164 {
4165 RTX_FRAME_RELATED_P (insn) = 1;
4166 if (TARGET_64BIT)
4167 {
4168 rtx mem = gen_rtx_MEM (DFmode,
4169 plus_constant (Pmode, base,
4170 offset));
4171 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4172 gen_rtx_SET (mem, reg));
4173 }
4174 else
4175 {
4176 rtx meml = gen_rtx_MEM (SFmode,
4177 plus_constant (Pmode, base,
4178 offset));
4179 rtx memr = gen_rtx_MEM (SFmode,
4180 plus_constant (Pmode, base,
4181 offset + 4));
4182 rtx regl = gen_rtx_REG (SFmode, i);
4183 rtx regr = gen_rtx_REG (SFmode, i + 1);
4184 rtx setl = gen_rtx_SET (meml, regl);
4185 rtx setr = gen_rtx_SET (memr, regr);
4186 rtvec vec;
4187
4188 RTX_FRAME_RELATED_P (setl) = 1;
4189 RTX_FRAME_RELATED_P (setr) = 1;
4190 vec = gen_rtvec (2, setl, setr);
4191 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4192 gen_rtx_SEQUENCE (VOIDmode, vec));
4193 }
4194 }
4195 offset += GET_MODE_SIZE (DFmode);
4196 fr_saved++;
4197 }
4198 }
4199 }
4200 }
4201
4202 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4203 Handle case where DISP > 8k by using the add_high_const patterns. */
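/* The generated code mirrors store_reg above (illustrative, 32-bit):
   "ldw disp(%base),%reg" for a small displacement, or
   "addil L'disp,%base" followed by "ldw R'disp(%r1),%reg".  */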
4204
4205 static void
4206 load_reg (int reg, HOST_WIDE_INT disp, int base)
4207 {
4208 rtx dest = gen_rtx_REG (word_mode, reg);
4209 rtx basereg = gen_rtx_REG (Pmode, base);
4210 rtx src;
4211
4212 if (VAL_14_BITS_P (disp))
4213 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4214 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4215 {
4216 rtx delta = GEN_INT (disp);
4217 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4218
4219 emit_move_insn (tmpreg, delta);
4220 if (TARGET_DISABLE_INDEXING)
4221 {
4222 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4223 src = gen_rtx_MEM (word_mode, tmpreg);
4224 }
4225 else
4226 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4227 }
4228 else
4229 {
4230 rtx delta = GEN_INT (disp);
4231 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4232 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4233
4234 emit_move_insn (tmpreg, high);
4235 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4236 }
4237
4238 emit_move_insn (dest, src);
4239 }
4240
4241 /* Update the total code bytes output to the text section. */
4242
4243 static void
4244 update_total_code_bytes (unsigned int nbytes)
4245 {
4246 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4247 && !IN_NAMED_SECTION_P (cfun->decl))
4248 {
4249 unsigned int old_total = total_code_bytes;
4250
4251 total_code_bytes += nbytes;
4252
4253 /* Be prepared to handle overflows. */
4254 if (old_total > total_code_bytes)
4255 total_code_bytes = UINT_MAX;
4256 }
4257 }
4258
4259 /* This function generates the assembly code for function exit.
4260 Args are as for output_function_prologue ().
4261
4262 The function epilogue should not depend on the current stack
4263 pointer! It should use the frame pointer only. This is mandatory
4264 because of alloca; we also take advantage of it to omit stack
4265 adjustments before returning. */
4266
4267 static void
4268 pa_output_function_epilogue (FILE *file)
4269 {
4270 rtx_insn *insn = get_last_insn ();
4271 bool extra_nop;
4272
4273 /* pa_expand_epilogue does the dirty work now. We just need
4274 to output the assembler directives which denote the end
4275 of a function.
4276
4277 To make debuggers happy, emit a nop if the epilogue was completely
4278 eliminated due to a volatile call as the last insn in the
4279 current function. That way the return address (in %r2) will
4280 always point to a valid instruction in the current function. */
4281
4282 /* Get the last real insn. */
4283 if (NOTE_P (insn))
4284 insn = prev_real_insn (insn);
4285
4286 /* If it is a sequence, then look inside. */
4287 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4288     insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4289
4290 /* If insn is a CALL_INSN, then it must be a call to a volatile
4291 function (otherwise there would be epilogue insns). */
4292 if (insn && CALL_P (insn))
4293 {
4294 fputs ("\tnop\n", file);
4295 extra_nop = true;
4296 }
4297 else
4298 extra_nop = false;
4299
4300 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4301
4302 if (TARGET_SOM && TARGET_GAS)
4303 {
4304 /* We are done with this subspace except possibly for some additional
4305 debug information. Forget that we are in this subspace to ensure
4306 that the next function is output in its own subspace. */
4307 in_section = NULL;
4308 cfun->machine->in_nsubspa = 2;
4309 }
4310
4311 /* Thunks do their own insn accounting. */
4312 if (cfun->is_thunk)
4313 return;
4314
4315 if (INSN_ADDRESSES_SET_P ())
4316 {
4317 last_address = extra_nop ? 4 : 0;
4318 insn = get_last_nonnote_insn ();
4319 if (insn)
4320 {
4321 last_address += INSN_ADDRESSES (INSN_UID (insn));
4322 if (INSN_P (insn))
4323 last_address += insn_default_length (insn);
4324 }
4325 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4326 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4327 }
4328 else
4329 last_address = UINT_MAX;
4330
4331 /* Finally, update the total number of code bytes output so far. */
4332 update_total_code_bytes (last_address);
4333 }
4334
4335 void
4336 pa_expand_epilogue (void)
4337 {
4338 rtx tmpreg;
4339 HOST_WIDE_INT offset;
4340 HOST_WIDE_INT ret_off = 0;
4341 int i;
4342 int merge_sp_adjust_with_load = 0;
4343
4344 /* We will use this often. */
4345 tmpreg = gen_rtx_REG (word_mode, 1);
4346
4347 /* Try to restore RP early to avoid load/use interlocks when
4348 RP gets used in the return (bv) instruction. This appears to still
4349 be necessary even when we schedule the prologue and epilogue. */
4350 if (rp_saved)
4351 {
4352 ret_off = TARGET_64BIT ? -16 : -20;
4353 if (frame_pointer_needed)
4354 {
4355 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4356 ret_off = 0;
4357 }
4358 else
4359 {
4360 /* No frame pointer, and stack is smaller than 8k. */
4361 if (VAL_14_BITS_P (ret_off - actual_fsize))
4362 {
4363 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4364 ret_off = 0;
4365 }
4366 }
4367 }
4368
4369 /* General register restores. */
4370 if (frame_pointer_needed)
4371 {
4372 offset = local_fsize;
4373
4374 /* If the current function calls __builtin_eh_return, then we need
4375 to restore the saved EH data registers. */
4376 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4377 {
4378 unsigned int i, regno;
4379
4380 for (i = 0; ; ++i)
4381 {
4382 regno = EH_RETURN_DATA_REGNO (i);
4383 if (regno == INVALID_REGNUM)
4384 break;
4385
4386 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4387 offset += UNITS_PER_WORD;
4388 }
4389 }
4390
4391 for (i = 18; i >= 4; i--)
4392 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4393 {
4394 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4395 offset += UNITS_PER_WORD;
4396 }
4397 }
4398 else
4399 {
4400 offset = local_fsize - actual_fsize;
4401
4402 /* If the current function calls __builtin_eh_return, then we need
4403 to restore the saved EH data registers. */
4404 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4405 {
4406 unsigned int i, regno;
4407
4408 for (i = 0; ; ++i)
4409 {
4410 regno = EH_RETURN_DATA_REGNO (i);
4411 if (regno == INVALID_REGNUM)
4412 break;
4413
4414 /* Only for the first load.
4415 merge_sp_adjust_with_load holds the register load
4416 with which we will merge the sp adjustment. */
4417 if (merge_sp_adjust_with_load == 0
4418 && local_fsize == 0
4419 && VAL_14_BITS_P (-actual_fsize))
4420 merge_sp_adjust_with_load = regno;
4421 else
4422 load_reg (regno, offset, STACK_POINTER_REGNUM);
4423 offset += UNITS_PER_WORD;
4424 }
4425 }
4426
4427 for (i = 18; i >= 3; i--)
4428 {
4429 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4430 {
4431 /* Only for the first load.
4432 merge_sp_adjust_with_load holds the register load
4433 with which we will merge the sp adjustment. */
4434 if (merge_sp_adjust_with_load == 0
4435 && local_fsize == 0
4436 && VAL_14_BITS_P (-actual_fsize))
4437 merge_sp_adjust_with_load = i;
4438 else
4439 load_reg (i, offset, STACK_POINTER_REGNUM);
4440 offset += UNITS_PER_WORD;
4441 }
4442 }
4443 }
4444
4445 /* Align pointer properly (doubleword boundary). */
4446 offset = (offset + 7) & ~7;
4447
4448 /* FP register restores. */
4449 if (save_fregs)
4450 {
4451 /* Adjust the register to index off of. */
4452 if (frame_pointer_needed)
4453 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4454 else
4455 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4456
4457 /* Actually do the restores now. */
4458 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4459 if (df_regs_ever_live_p (i)
4460 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4461 {
4462 rtx src = gen_rtx_MEM (DFmode,
4463 gen_rtx_POST_INC (word_mode, tmpreg));
4464 rtx dest = gen_rtx_REG (DFmode, i);
4465 emit_move_insn (dest, src);
4466 }
4467 }
4468
4469 /* Emit a blockage insn here to keep these insns from being moved to
4470 an earlier spot in the epilogue, or into the main instruction stream.
4471
4472 This is necessary as we must not cut the stack back before all the
4473 restores are finished. */
4474 emit_insn (gen_blockage ());
4475
4476 /* Reset stack pointer (and possibly frame pointer). The stack
4477 pointer is initially set to fp + 64 to avoid a race condition. */
4478 if (frame_pointer_needed)
4479 {
4480 rtx delta = GEN_INT (-64);
4481
4482 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4483 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4484 stack_pointer_rtx, delta));
4485 }
4486 /* If we were deferring a callee register restore, do it now. */
4487 else if (merge_sp_adjust_with_load)
4488 {
4489 rtx delta = GEN_INT (-actual_fsize);
4490 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4491
4492 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4493 }
4494 else if (actual_fsize != 0)
4495 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4496 - actual_fsize, 0);
4497
4498 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4499 frame greater than 8k), do so now. */
4500 if (ret_off != 0)
4501 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4502
4503 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4504 {
4505 rtx sa = EH_RETURN_STACKADJ_RTX;
4506
4507 emit_insn (gen_blockage ());
4508 emit_insn (TARGET_64BIT
4509 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4510 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4511 }
4512 }
4513
4514 bool
4515 pa_can_use_return_insn (void)
4516 {
4517 if (!reload_completed)
4518 return false;
4519
4520 if (frame_pointer_needed)
4521 return false;
4522
4523 if (df_regs_ever_live_p (2))
4524 return false;
4525
4526 if (crtl->profile)
4527 return false;
4528
4529 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4530 }
4531
4532 rtx
4533 hppa_pic_save_rtx (void)
4534 {
4535 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4536 }
4537
4538 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4539 #define NO_DEFERRED_PROFILE_COUNTERS 0
4540 #endif
4541
4542
4543 /* Vector of funcdef numbers. */
4544 static vec<int> funcdef_nos;
4545
4546 /* Output deferred profile counters. */
4547 static void
4548 output_deferred_profile_counters (void)
4549 {
4550 unsigned int i;
4551 int align, n;
4552
4553 if (funcdef_nos.is_empty ())
4554 return;
4555
4556 switch_to_section (data_section);
4557 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4558 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4559
4560 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4561 {
4562 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4563 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4564 }
4565
4566 funcdef_nos.release ();
4567 }
4568
4569 void
4570 hppa_profile_hook (int label_no)
4571 {
4572 /* We use SImode for the address of the function in both 32 and
4573 64-bit code to avoid having to provide DImode versions of the
4574 lcla2 and load_offset_label_address insn patterns. */
4575 rtx reg = gen_reg_rtx (SImode);
4576 rtx_code_label *label_rtx = gen_label_rtx ();
4577 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4578 rtx arg_bytes, begin_label_rtx, mcount, sym;
4579 rtx_insn *call_insn;
4580 char begin_label_name[16];
4581 bool use_mcount_pcrel_call;
4582
4583 /* Set up call destination. */
4584 sym = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
4585 pa_encode_label (sym);
4586 mcount = gen_rtx_MEM (Pmode, sym);
4587
4588 /* If we can reach _mcount with a pc-relative call, we can optimize
4589 loading the address of the current function. This requires linker
4590 long branch stub support. */
4591 if (!TARGET_PORTABLE_RUNTIME
4592 && !TARGET_LONG_CALLS
4593 && (TARGET_SOM || flag_function_sections))
4594 use_mcount_pcrel_call = TRUE;
4595 else
4596 use_mcount_pcrel_call = FALSE;
4597
4598 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4599 label_no);
4600 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4601
4602 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4603
4604 if (!use_mcount_pcrel_call)
4605 {
4606 /* The address of the function is loaded into %r25 with an instruction-
4607 relative sequence that avoids the use of relocations. The sequence
4608 is split so that the load_offset_label_address instruction can
4609 occupy the delay slot of the call to _mcount. */
4610 if (TARGET_PA_20)
4611 emit_insn (gen_lcla2 (reg, label_rtx));
4612 else
4613 emit_insn (gen_lcla1 (reg, label_rtx));
4614
4615 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4616 reg,
4617 begin_label_rtx,
4618 label_rtx));
4619 }
4620
4621 if (!NO_DEFERRED_PROFILE_COUNTERS)
4622 {
4623 rtx count_label_rtx, addr, r24;
4624 char count_label_name[16];
4625
4626 funcdef_nos.safe_push (label_no);
4627 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4628 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4629 ggc_strdup (count_label_name));
4630
4631 addr = force_reg (Pmode, count_label_rtx);
4632 r24 = gen_rtx_REG (Pmode, 24);
4633 emit_move_insn (r24, addr);
4634
4635 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4636 if (use_mcount_pcrel_call)
4637 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4638 begin_label_rtx));
4639 else
4640 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4641
4642 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4643 }
4644 else
4645 {
4646 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4647 if (use_mcount_pcrel_call)
4648 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4649 begin_label_rtx));
4650 else
4651 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4652 }
4653
4654 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4655 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4656
4657 /* Indicate the _mcount call cannot throw, nor will it execute a
4658 non-local goto. */
4659 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4660
4661 /* Allocate space for fixed arguments. */
4662 if (reg_parm_stack_space > crtl->outgoing_args_size)
4663 crtl->outgoing_args_size = reg_parm_stack_space;
4664 }
4665
4666 /* Fetch the return address for the frame COUNT steps up from
4667 the current frame, after the prologue. FRAMEADDR is the
4668 frame pointer of the COUNT frame.
4669
4670 We want to ignore any export stub remnants here. To handle this,
4671 we examine the code at the return address, and if it is an export
4672 stub, we return a memory rtx for the stub return address stored
4673 at frame-24.
4674
4675 The value returned is used in two different ways:
4676
4677 1. To find a function's caller.
4678
4679 2. To change the return address for a function.
4680
4681 This function handles most instances of case 1; however, it will
4682 fail if there are two levels of stubs to execute on the return
4683 path. The only way I believe that can happen is if the return value
4684 needs a parameter relocation, which never happens for C code.
4685
4686 This function handles most instances of case 2; however, it will
4687 fail if we did not originally have stub code on the return path
4688 but will need stub code on the new return path. This can happen if
4689 the caller & callee are both in the main program, but the new
4690 return location is in a shared library. */
4691
4692 rtx
4693 pa_return_addr_rtx (int count, rtx frameaddr)
4694 {
4695 rtx label;
4696 rtx rp;
4697 rtx saved_rp;
4698 rtx ins;
4699
4700 /* The instruction stream at the return address of a PA1.X export stub is:
4701
4702 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4703 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4704 0x00011820 | stub+16: mtsp r1,sr0
4705 0xe0400002 | stub+20: be,n 0(sr0,rp)
4706
4707 0xe0400002 must be specified as -532676606 so that it won't be
4708 rejected as an invalid immediate operand on 64-bit hosts.
4709
4710 The instruction stream at the return address of a PA2.0 export stub is:
4711
4712 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4713 0xe840d002 | stub+12: bve,n (rp)
4714 */
4715
4716 HOST_WIDE_INT insns[4];
4717 int i, len;
4718
4719 if (count != 0)
4720 return NULL_RTX;
4721
4722 rp = get_hard_reg_initial_val (Pmode, 2);
4723
4724 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4725 return rp;
4726
4727 /* If there is no export stub then just use the value saved from
4728 the return pointer register. */
4729
4730 saved_rp = gen_reg_rtx (Pmode);
4731 emit_move_insn (saved_rp, rp);
4732
4733 /* Get pointer to the instruction stream. We have to mask out the
4734 privilege level from the two low order bits of the return address
4735 pointer here so that ins will point to the start of the first
4736 instruction that would have been executed if we returned. */
4737 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4738 label = gen_label_rtx ();
4739
4740 if (TARGET_PA_20)
4741 {
4742 insns[0] = 0x4bc23fd1;
4743 insns[1] = -398405630;
4744 len = 2;
4745 }
4746 else
4747 {
4748 insns[0] = 0x4bc23fd1;
4749 insns[1] = 0x004010a1;
4750 insns[2] = 0x00011820;
4751 insns[3] = -532676606;
4752 len = 4;
4753 }
4754
4755 /* Check the instruction stream at the normal return address for the
4756    export stub.  If it is an export stub, then our return address is
4757 really in -24[frameaddr]. */
4758
4759 for (i = 0; i < len; i++)
4760 {
4761 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4762 rtx op1 = GEN_INT (insns[i]);
4763 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4764 }
4765
4766 /* Here we know that our return address points to an export
4767 stub. We don't want to return the address of the export stub,
4768 but rather the return address of the export stub. That return
4769 address is stored at -24[frameaddr]. */
4770
4771 emit_move_insn (saved_rp,
4772 gen_rtx_MEM (Pmode,
4773 memory_address (Pmode,
4774 plus_constant (Pmode, frameaddr,
4775 -24))));
4776
4777 emit_label (label);
4778
4779 return saved_rp;
4780 }
4781
4782 void
4783 pa_emit_bcond_fp (rtx operands[])
4784 {
4785 enum rtx_code code = GET_CODE (operands[0]);
4786 rtx operand0 = operands[1];
4787 rtx operand1 = operands[2];
4788 rtx label = operands[3];
4789
4790 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4791 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4792
4793 emit_jump_insn (gen_rtx_SET (pc_rtx,
4794 gen_rtx_IF_THEN_ELSE (VOIDmode,
4795 gen_rtx_fmt_ee (NE,
4796 VOIDmode,
4797 gen_rtx_REG (CCFPmode, 0),
4798 const0_rtx),
4799 gen_rtx_LABEL_REF (VOIDmode, label),
4800 pc_rtx)));
4801
4802 }
4803
4804 /* Adjust the cost of a scheduling dependency. Return the new cost of
4805 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
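/* For example (illustrative): if an fpload anti-depends on an FP
   multiply whose default latency is three cycles, the code below
   returns 3 - 1 = 2, letting the load issue one cycle before the
   multiply retires.  */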
4806
4807 static int
4808 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4809 unsigned int)
4810 {
4811 enum attr_type attr_type;
4812
4813   /* Don't adjust costs for a pa8000 chip; likewise, do not adjust any
4814 true dependencies as they are described with bypasses now. */
4815 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4816 return cost;
4817
4818 if (! recog_memoized (insn))
4819 return 0;
4820
4821 attr_type = get_attr_type (insn);
4822
4823 switch (dep_type)
4824 {
4825 case REG_DEP_ANTI:
4826 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4827 cycles later. */
4828
4829 if (attr_type == TYPE_FPLOAD)
4830 {
4831 rtx pat = PATTERN (insn);
4832 rtx dep_pat = PATTERN (dep_insn);
4833 if (GET_CODE (pat) == PARALLEL)
4834 {
4835 /* This happens for the fldXs,mb patterns. */
4836 pat = XVECEXP (pat, 0, 0);
4837 }
4838 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4839 /* If this happens, we have to extend this to schedule
4840 optimally. Return 0 for now. */
4841 return 0;
4842
4843 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4844 {
4845 if (! recog_memoized (dep_insn))
4846 return 0;
4847 switch (get_attr_type (dep_insn))
4848 {
4849 case TYPE_FPALU:
4850 case TYPE_FPMULSGL:
4851 case TYPE_FPMULDBL:
4852 case TYPE_FPDIVSGL:
4853 case TYPE_FPDIVDBL:
4854 case TYPE_FPSQRTSGL:
4855 case TYPE_FPSQRTDBL:
4856 /* A fpload can't be issued until one cycle before a
4857 preceding arithmetic operation has finished if
4858 the target of the fpload is any of the sources
4859 (or destination) of the arithmetic operation. */
4860 return insn_default_latency (dep_insn) - 1;
4861
4862 default:
4863 return 0;
4864 }
4865 }
4866 }
4867 else if (attr_type == TYPE_FPALU)
4868 {
4869 rtx pat = PATTERN (insn);
4870 rtx dep_pat = PATTERN (dep_insn);
4871 if (GET_CODE (pat) == PARALLEL)
4872 {
4873 /* This happens for the fldXs,mb patterns. */
4874 pat = XVECEXP (pat, 0, 0);
4875 }
4876 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4877 /* If this happens, we have to extend this to schedule
4878 optimally. Return 0 for now. */
4879 return 0;
4880
4881 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4882 {
4883 if (! recog_memoized (dep_insn))
4884 return 0;
4885 switch (get_attr_type (dep_insn))
4886 {
4887 case TYPE_FPDIVSGL:
4888 case TYPE_FPDIVDBL:
4889 case TYPE_FPSQRTSGL:
4890 case TYPE_FPSQRTDBL:
4891 /* An ALU flop can't be issued until two cycles before a
4892 preceding divide or sqrt operation has finished if
4893 the target of the ALU flop is any of the sources
4894 (or destination) of the divide or sqrt operation. */
4895 return insn_default_latency (dep_insn) - 2;
4896
4897 default:
4898 return 0;
4899 }
4900 }
4901 }
4902
4903 /* For other anti dependencies, the cost is 0. */
4904 return 0;
4905
4906 case REG_DEP_OUTPUT:
4907 /* Output dependency; DEP_INSN writes a register that INSN writes some
4908 cycles later. */
4909 if (attr_type == TYPE_FPLOAD)
4910 {
4911 rtx pat = PATTERN (insn);
4912 rtx dep_pat = PATTERN (dep_insn);
4913 if (GET_CODE (pat) == PARALLEL)
4914 {
4915 /* This happens for the fldXs,mb patterns. */
4916 pat = XVECEXP (pat, 0, 0);
4917 }
4918 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4919 /* If this happens, we have to extend this to schedule
4920 optimally. Return 0 for now. */
4921 return 0;
4922
4923 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4924 {
4925 if (! recog_memoized (dep_insn))
4926 return 0;
4927 switch (get_attr_type (dep_insn))
4928 {
4929 case TYPE_FPALU:
4930 case TYPE_FPMULSGL:
4931 case TYPE_FPMULDBL:
4932 case TYPE_FPDIVSGL:
4933 case TYPE_FPDIVDBL:
4934 case TYPE_FPSQRTSGL:
4935 case TYPE_FPSQRTDBL:
4936 /* A fpload can't be issued until one cycle before a
4937 preceding arithmetic operation has finished if
4938 the target of the fpload is the destination of the
4939 arithmetic operation.
4940
4941 Exception: For PA7100LC, PA7200 and PA7300, the cost
4942 is 3 cycles, unless they bundle together. We also
4943 pay the penalty if the second insn is a fpload. */
4944 return insn_default_latency (dep_insn) - 1;
4945
4946 default:
4947 return 0;
4948 }
4949 }
4950 }
4951 else if (attr_type == TYPE_FPALU)
4952 {
4953 rtx pat = PATTERN (insn);
4954 rtx dep_pat = PATTERN (dep_insn);
4955 if (GET_CODE (pat) == PARALLEL)
4956 {
4957 /* This happens for the fldXs,mb patterns. */
4958 pat = XVECEXP (pat, 0, 0);
4959 }
4960 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4961 /* If this happens, we have to extend this to schedule
4962 optimally. Return 0 for now. */
4963 return 0;
4964
4965 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4966 {
4967 if (! recog_memoized (dep_insn))
4968 return 0;
4969 switch (get_attr_type (dep_insn))
4970 {
4971 case TYPE_FPDIVSGL:
4972 case TYPE_FPDIVDBL:
4973 case TYPE_FPSQRTSGL:
4974 case TYPE_FPSQRTDBL:
4975 /* An ALU flop can't be issued until two cycles before a
4976 preceding divide or sqrt operation has finished if
4977 the target of the ALU flop is also the target of
4978 the divide or sqrt operation. */
4979 return insn_default_latency (dep_insn) - 2;
4980
4981 default:
4982 return 0;
4983 }
4984 }
4985 }
4986
4987 /* For other output dependencies, the cost is 0. */
4988 return 0;
4989
4990 default:
4991 gcc_unreachable ();
4992 }
4993 }
4994
4995 /* Adjust scheduling priorities. We use this to try and keep addil
4996 and the next use of %r1 close together. */
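/* E.g., a store through a LO_SUM address of a writable symbol has its
   priority shifted right by three (divided by eight), which tends to
   keep it next to the addil that computed %r1 (illustrative).  */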
4997 static int
4998 pa_adjust_priority (rtx_insn *insn, int priority)
4999 {
5000 rtx set = single_set (insn);
5001 rtx src, dest;
5002 if (set)
5003 {
5004 src = SET_SRC (set);
5005 dest = SET_DEST (set);
5006 if (GET_CODE (src) == LO_SUM
5007 && symbolic_operand (XEXP (src, 1), VOIDmode)
5008 && ! read_only_operand (XEXP (src, 1), VOIDmode))
5009 priority >>= 3;
5010
5011 else if (GET_CODE (src) == MEM
5012 && GET_CODE (XEXP (src, 0)) == LO_SUM
5013 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
5014 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
5015 priority >>= 1;
5016
5017 else if (GET_CODE (dest) == MEM
5018 && GET_CODE (XEXP (dest, 0)) == LO_SUM
5019 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
5020 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
5021 priority >>= 3;
5022 }
5023 return priority;
5024 }
5025
5026 /* The 700 can only issue a single insn at a time.
5027 The 7XXX processors can issue two insns at a time.
5028 The 8000 can issue 4 insns at a time. */
5029 static int
5030 pa_issue_rate (void)
5031 {
5032 switch (pa_cpu)
5033 {
5034 case PROCESSOR_700: return 1;
5035 case PROCESSOR_7100: return 2;
5036 case PROCESSOR_7100LC: return 2;
5037 case PROCESSOR_7200: return 2;
5038 case PROCESSOR_7300: return 2;
5039 case PROCESSOR_8000: return 4;
5040
5041 default:
5042 gcc_unreachable ();
5043 }
5044 }
5045
5046
5047
5048 /* Return the length needed by INSN, which already has its length
5049    computed as LENGTH.  Return LENGTH if no adjustment is
5050 necessary.
5051
5052 Also compute the length of an inline block move here as it is too
5053 complicated to express as a length attribute in pa.md. */
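/* For instance, a short backwards conditional branch with an unfilled
   delay slot grows from 4 to 8 bytes below, since the slot must
   effectively be filled with a nop (illustrative of the adjustments
   that follow).  */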
5054 int
5055 pa_adjust_insn_length (rtx_insn *insn, int length)
5056 {
5057 rtx pat = PATTERN (insn);
5058
5059 /* If length is negative or undefined, provide initial length. */
5060 if ((unsigned int) length >= INT_MAX)
5061 {
5062 if (GET_CODE (pat) == SEQUENCE)
5063 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5064
5065 switch (get_attr_type (insn))
5066 {
5067 case TYPE_MILLI:
5068 length = pa_attr_length_millicode_call (insn);
5069 break;
5070 case TYPE_CALL:
5071 length = pa_attr_length_call (insn, 0);
5072 break;
5073 case TYPE_SIBCALL:
5074 length = pa_attr_length_call (insn, 1);
5075 break;
5076 case TYPE_DYNCALL:
5077 length = pa_attr_length_indirect_call (insn);
5078 break;
5079 case TYPE_SH_FUNC_ADRS:
5080 length = pa_attr_length_millicode_call (insn) + 20;
5081 break;
5082 default:
5083 gcc_unreachable ();
5084 }
5085 }
5086
5087 /* Block move pattern. */
5088 if (NONJUMP_INSN_P (insn)
5089 && GET_CODE (pat) == PARALLEL
5090 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5091 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5092 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5093 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5094 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5095 length += compute_movmem_length (insn) - 4;
5096 /* Block clear pattern. */
5097 else if (NONJUMP_INSN_P (insn)
5098 && GET_CODE (pat) == PARALLEL
5099 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5100 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5101 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5102 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5103 length += compute_clrmem_length (insn) - 4;
5104 /* Conditional branch with an unfilled delay slot. */
5105 else if (JUMP_P (insn) && ! simplejump_p (insn))
5106 {
5107 /* Adjust a short backwards conditional with an unfilled delay slot. */
5108 if (GET_CODE (pat) == SET
5109 && length == 4
5110 && JUMP_LABEL (insn) != NULL_RTX
5111 && ! forward_branch_p (insn))
5112 length += 4;
5113 else if (GET_CODE (pat) == PARALLEL
5114 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5115 && length == 4)
5116 length += 4;
5117 /* Adjust dbra insn with short backwards conditional branch with
5118 unfilled delay slot -- only for case where counter is in a
5119 	 general register.  */
5120 else if (GET_CODE (pat) == PARALLEL
5121 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5122 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5123 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5124 && length == 4
5125 && ! forward_branch_p (insn))
5126 length += 4;
5127 }
5128 return length;
5129 }
5130
5131 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5132
5133 static bool
5134 pa_print_operand_punct_valid_p (unsigned char code)
5135 {
5136 if (code == '@'
5137 || code == '#'
5138 || code == '*'
5139 || code == '^')
5140 return true;
5141
5142 return false;
5143 }
5144
5145 /* Print operand X (an rtx) in assembler syntax to file FILE.
5146 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5147 For `%' followed by punctuation, CODE is the punctuation and X is null. */
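/* For example, "%C0" in an output template prints the comparison
   operator of operand 0 ("=", "<>", ">=", ...), "%N0" prints its
   negation, and "%#" emits a nop when nothing fills the delay slot
   (illustrative; see the cases below).  */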
5148
5149 void
5150 pa_print_operand (FILE *file, rtx x, int code)
5151 {
5152 switch (code)
5153 {
5154 case '#':
5155 /* Output a 'nop' if there's nothing for the delay slot. */
5156 if (dbr_sequence_length () == 0)
5157 fputs ("\n\tnop", file);
5158 return;
5159 case '*':
5160       /* Output a nullification completer if there's nothing for the
5161 	 delay slot or nullification is requested.  */
5162       if (dbr_sequence_length () == 0
5163 	  || (final_sequence
5164 	      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5165 fputs (",n", file);
5166 return;
5167 case 'R':
5168 /* Print out the second register name of a register pair.
5169 I.e., R (6) => 7. */
5170 fputs (reg_names[REGNO (x) + 1], file);
5171 return;
5172 case 'r':
5173 /* A register or zero. */
5174 if (x == const0_rtx
5175 || (x == CONST0_RTX (DFmode))
5176 || (x == CONST0_RTX (SFmode)))
5177 {
5178 fputs ("%r0", file);
5179 return;
5180 }
5181 else
5182 break;
5183 case 'f':
5184 /* A register or zero (floating point). */
5185 if (x == const0_rtx
5186 || (x == CONST0_RTX (DFmode))
5187 || (x == CONST0_RTX (SFmode)))
5188 {
5189 fputs ("%fr0", file);
5190 return;
5191 }
5192 else
5193 break;
5194 case 'A':
5195 {
5196 rtx xoperands[2];
5197
5198 xoperands[0] = XEXP (XEXP (x, 0), 0);
5199 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5200 pa_output_global_address (file, xoperands[1], 0);
5201 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5202 return;
5203 }
5204
5205 case 'C': /* Plain (C)ondition */
5206 case 'X':
5207 switch (GET_CODE (x))
5208 {
5209 case EQ:
5210 fputs ("=", file); break;
5211 case NE:
5212 fputs ("<>", file); break;
5213 case GT:
5214 fputs (">", file); break;
5215 case GE:
5216 fputs (">=", file); break;
5217 case GEU:
5218 fputs (">>=", file); break;
5219 case GTU:
5220 fputs (">>", file); break;
5221 case LT:
5222 fputs ("<", file); break;
5223 case LE:
5224 fputs ("<=", file); break;
5225 case LEU:
5226 fputs ("<<=", file); break;
5227 case LTU:
5228 fputs ("<<", file); break;
5229 default:
5230 gcc_unreachable ();
5231 }
5232 return;
5233 case 'N': /* Condition, (N)egated */
5234 switch (GET_CODE (x))
5235 {
5236 case EQ:
5237 fputs ("<>", file); break;
5238 case NE:
5239 fputs ("=", file); break;
5240 case GT:
5241 fputs ("<=", file); break;
5242 case GE:
5243 fputs ("<", file); break;
5244 case GEU:
5245 fputs ("<<", file); break;
5246 case GTU:
5247 fputs ("<<=", file); break;
5248 case LT:
5249 fputs (">=", file); break;
5250 case LE:
5251 fputs (">", file); break;
5252 case LEU:
5253 fputs (">>", file); break;
5254 case LTU:
5255 fputs (">>=", file); break;
5256 default:
5257 gcc_unreachable ();
5258 }
5259 return;
5260 /* For floating point comparisons. Note that the output
5261 predicates are the complement of the desired mode. The
5262 conditions for GT, GE, LT, LE and LTGT cause an invalid
5263 operation exception if the result is unordered and this
5264 exception is enabled in the floating-point status register. */
5265 case 'Y':
5266 switch (GET_CODE (x))
5267 {
5268 case EQ:
5269 fputs ("!=", file); break;
5270 case NE:
5271 fputs ("=", file); break;
5272 case GT:
5273 fputs ("!>", file); break;
5274 case GE:
5275 fputs ("!>=", file); break;
5276 case LT:
5277 fputs ("!<", file); break;
5278 case LE:
5279 fputs ("!<=", file); break;
5280 case LTGT:
5281 fputs ("!<>", file); break;
5282 case UNLE:
5283 fputs ("!?<=", file); break;
5284 case UNLT:
5285 fputs ("!?<", file); break;
5286 case UNGE:
5287 fputs ("!?>=", file); break;
5288 case UNGT:
5289 fputs ("!?>", file); break;
5290 case UNEQ:
5291 fputs ("!?=", file); break;
5292 case UNORDERED:
5293 fputs ("!?", file); break;
5294 case ORDERED:
5295 fputs ("?", file); break;
5296 default:
5297 gcc_unreachable ();
5298 }
5299 return;
5300 case 'S': /* Condition, operands are (S)wapped. */
5301 switch (GET_CODE (x))
5302 {
5303 case EQ:
5304 fputs ("=", file); break;
5305 case NE:
5306 fputs ("<>", file); break;
5307 case GT:
5308 fputs ("<", file); break;
5309 case GE:
5310 fputs ("<=", file); break;
5311 case GEU:
5312 fputs ("<<=", file); break;
5313 case GTU:
5314 fputs ("<<", file); break;
5315 case LT:
5316 fputs (">", file); break;
5317 case LE:
5318 fputs (">=", file); break;
5319 case LEU:
5320 fputs (">>=", file); break;
5321 case LTU:
5322 fputs (">>", file); break;
5323 default:
5324 gcc_unreachable ();
5325 }
5326 return;
5327 case 'B': /* Condition, (B)oth swapped and negate. */
5328 switch (GET_CODE (x))
5329 {
5330 case EQ:
5331 fputs ("<>", file); break;
5332 case NE:
5333 fputs ("=", file); break;
5334 case GT:
5335 fputs (">=", file); break;
5336 case GE:
5337 fputs (">", file); break;
5338 case GEU:
5339 fputs (">>", file); break;
5340 case GTU:
5341 fputs (">>=", file); break;
5342 case LT:
5343 fputs ("<=", file); break;
5344 case LE:
5345 fputs ("<", file); break;
5346 case LEU:
5347 fputs ("<<", file); break;
5348 case LTU:
5349 fputs ("<<=", file); break;
5350 default:
5351 gcc_unreachable ();
5352 }
5353 return;
5354 case 'k':
5355 gcc_assert (GET_CODE (x) == CONST_INT);
5356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5357 return;
5358 case 'Q':
5359 gcc_assert (GET_CODE (x) == CONST_INT);
5360 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5361 return;
5362 case 'L':
5363 gcc_assert (GET_CODE (x) == CONST_INT);
5364 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5365 return;
5366 case 'o':
5367 gcc_assert (GET_CODE (x) == CONST_INT
5368 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5369 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5370 return;
5371 case 'O':
5372 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5373 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5374 return;
5375 case 'p':
5376 gcc_assert (GET_CODE (x) == CONST_INT);
5377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5378 return;
5379 case 'P':
5380 gcc_assert (GET_CODE (x) == CONST_INT);
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5382 return;
5383 case 'I':
5384 if (GET_CODE (x) == CONST_INT)
5385 fputs ("i", file);
5386 return;
5387 case 'M':
5388 case 'F':
5389 switch (GET_CODE (XEXP (x, 0)))
5390 {
5391 case PRE_DEC:
5392 case PRE_INC:
5393 if (ASSEMBLER_DIALECT == 0)
5394 fputs ("s,mb", file);
5395 else
5396 fputs (",mb", file);
5397 break;
5398 case POST_DEC:
5399 case POST_INC:
5400 if (ASSEMBLER_DIALECT == 0)
5401 fputs ("s,ma", file);
5402 else
5403 fputs (",ma", file);
5404 break;
5405 case PLUS:
5406 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5407 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5408 {
5409 if (ASSEMBLER_DIALECT == 0)
5410 fputs ("x", file);
5411 }
5412 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5413 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5414 {
5415 if (ASSEMBLER_DIALECT == 0)
5416 fputs ("x,s", file);
5417 else
5418 fputs (",s", file);
5419 }
5420 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5421 fputs ("s", file);
5422 break;
5423 default:
5424 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5425 fputs ("s", file);
5426 break;
5427 }
5428 return;
5429 case 'G':
5430 pa_output_global_address (file, x, 0);
5431 return;
5432 case 'H':
5433 pa_output_global_address (file, x, 1);
5434 return;
5435 case 0: /* Don't do anything special. */
5436 break;
5437 case 'Z':
5438 {
5439 unsigned op[3];
5440 compute_zdepwi_operands (INTVAL (x), op);
5441 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5442 return;
5443 }
5444 case 'z':
5445 {
5446 unsigned op[3];
5447 compute_zdepdi_operands (INTVAL (x), op);
5448 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5449 return;
5450 }
5451 case 'c':
5452 /* We can get here from a .vtable_inherit due to our
5453 CONSTANT_ADDRESS_P rejecting perfectly good constant
5454 addresses. */
5455 break;
5456 default:
5457 gcc_unreachable ();
5458 }
5459 if (GET_CODE (x) == REG)
5460 {
5461 fputs (reg_names [REGNO (x)], file);
5462 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5463 {
5464 fputs ("R", file);
5465 return;
5466 }
5467 if (FP_REG_P (x)
5468 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5469 && (REGNO (x) & 1) == 0)
5470 fputs ("L", file);
5471 }
5472 else if (GET_CODE (x) == MEM)
5473 {
5474 int size = GET_MODE_SIZE (GET_MODE (x));
5475 rtx base = NULL_RTX;
5476 switch (GET_CODE (XEXP (x, 0)))
5477 {
5478 case PRE_DEC:
5479 case POST_DEC:
5480 base = XEXP (XEXP (x, 0), 0);
5481 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5482 break;
5483 case PRE_INC:
5484 case POST_INC:
5485 base = XEXP (XEXP (x, 0), 0);
5486 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5487 break;
5488 case PLUS:
5489 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5490 fprintf (file, "%s(%s)",
5491 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5492 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5493 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5494 fprintf (file, "%s(%s)",
5495 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5496 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5497 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5498 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5499 {
5500 /* Because the REG_POINTER flag can get lost during reload,
5501 pa_legitimate_address_p canonicalizes the order of the
5502 index and base registers in the combined move patterns. */
5503 rtx base = XEXP (XEXP (x, 0), 1);
5504 rtx index = XEXP (XEXP (x, 0), 0);
5505
5506 fprintf (file, "%s(%s)",
5507 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5508 }
5509 else
5510 output_address (GET_MODE (x), XEXP (x, 0));
5511 break;
5512 default:
5513 output_address (GET_MODE (x), XEXP (x, 0));
5514 break;
5515 }
5516 }
5517 else
5518 output_addr_const (file, x);
5519 }
5520
5521 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5522
5523 void
5524 pa_output_global_address (FILE *file, rtx x, int round_constant)
5525 {
5526
5527 /* Imagine (high (const (plus ...))). */
5528 if (GET_CODE (x) == HIGH)
5529 x = XEXP (x, 0);
5530
5531 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5532 output_addr_const (file, x);
5533 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5534 {
5535 output_addr_const (file, x);
5536 fputs ("-$global$", file);
5537 }
5538 else if (GET_CODE (x) == CONST)
5539 {
5540 const char *sep = "";
5541 int offset = 0; /* assembler wants -$global$ at end */
5542 rtx base = NULL_RTX;
5543
5544 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5545 {
5546 case LABEL_REF:
5547 case SYMBOL_REF:
5548 base = XEXP (XEXP (x, 0), 0);
5549 output_addr_const (file, base);
5550 break;
5551 case CONST_INT:
5552 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5553 break;
5554 default:
5555 gcc_unreachable ();
5556 }
5557
5558 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5559 {
5560 case LABEL_REF:
5561 case SYMBOL_REF:
5562 base = XEXP (XEXP (x, 0), 1);
5563 output_addr_const (file, base);
5564 break;
5565 case CONST_INT:
5566 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5567 break;
5568 default:
5569 gcc_unreachable ();
5570 }
5571
5572 /* How bogus. The compiler is apparently responsible for
5573 rounding the constant if it uses an LR field selector.
5574
5575 The linker and/or assembler seem like a better place since
5576 they have to do this kind of thing already.
5577
5578 If we fail to do this, HP's optimizing linker may eliminate
5579 an addil, but not update the ldw/stw/ldo instruction that
5580 uses the result of the addil. */
5581 if (round_constant)
5582 offset = ((offset + 0x1000) & ~0x1fff);
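/* Worked example (illustrative): offset 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff = 0x2000, i.e. the offset is
   rounded to the nearest multiple of 0x2000. */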
5583
5584 switch (GET_CODE (XEXP (x, 0)))
5585 {
5586 case PLUS:
5587 if (offset < 0)
5588 {
5589 offset = -offset;
5590 sep = "-";
5591 }
5592 else
5593 sep = "+";
5594 break;
5595
5596 case MINUS:
5597 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5598 sep = "-";
5599 break;
5600
5601 default:
5602 gcc_unreachable ();
5603 }
5604
5605 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5606 fputs ("-$global$", file);
5607 if (offset)
5608 fprintf (file, "%s%d", sep, offset);
5609 }
5610 else
5611 output_addr_const (file, x);
5612 }
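/* Sketch of the resulting output (illustrative, assuming non-PIC code
   and a symbol "foo" that is not read-only):

     (symbol_ref "foo")                               => foo-$global$
     (const (plus (symbol_ref "foo") (const_int 8)))  => foo-$global$+8

   With ROUND_CONSTANT nonzero, the +8 is first rounded to a multiple
   of 0x2000 and therefore dropped entirely. */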
5613
5614 /* Output boilerplate text to appear at the beginning of the file.
5615 There are several possible versions. */
5616 #define aputs(x) fputs(x, asm_out_file)
5617 static inline void
5618 pa_file_start_level (void)
5619 {
5620 if (TARGET_64BIT)
5621 aputs ("\t.LEVEL 2.0w\n");
5622 else if (TARGET_PA_20)
5623 aputs ("\t.LEVEL 2.0\n");
5624 else if (TARGET_PA_11)
5625 aputs ("\t.LEVEL 1.1\n");
5626 else
5627 aputs ("\t.LEVEL 1.0\n");
5628 }
5629
5630 static inline void
5631 pa_file_start_space (int sortspace)
5632 {
5633 aputs ("\t.SPACE $PRIVATE$");
5634 if (sortspace)
5635 aputs (",SORT=16");
5636 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5637 if (flag_tm)
5638 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5639 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5640 "\n\t.SPACE $TEXT$");
5641 if (sortspace)
5642 aputs (",SORT=8");
5643 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5644 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5645 }
5646
5647 static inline void
5648 pa_file_start_file (int want_version)
5649 {
5650 if (write_symbols != NO_DEBUG)
5651 {
5652 output_file_directive (asm_out_file, main_input_filename);
5653 if (want_version)
5654 aputs ("\t.version\t\"01.01\"\n");
5655 }
5656 }
5657
5658 static inline void
5659 pa_file_start_mcount (const char *aswhat)
5660 {
5661 if (profile_flag)
5662 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5663 }
5664
5665 static void
5666 pa_elf_file_start (void)
5667 {
5668 pa_file_start_level ();
5669 pa_file_start_mcount ("ENTRY");
5670 pa_file_start_file (0);
5671 }
5672
5673 static void
5674 pa_som_file_start (void)
5675 {
5676 pa_file_start_level ();
5677 pa_file_start_space (0);
5678 aputs ("\t.IMPORT $global$,DATA\n"
5679 "\t.IMPORT $$dyncall,MILLICODE\n");
5680 pa_file_start_mcount ("CODE");
5681 pa_file_start_file (0);
5682 }
5683
5684 static void
5685 pa_linux_file_start (void)
5686 {
5687 pa_file_start_file (1);
5688 pa_file_start_level ();
5689 pa_file_start_mcount ("CODE");
5690 }
5691
5692 static void
5693 pa_hpux64_gas_file_start (void)
5694 {
5695 pa_file_start_level ();
5696 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5697 if (profile_flag)
5698 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5699 #endif
5700 pa_file_start_file (1);
5701 }
5702
5703 static void
5704 pa_hpux64_hpas_file_start (void)
5705 {
5706 pa_file_start_level ();
5707 pa_file_start_space (1);
5708 pa_file_start_mcount ("CODE");
5709 pa_file_start_file (0);
5710 }
5711 #undef aputs
5712
5713 /* Search the deferred plabel list for SYMBOL and return its internal
5714 label. If an entry for SYMBOL is not found, a new entry is created. */
5715
5716 rtx
5717 pa_get_deferred_plabel (rtx symbol)
5718 {
5719 const char *fname = XSTR (symbol, 0);
5720 size_t i;
5721
5722 /* See if we have already put this function on the list of deferred
5723 plabels. This list is generally small, so a linear search is not
5724 too ugly. If it proves too slow, replace it with something faster. */
5725 for (i = 0; i < n_deferred_plabels; i++)
5726 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5727 break;
5728
5729 /* If the deferred plabel list is empty, or this entry was not found
5730 on the list, create a new entry on the list. */
5731 if (deferred_plabels == NULL || i == n_deferred_plabels)
5732 {
5733 tree id;
5734
5735 if (deferred_plabels == 0)
5736 deferred_plabels = ggc_alloc<deferred_plabel> ();
5737 else
5738 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5739 deferred_plabels,
5740 n_deferred_plabels + 1);
5741
5742 i = n_deferred_plabels++;
5743 deferred_plabels[i].internal_label = gen_label_rtx ();
5744 deferred_plabels[i].symbol = symbol;
5745
5746 /* Gross. We have just implicitly taken the address of this
5747 function. Mark it in the same manner as assemble_name. */
5748 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5749 if (id)
5750 mark_referenced (id);
5751 }
5752
5753 return deferred_plabels[i].internal_label;
5754 }
5755
5756 static void
5757 output_deferred_plabels (void)
5758 {
5759 size_t i;
5760
5761 /* If we have some deferred plabels, then we need to switch into the
5762 data or readonly data section, and align it to a 4 byte boundary
5763 before outputting the deferred plabels. */
5764 if (n_deferred_plabels)
5765 {
5766 switch_to_section (flag_pic ? data_section : readonly_data_section);
5767 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5768 }
5769
5770 /* Now output the deferred plabels. */
5771 for (i = 0; i < n_deferred_plabels; i++)
5772 {
5773 targetm.asm_out.internal_label (asm_out_file, "L",
5774 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5775 assemble_integer (deferred_plabels[i].symbol,
5776 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5777 }
5778 }
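/* Illustrative output (exact directives vary by target): each entry
   becomes an internal label followed by a pointer-sized word holding
   the function symbol, roughly

     L$0042:
         .word foo

   in 32-bit mode, with an 8-byte value and alignment in 64-bit mode. */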
5779
5780 /* Initialize optabs to point to emulation routines. */
5781
5782 static void
5783 pa_init_libfuncs (void)
5784 {
5785 if (HPUX_LONG_DOUBLE_LIBRARY)
5786 {
5787 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5788 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5789 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5790 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5791 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5792 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5793 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5794 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5795 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5796
5797 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5798 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5799 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5800 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5801 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5802 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5803 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5804
5805 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5806 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5807 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5808 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5809
5810 set_conv_libfunc (sfix_optab, SImode, TFmode,
5811 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5812 : "_U_Qfcnvfxt_quad_to_sgl");
5813 set_conv_libfunc (sfix_optab, DImode, TFmode,
5814 "_U_Qfcnvfxt_quad_to_dbl");
5815 set_conv_libfunc (ufix_optab, SImode, TFmode,
5816 "_U_Qfcnvfxt_quad_to_usgl");
5817 set_conv_libfunc (ufix_optab, DImode, TFmode,
5818 "_U_Qfcnvfxt_quad_to_udbl");
5819
5820 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5821 "_U_Qfcnvxf_sgl_to_quad");
5822 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5823 "_U_Qfcnvxf_dbl_to_quad");
5824 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5825 "_U_Qfcnvxf_usgl_to_quad");
5826 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5827 "_U_Qfcnvxf_udbl_to_quad");
5828 }
5829
5830 if (TARGET_SYNC_LIBCALL)
5831 init_sync_libfuncs (8);
5832 }
5833
5834 /* HP's millicode routines mean something special to the assembler.
5835 Keep track of which ones we have used. */
5836
5837 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5838 static void import_milli (enum millicodes);
5839 static char imported[(int) end1000];
5840 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5841 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5842 #define MILLI_START 10
5843
5844 static void
5845 import_milli (enum millicodes code)
5846 {
5847 char str[sizeof (import_string)];
5848
5849 if (!imported[(int) code])
5850 {
5851 imported[(int) code] = 1;
5852 strcpy (str, import_string);
5853 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5854 output_asm_insn (str, 0);
5855 }
5856 }
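/* Usage sketch (illustrative only, not part of the build): MILLI_START
   (10) indexes the "...." field of IMPORT_STRING, so the strncpy above
   rewrites ".IMPORT $$....,MILLICODE" in place. */
#if 0
import_milli (mulI);	/* Emits ".IMPORT $$mulI,MILLICODE".  */
import_milli (mulI);	/* No-op; imported[mulI] is already set.  */
#endif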
5857
5858 /* The register constraints have put the operands and return value in
5859 the proper registers. */
5860
5861 const char *
5862 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5863 {
5864 import_milli (mulI);
5865 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5866 }
5867
5868 /* Emit the rtl for doing a division by a constant. */
5869
5870 /* Do magic division millicodes exist for this value? */
5871 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5872
5873 /* We'll use an array to keep track of the magic millicodes and
5874 whether or not we've used them already. [n][0] is signed, [n][1] is
5875 unsigned. */
5876
5877 static int div_milli[16][2];
5878
5879 int
5880 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5881 {
5882 if (GET_CODE (operands[2]) == CONST_INT
5883 && INTVAL (operands[2]) > 0
5884 && INTVAL (operands[2]) < 16
5885 && pa_magic_milli[INTVAL (operands[2])])
5886 {
5887 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5888
5889 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5890 emit
5891 (gen_rtx_PARALLEL
5892 (VOIDmode,
5893 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5894 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5895 SImode,
5896 gen_rtx_REG (SImode, 26),
5897 operands[2])),
5898 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5899 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5900 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5901 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5902 gen_rtx_CLOBBER (VOIDmode, ret))));
5903 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5904 return 1;
5905 }
5906 return 0;
5907 }
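/* Reading pa_magic_milli above: the nonzero entries sit at indices
   3, 5, 6, 7, 9, 10, 12, 14 and 15, so only those divisors take this
   path and get a $$divI_<n>/$$divU_<n> call from pa_output_div_insn;
   for every other divisor this function returns 0 and the caller
   falls back to the generic $$divI/$$divU millicode. */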
5908
5909 const char *
5910 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5911 {
5912 int divisor;
5913
5914 /* If the divisor is a constant, try to use one of the special
5915 opcodes. */
5916 if (GET_CODE (operands[0]) == CONST_INT)
5917 {
5918 static char buf[100];
5919 divisor = INTVAL (operands[0]);
5920 if (!div_milli[divisor][unsignedp])
5921 {
5922 div_milli[divisor][unsignedp] = 1;
5923 if (unsignedp)
5924 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5925 else
5926 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5927 }
5928 if (unsignedp)
5929 {
5930 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5931 INTVAL (operands[0]));
5932 return pa_output_millicode_call (insn,
5933 gen_rtx_SYMBOL_REF (SImode, buf));
5934 }
5935 else
5936 {
5937 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5938 INTVAL (operands[0]));
5939 return pa_output_millicode_call (insn,
5940 gen_rtx_SYMBOL_REF (SImode, buf));
5941 }
5942 }
5943 /* Divisor isn't a special constant. */
5944 else
5945 {
5946 if (unsignedp)
5947 {
5948 import_milli (divU);
5949 return pa_output_millicode_call (insn,
5950 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5951 }
5952 else
5953 {
5954 import_milli (divI);
5955 return pa_output_millicode_call (insn,
5956 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5957 }
5958 }
5959 }
5960
5961 /* Output a $$rem millicode call to do the modulo operation. */
5962
5963 const char *
5964 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5965 {
5966 if (unsignedp)
5967 {
5968 import_milli (remU);
5969 return pa_output_millicode_call (insn,
5970 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5971 }
5972 else
5973 {
5974 import_milli (remI);
5975 return pa_output_millicode_call (insn,
5976 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5977 }
5978 }
5979
5980 void
5981 pa_output_arg_descriptor (rtx_insn *call_insn)
5982 {
5983 const char *arg_regs[4];
5984 machine_mode arg_mode;
5985 rtx link;
5986 int i, output_flag = 0;
5987 int regno;
5988
5989 /* We neither need nor want argument location descriptors for the
5990 64-bit runtime environment or the ELF32 environment. */
5991 if (TARGET_64BIT || TARGET_ELF32)
5992 return;
5993
5994 for (i = 0; i < 4; i++)
5995 arg_regs[i] = 0;
5996
5997 /* Specify explicitly that no argument relocations should take place
5998 if using the portable runtime calling conventions. */
5999 if (TARGET_PORTABLE_RUNTIME)
6000 {
6001 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6002 asm_out_file);
6003 return;
6004 }
6005
6006 gcc_assert (CALL_P (call_insn));
6007 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
6008 link; link = XEXP (link, 1))
6009 {
6010 rtx use = XEXP (link, 0);
6011
6012 if (! (GET_CODE (use) == USE
6013 && GET_CODE (XEXP (use, 0)) == REG
6014 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6015 continue;
6016
6017 arg_mode = GET_MODE (XEXP (use, 0));
6018 regno = REGNO (XEXP (use, 0));
6019 if (regno >= 23 && regno <= 26)
6020 {
6021 arg_regs[26 - regno] = "GR";
6022 if (arg_mode == DImode)
6023 arg_regs[25 - regno] = "GR";
6024 }
6025 else if (regno >= 32 && regno <= 39)
6026 {
6027 if (arg_mode == SFmode)
6028 arg_regs[(regno - 32) / 2] = "FR";
6029 else
6030 {
6031 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6032 arg_regs[(regno - 34) / 2] = "FR";
6033 arg_regs[(regno - 34) / 2 + 1] = "FU";
6034 #else
6035 arg_regs[(regno - 34) / 2] = "FU";
6036 arg_regs[(regno - 34) / 2 + 1] = "FR";
6037 #endif
6038 }
6039 }
6040 }
6041 fputs ("\t.CALL ", asm_out_file);
6042 for (i = 0; i < 4; i++)
6043 {
6044 if (arg_regs[i])
6045 {
6046 if (output_flag++)
6047 fputc (',', asm_out_file);
6048 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6049 }
6050 }
6051 fputc ('\n', asm_out_file);
6052 }
6053 \f
6054 /* Inform reload about cases where moving X with a mode MODE to or from
6055 a register in RCLASS requires an extra scratch or immediate register.
6056 Return the class needed for the immediate register. */
6057
6058 static reg_class_t
6059 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6060 machine_mode mode, secondary_reload_info *sri)
6061 {
6062 int regno;
6063 enum reg_class rclass = (enum reg_class) rclass_i;
6064
6065 /* Handle the easy stuff first. */
6066 if (rclass == R1_REGS)
6067 return NO_REGS;
6068
6069 if (REG_P (x))
6070 {
6071 regno = REGNO (x);
6072 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6073 return NO_REGS;
6074 }
6075 else
6076 regno = -1;
6077
6078 /* If we have something like (mem (mem (...)), we can safely assume the
6079 inner MEM will end up in a general register after reloading, so there's
6080 no need for a secondary reload. */
6081 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6082 return NO_REGS;
6083
6084 /* Trying to load a constant into a FP register during PIC code
6085 generation requires %r1 as a scratch register. For float modes,
6086 the only legitimate constant is CONST0_RTX. However, there are
6087 a few patterns that accept constant double operands. */
6088 if (flag_pic
6089 && FP_REG_CLASS_P (rclass)
6090 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6091 {
6092 switch (mode)
6093 {
6094 case E_SImode:
6095 sri->icode = CODE_FOR_reload_insi_r1;
6096 break;
6097
6098 case E_DImode:
6099 sri->icode = CODE_FOR_reload_indi_r1;
6100 break;
6101
6102 case E_SFmode:
6103 sri->icode = CODE_FOR_reload_insf_r1;
6104 break;
6105
6106 case E_DFmode:
6107 sri->icode = CODE_FOR_reload_indf_r1;
6108 break;
6109
6110 default:
6111 gcc_unreachable ();
6112 }
6113 return NO_REGS;
6114 }
6115
6116 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6117 register when we're generating PIC code or when the operand isn't
6118 readonly. */
6119 if (pa_symbolic_expression_p (x))
6120 {
6121 if (GET_CODE (x) == HIGH)
6122 x = XEXP (x, 0);
6123
6124 if (flag_pic || !read_only_operand (x, VOIDmode))
6125 {
6126 switch (mode)
6127 {
6128 case E_SImode:
6129 sri->icode = CODE_FOR_reload_insi_r1;
6130 break;
6131
6132 case E_DImode:
6133 sri->icode = CODE_FOR_reload_indi_r1;
6134 break;
6135
6136 default:
6137 gcc_unreachable ();
6138 }
6139 return NO_REGS;
6140 }
6141 }
6142
6143 /* Profiling showed the PA port spends about 1.3% of its compilation
6144 time in true_regnum from calls inside pa_secondary_reload_class. */
6145 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6146 regno = true_regnum (x);
6147
6148 /* Handle reloads for floating point loads and stores. */
6149 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6150 && FP_REG_CLASS_P (rclass))
6151 {
6152 if (MEM_P (x))
6153 {
6154 x = XEXP (x, 0);
6155
6156 /* We don't need a secondary reload for indexed memory addresses.
6157
6158 When INT14_OK_STRICT is true, it might appear that we could
6159 directly allow register indirect memory addresses. However,
6160 this doesn't work because we don't support SUBREGs in
6161 floating-point register copies and reload doesn't tell us
6162 when it's going to use a SUBREG. */
6163 if (IS_INDEX_ADDR_P (x))
6164 return NO_REGS;
6165 }
6166
6167 /* Request a secondary reload with a general scratch register
6168 for everything else. ??? Could symbolic operands be handled
6169 directly when generating non-pic PA 2.0 code? */
6170 sri->icode = (in_p
6171 ? direct_optab_handler (reload_in_optab, mode)
6172 : direct_optab_handler (reload_out_optab, mode));
6173 return NO_REGS;
6174 }
6175
6176 /* A SAR<->FP register copy requires an intermediate general register
6177 and secondary memory. We need a secondary reload with a general
6178 scratch register for spills. */
6179 if (rclass == SHIFT_REGS)
6180 {
6181 /* Handle spill. */
6182 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6183 {
6184 sri->icode = (in_p
6185 ? direct_optab_handler (reload_in_optab, mode)
6186 : direct_optab_handler (reload_out_optab, mode));
6187 return NO_REGS;
6188 }
6189
6190 /* Handle FP copy. */
6191 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6192 return GENERAL_REGS;
6193 }
6194
6195 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6196 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6197 && FP_REG_CLASS_P (rclass))
6198 return GENERAL_REGS;
6199
6200 return NO_REGS;
6201 }
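/* Illustrative case (not exhaustive): under -fpic, reloading the
   constant 0x12345 into a floating-point register in SImode sets
   sri->icode to CODE_FOR_reload_insi_r1, so the constant is
   materialized through the %r1 scratch register as described above. */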
6202
6203 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6204
6205 static bool
6206 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6207 reg_class_t class1 ATTRIBUTE_UNUSED,
6208 reg_class_t class2 ATTRIBUTE_UNUSED)
6209 {
6210 #ifdef PA_SECONDARY_MEMORY_NEEDED
6211 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6212 #else
6213 return false;
6214 #endif
6215 }
6216
6217 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6218 is only marked as live on entry by df-scan when it is a fixed
6219 register. It isn't a fixed register in the 64-bit runtime,
6220 so we need to mark it here. */
6221
6222 static void
6223 pa_extra_live_on_entry (bitmap regs)
6224 {
6225 if (TARGET_64BIT)
6226 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6227 }
6228
6229 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6230 to prevent it from being deleted. */
6231
6232 rtx
6233 pa_eh_return_handler_rtx (void)
6234 {
6235 rtx tmp;
6236
6237 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6238 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6239 tmp = gen_rtx_MEM (word_mode, tmp);
6240 tmp->volatil = 1;
6241 return tmp;
6242 }
6243
6244 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6245 by invisible reference. As a GCC extension, we also pass anything
6246 with a zero or variable size by reference.
6247
6248 The 64-bit runtime does not describe passing any types by invisible
6249 reference. The internals of GCC can't currently handle passing
6250 empty structures, and zero or variable length arrays when they are
6251 not passed entirely on the stack or by reference. Thus, as a GCC
6252 extension, we pass these types by reference. The HP compiler doesn't
6253 support these types, so hopefully there shouldn't be any compatibility
6254 issues. This may have to be revisited when HP releases a C99 compiler
6255 or updates the ABI. */
6256
6257 static bool
6258 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6259 machine_mode mode, const_tree type,
6260 bool named ATTRIBUTE_UNUSED)
6261 {
6262 HOST_WIDE_INT size;
6263
6264 if (type)
6265 size = int_size_in_bytes (type);
6266 else
6267 size = GET_MODE_SIZE (mode);
6268
6269 if (TARGET_64BIT)
6270 return size <= 0;
6271 else
6272 return size <= 0 || size > 8;
6273 }
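/* Worked examples (illustrative): on the 32-bit runtime a 16-byte
   struct (size > 8) is passed by invisible reference while a 'double'
   (size 8) is passed by value; on the 64-bit runtime only zero- and
   variable-sized objects (size <= 0) go by reference. */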
6274
6275 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6276
6277 static pad_direction
6278 pa_function_arg_padding (machine_mode mode, const_tree type)
6279 {
6280 if (mode == BLKmode
6281 || (TARGET_64BIT
6282 && type
6283 && (AGGREGATE_TYPE_P (type)
6284 || TREE_CODE (type) == COMPLEX_TYPE
6285 || TREE_CODE (type) == VECTOR_TYPE)))
6286 {
6287 /* Return PAD_NONE if justification is not required. */
6288 if (type
6289 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6290 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6291 return PAD_NONE;
6292
6293 /* The directions set here are ignored when a BLKmode argument larger
6294 than a word is placed in a register. Different code is used for
6295 the stack and registers. This makes it difficult to have a
6296 consistent data representation for both the stack and registers.
6297 For both runtimes, the justification and padding for arguments on
6298 the stack and in registers should be identical. */
6299 if (TARGET_64BIT)
6300 /* The 64-bit runtime specifies left justification for aggregates. */
6301 return PAD_UPWARD;
6302 else
6303 /* The 32-bit runtime architecture specifies right justification.
6304 When the argument is passed on the stack, the argument is padded
6305 with garbage on the left. The HP compiler pads with zeros. */
6306 return PAD_DOWNWARD;
6307 }
6308
6309 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6310 return PAD_DOWNWARD;
6311 else
6312 return PAD_NONE;
6313 }
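/* Worked examples (illustrative, assuming a 32-bit PARM_BOUNDARY on
   the 32-bit runtime): a 3-byte BLKmode struct pads PAD_DOWNWARD
   (right justified) there and PAD_UPWARD (left justified) on the
   64-bit runtime, while a 4-byte aggregate on the 32-bit runtime is
   already a multiple of PARM_BOUNDARY and returns PAD_NONE. */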
6314
6315 \f
6316 /* Do what is necessary for `va_start'. We look at the current function
6317 to determine if stdargs or varargs is used and fill in an initial
6318 va_list. A pointer to this constructor is returned. */
6319
6320 static rtx
6321 hppa_builtin_saveregs (void)
6322 {
6323 rtx offset, dest;
6324 tree fntype = TREE_TYPE (current_function_decl);
6325 int argadj = ((!stdarg_p (fntype))
6326 ? UNITS_PER_WORD : 0);
6327
6328 if (argadj)
6329 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6330 else
6331 offset = crtl->args.arg_offset_rtx;
6332
6333 if (TARGET_64BIT)
6334 {
6335 int i, off;
6336
6337 /* Adjust for varargs/stdarg differences. */
6338 if (argadj)
6339 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6340 else
6341 offset = crtl->args.arg_offset_rtx;
6342
6343 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6344 from the incoming arg pointer and growing to larger addresses. */
6345 for (i = 26, off = -64; i >= 19; i--, off += 8)
6346 emit_move_insn (gen_rtx_MEM (word_mode,
6347 plus_constant (Pmode,
6348 arg_pointer_rtx, off)),
6349 gen_rtx_REG (word_mode, i));
6350
6351 /* The incoming args pointer points just beyond the flushback area;
6352 normally this is not a serious concern. However, when we are doing
6353 varargs/stdargs we want to make the arg pointer point to the start
6354 of the incoming argument area. */
6355 emit_move_insn (virtual_incoming_args_rtx,
6356 plus_constant (Pmode, arg_pointer_rtx, -64));
6357
6358 /* Now return a pointer to the first anonymous argument. */
6359 return copy_to_reg (expand_binop (Pmode, add_optab,
6360 virtual_incoming_args_rtx,
6361 offset, 0, 0, OPTAB_LIB_WIDEN));
6362 }
6363
6364 /* Store general registers on the stack. */
6365 dest = gen_rtx_MEM (BLKmode,
6366 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6367 -16));
6368 set_mem_alias_set (dest, get_varargs_alias_set ());
6369 set_mem_align (dest, BITS_PER_WORD);
6370 move_block_from_reg (23, dest, 4);
6371
6372 /* move_block_from_reg will emit code to store the argument registers
6373 individually as scalar stores.
6374
6375 However, other insns may later load from the same addresses for
6376 a structure load (passing a struct to a varargs routine).
6377
6378 The alias code assumes that such aliasing can never happen, so we
6379 have to keep memory referencing insns from moving up beyond the
6380 last argument register store. So we emit a blockage insn here. */
6381 emit_insn (gen_blockage ());
6382
6383 return copy_to_reg (expand_binop (Pmode, add_optab,
6384 crtl->args.internal_arg_pointer,
6385 offset, 0, 0, OPTAB_LIB_WIDEN));
6386 }
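/* Layout sketch for the 64-bit path above (illustrative): the loop
   stores the eight argument registers at fixed offsets from the
   incoming arg pointer,

     %r26 -> -64   %r25 -> -56   %r24 -> -48   %r23 -> -40
     %r22 -> -32   %r21 -> -24   %r20 -> -16   %r19 ->  -8

   so after the -64 adjustment the saved registers sit at the start
   of the incoming argument area. */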
6387
6388 static void
6389 hppa_va_start (tree valist, rtx nextarg)
6390 {
6391 nextarg = expand_builtin_saveregs ();
6392 std_expand_builtin_va_start (valist, nextarg);
6393 }
6394
6395 static tree
6396 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6397 gimple_seq *post_p)
6398 {
6399 if (TARGET_64BIT)
6400 {
6401 /* Args grow upward. We can use the generic routines. */
6402 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6403 }
6404 else /* !TARGET_64BIT */
6405 {
6406 tree ptr = build_pointer_type (type);
6407 tree valist_type;
6408 tree t, u;
6409 unsigned int size, ofs;
6410 bool indirect;
6411
6412 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6413 if (indirect)
6414 {
6415 type = ptr;
6416 ptr = build_pointer_type (type);
6417 }
6418 size = int_size_in_bytes (type);
6419 valist_type = TREE_TYPE (valist);
6420
6421 /* Args grow down. Not handled by generic routines. */
6422
6423 u = fold_convert (sizetype, size_in_bytes (type));
6424 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6425 t = fold_build_pointer_plus (valist, u);
6426
6427 /* Align to 4 or 8 byte boundary depending on argument size. */
6428
6429 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6430 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6431 t = fold_convert (valist_type, t);
6432
6433 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6434
6435 ofs = (8 - size) % 4;
6436 if (ofs != 0)
6437 t = fold_build_pointer_plus_hwi (t, ofs);
6438
6439 t = fold_convert (ptr, t);
6440 t = build_va_arg_indirect_ref (t);
6441
6442 if (indirect)
6443 t = build_va_arg_indirect_ref (t);
6444
6445 return t;
6446 }
6447 }
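/* Worked example for the 32-bit path (illustrative): fetching a
   1-byte argument computes t = (valist - 1) & -4 and then adds
   ofs = (8 - 1) % 4 = 3, so the byte is read from the last
   (highest-addressed) byte of its 4-byte slot, consistent with the
   right justification used by pa_function_arg_padding above. */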
6448
6449 /* True if MODE is valid for the target. By "valid", we mean able to
6450 be manipulated in non-trivial ways. In particular, this means all
6451 the arithmetic is supported.
6452
6453 Currently, TImode is not valid as the HP 64-bit runtime documentation
6454 doesn't document the alignment and calling conventions for this type.
6455 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6456 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6457
6458 static bool
6459 pa_scalar_mode_supported_p (scalar_mode mode)
6460 {
6461 int precision = GET_MODE_PRECISION (mode);
6462
6463 switch (GET_MODE_CLASS (mode))
6464 {
6465 case MODE_PARTIAL_INT:
6466 case MODE_INT:
6467 if (precision == CHAR_TYPE_SIZE)
6468 return true;
6469 if (precision == SHORT_TYPE_SIZE)
6470 return true;
6471 if (precision == INT_TYPE_SIZE)
6472 return true;
6473 if (precision == LONG_TYPE_SIZE)
6474 return true;
6475 if (precision == LONG_LONG_TYPE_SIZE)
6476 return true;
6477 return false;
6478
6479 case MODE_FLOAT:
6480 if (precision == FLOAT_TYPE_SIZE)
6481 return true;
6482 if (precision == DOUBLE_TYPE_SIZE)
6483 return true;
6484 if (precision == LONG_DOUBLE_TYPE_SIZE)
6485 return true;
6486 return false;
6487
6488 case MODE_DECIMAL_FLOAT:
6489 return false;
6490
6491 default:
6492 gcc_unreachable ();
6493 }
6494 }
6495
6496 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6497 it branches into the delay slot. Otherwise, return FALSE. */
6498
6499 static bool
6500 branch_to_delay_slot_p (rtx_insn *insn)
6501 {
6502 rtx_insn *jump_insn;
6503
6504 if (dbr_sequence_length ())
6505 return FALSE;
6506
6507 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6508 while (insn)
6509 {
6510 insn = next_active_insn (insn);
6511 if (jump_insn == insn)
6512 return TRUE;
6513
6514 /* We can't rely on the length of asms. So, we return FALSE when
6515 the branch is followed by an asm. */
6516 if (!insn
6517 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6518 || asm_noperands (PATTERN (insn)) >= 0
6519 || get_attr_length (insn) > 0)
6520 break;
6521 }
6522
6523 return FALSE;
6524 }
6525
6526 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6527
6528 This occurs when INSN has an unfilled delay slot and is followed
6529 by an asm. Disaster can occur if the asm is empty and the jump
6530 branches into the delay slot. So, we add a nop in the delay slot
6531 when this occurs. */
6532
6533 static bool
6534 branch_needs_nop_p (rtx_insn *insn)
6535 {
6536 rtx_insn *jump_insn;
6537
6538 if (dbr_sequence_length ())
6539 return FALSE;
6540
6541 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6542 while (insn)
6543 {
6544 insn = next_active_insn (insn);
6545 if (!insn || jump_insn == insn)
6546 return TRUE;
6547
6548 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6549 || asm_noperands (PATTERN (insn)) >= 0)
6550 && get_attr_length (insn) > 0)
6551 break;
6552 }
6553
6554 return FALSE;
6555 }
6556
6557 /* Return TRUE if INSN, a forward jump insn, can use nullification
6558 to skip the following instruction. This avoids an extra cycle due
6559 to a mis-predicted branch when we fall through. */
6560
6561 static bool
6562 use_skip_p (rtx_insn *insn)
6563 {
6564 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6565
6566 while (insn)
6567 {
6568 insn = next_active_insn (insn);
6569
6570 /* We can't rely on the length of asms, so we can't skip asms. */
6571 if (!insn
6572 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6573 || asm_noperands (PATTERN (insn)) >= 0)
6574 break;
6575 if (get_attr_length (insn) == 4
6576 && jump_insn == next_active_insn (insn))
6577 return TRUE;
6578 if (get_attr_length (insn) > 0)
6579 break;
6580 }
6581
6582 return FALSE;
6583 }
6584
6585 /* This routine handles all the normal conditional branch sequences we
6586 might need to generate. It handles compare immediate vs compare
6587 register, nullification of delay slots, varying length branches,
6588 negated branches, and all combinations of the above. It returns the
6589 output appropriate to emit the branch corresponding to all given
6590 parameters. */
6591
6592 const char *
6593 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6594 {
6595 static char buf[100];
6596 bool useskip;
6597 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6598 int length = get_attr_length (insn);
6599 int xdelay;
6600
6601 /* A conditional branch to the following instruction (e.g. the delay slot)
6602 is asking for a disaster. This can happen when not optimizing and
6603 when jump optimization fails.
6604
6605 While it is usually safe to emit nothing, this can fail if the
6606 preceding instruction is a nullified branch with an empty delay
6607 slot and the same branch target as this branch. We could check
6608 for this but jump optimization should eliminate nop jumps. It
6609 is always safe to emit a nop. */
6610 if (branch_to_delay_slot_p (insn))
6611 return "nop";
6612
6613 /* The doubleword form of the cmpib instruction doesn't have the LEU
6614 and GTU conditions while the cmpb instruction does. Since we accept
6615 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6616 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6617 operands[2] = gen_rtx_REG (DImode, 0);
6618 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6619 operands[1] = gen_rtx_REG (DImode, 0);
6620
6621 /* If this is a long branch with its delay slot unfilled, set `nullify'
6622 as it can nullify the delay slot and save a nop. */
6623 if (length == 8 && dbr_sequence_length () == 0)
6624 nullify = 1;
6625
6626 /* If this is a short forward conditional branch which did not get
6627 its delay slot filled, the delay slot can still be nullified. */
6628 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6629 nullify = forward_branch_p (insn);
6630
6631 /* A forward branch over a single nullified insn can be done with a
6632 comclr instruction. This avoids a single cycle penalty due to a
6633 mis-predicted branch if we fall through (branch not taken). */
6634 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6635
6636 switch (length)
6637 {
6638 /* All short conditional branches except backwards with an unfilled
6639 delay slot. */
6640 case 4:
6641 if (useskip)
6642 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6643 else
6644 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6645 if (GET_MODE (operands[1]) == DImode)
6646 strcat (buf, "*");
6647 if (negated)
6648 strcat (buf, "%B3");
6649 else
6650 strcat (buf, "%S3");
6651 if (useskip)
6652 strcat (buf, " %2,%r1,%%r0");
6653 else if (nullify)
6654 {
6655 if (branch_needs_nop_p (insn))
6656 strcat (buf, ",n %2,%r1,%0%#");
6657 else
6658 strcat (buf, ",n %2,%r1,%0");
6659 }
6660 else
6661 strcat (buf, " %2,%r1,%0");
6662 break;
6663
6664 /* All long conditionals. Note a short backward branch with an
6665 unfilled delay slot is treated just like a long backward branch
6666 with an unfilled delay slot. */
6667 case 8:
6668 /* Handle weird backwards branch with a filled delay slot
6669 which is nullified. */
6670 if (dbr_sequence_length () != 0
6671 && ! forward_branch_p (insn)
6672 && nullify)
6673 {
6674 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6675 if (GET_MODE (operands[1]) == DImode)
6676 strcat (buf, "*");
6677 if (negated)
6678 strcat (buf, "%S3");
6679 else
6680 strcat (buf, "%B3");
6681 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6682 }
6683 /* Handle short backwards branch with an unfilled delay slot.
6684 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6685 taken and untaken branches. */
6686 else if (dbr_sequence_length () == 0
6687 && ! forward_branch_p (insn)
6688 && INSN_ADDRESSES_SET_P ()
6689 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6690 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6691 {
6692 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6693 if (GET_MODE (operands[1]) == DImode)
6694 strcat (buf, "*");
6695 if (negated)
6696 strcat (buf, "%B3 %2,%r1,%0%#");
6697 else
6698 strcat (buf, "%S3 %2,%r1,%0%#");
6699 }
6700 else
6701 {
6702 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6703 if (GET_MODE (operands[1]) == DImode)
6704 strcat (buf, "*");
6705 if (negated)
6706 strcat (buf, "%S3");
6707 else
6708 strcat (buf, "%B3");
6709 if (nullify)
6710 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6711 else
6712 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6713 }
6714 break;
6715
6716 default:
6717 /* The reversed conditional branch must branch over one additional
6718 instruction if the delay slot is filled and needs to be extracted
6719 by pa_output_lbranch. If the delay slot is empty or this is a
6720 nullified forward branch, the instruction after the reversed
6721 condition branch must be nullified. */
6722 if (dbr_sequence_length () == 0
6723 || (nullify && forward_branch_p (insn)))
6724 {
6725 nullify = 1;
6726 xdelay = 0;
6727 operands[4] = GEN_INT (length);
6728 }
6729 else
6730 {
6731 xdelay = 1;
6732 operands[4] = GEN_INT (length + 4);
6733 }
6734
6735 /* Create a reversed conditional branch which branches around
6736 the following insns. */
6737 if (GET_MODE (operands[1]) != DImode)
6738 {
6739 if (nullify)
6740 {
6741 if (negated)
6742 strcpy (buf,
6743 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6744 else
6745 strcpy (buf,
6746 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6747 }
6748 else
6749 {
6750 if (negated)
6751 strcpy (buf,
6752 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6753 else
6754 strcpy (buf,
6755 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6756 }
6757 }
6758 else
6759 {
6760 if (nullify)
6761 {
6762 if (negated)
6763 strcpy (buf,
6764 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6765 else
6766 strcpy (buf,
6767 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6768 }
6769 else
6770 {
6771 if (negated)
6772 strcpy (buf,
6773 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6774 else
6775 strcpy (buf,
6776 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6777 }
6778 }
6779
6780 output_asm_insn (buf, operands);
6781 return pa_output_lbranch (operands[0], insn, xdelay);
6782 }
6783 return buf;
6784 }
6785
6786 /* Output a PIC pc-relative instruction sequence to load the address of
6787 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6788 or a code label. OPERANDS[1] specifies the register to use to load
6789 the program counter. OPERANDS[3] may be used for label generation
6790 The sequence is always three instructions in length. The program
6791 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6792 Register %r1 is clobbered. */
6793
6794 static void
6795 pa_output_pic_pcrel_sequence (rtx *operands)
6796 {
6797 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6798 if (TARGET_PA_20)
6799 {
6800 /* We can use mfia to determine the current program counter. */
6801 if (TARGET_SOM || !TARGET_GAS)
6802 {
6803 operands[3] = gen_label_rtx ();
6804 targetm.asm_out.internal_label (asm_out_file, "L",
6805 CODE_LABEL_NUMBER (operands[3]));
6806 output_asm_insn ("mfia %1", operands);
6807 output_asm_insn ("addil L'%0-%l3,%1", operands);
6808 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6809 }
6810 else
6811 {
6812 output_asm_insn ("mfia %1", operands);
6813 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6814 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6815 }
6816 }
6817 else
6818 {
6819 /* We need to use a branch to determine the current program counter. */
6820 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6821 if (TARGET_SOM || !TARGET_GAS)
6822 {
6823 operands[3] = gen_label_rtx ();
6824 output_asm_insn ("addil L'%0-%l3,%1", operands);
6825 targetm.asm_out.internal_label (asm_out_file, "L",
6826 CODE_LABEL_NUMBER (operands[3]));
6827 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6828 }
6829 else
6830 {
6831 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6832 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6833 }
6834 }
6835 }
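/* Illustrative expansion (PA 2.0 with GAS, OPERANDS[1] = %r1,
   OPERANDS[2] = %r19): loading the address of symbol foo emits
   roughly

     mfia %r1
     addil L'foo-$PIC_pcrel$0+12,%r1
     ldo R'foo-$PIC_pcrel$0+16(%r1),%r19

   three instructions, as promised above. */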
6836
6837 /* This routine handles output of long unconditional branches that
6838 exceed the maximum range of a simple branch instruction. Since
6839 we don't have a register available for the branch, we save register
6840 %r1 in the frame marker, load the branch destination DEST into %r1,
6841 execute the branch, and restore %r1 in the delay slot of the branch.
6842
6843 Since long branches may have an insn in the delay slot and the
6844 delay slot is used to restore %r1, we in general need to extract
6845 this insn and execute it before the branch. However, to facilitate
6846 use of this function by conditional branches, we also provide an
6847 option to not extract the delay insn so that it will be emitted
6848 after the long branch. So, if there is an insn in the delay slot,
6849 it is extracted if XDELAY is nonzero.
6850
6851 The lengths of the various long-branch sequences are 20, 16 and 24
6852 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6853
6854 const char *
6855 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6856 {
6857 rtx xoperands[4];
6858
6859 xoperands[0] = dest;
6860
6861 /* First, free up the delay slot. */
6862 if (xdelay && dbr_sequence_length () != 0)
6863 {
6864 /* We can't handle a jump in the delay slot. */
6865 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6866
6867 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6868 optimize, 0, NULL);
6869
6870 /* Now delete the delay insn. */
6871 SET_INSN_DELETED (NEXT_INSN (insn));
6872 }
6873
6874 /* Output an insn to save %r1. The runtime documentation doesn't
6875 specify whether the "Clean Up" slot in the caller's frame can
6876 be clobbered by the callee. It isn't copied by HP's builtin
6877 alloca, so this suggests that it can be clobbered if necessary.
6878 The "Static Link" location is copied by HP builtin alloca, so
6879 we avoid using it. Using the cleanup slot might be a problem
6880 if we have to interoperate with languages that pass cleanup
6881 information. However, it should be possible to handle these
6882 situations with GCC's asm feature.
6883
6884 The "Current RP" slot is reserved for the called procedure, so
6885 we try to use it when we don't have a frame of our own. It's
6886 rather unlikely that we won't have a frame when we need to emit
6887 a very long branch.
6888
6889 Really the way to go long term is a register scavenger; go to
6890 the target of the jump and find a register which we can use
6891 as a scratch to hold the value in %r1. Then, we wouldn't have
6892 to free up the delay slot or clobber a slot that may be needed
6893 for other purposes. */
6894 if (TARGET_64BIT)
6895 {
6896 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6897 /* Use the return pointer slot in the frame marker. */
6898 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6899 else
6900 /* Use the slot at -40 in the frame marker since HP builtin
6901 alloca doesn't copy it. */
6902 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6903 }
6904 else
6905 {
6906 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6907 /* Use the return pointer slot in the frame marker. */
6908 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6909 else
6910 /* Use the "Clean Up" slot in the frame marker. In GCC,
6911 the only other use of this location is for copying a
6912 floating point double argument from a floating-point
6913 register to two general registers. The copy is done
6914 as an "atomic" operation when outputting a call, so it
6915 won't interfere with our using the location here. */
6916 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6917 }
6918
6919 if (TARGET_PORTABLE_RUNTIME)
6920 {
6921 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6922 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6923 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6924 }
6925 else if (flag_pic)
6926 {
6927 xoperands[1] = gen_rtx_REG (Pmode, 1);
6928 xoperands[2] = xoperands[1];
6929 pa_output_pic_pcrel_sequence (xoperands);
6930 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6931 }
6932 else
6933 /* Now output a very long branch to the original target. */
6934 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6935
6936 /* Now restore the value of %r1 in the delay slot. */
6937 if (TARGET_64BIT)
6938 {
6939 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6940 return "ldd -16(%%r30),%%r1";
6941 else
6942 return "ldd -40(%%r30),%%r1";
6943 }
6944 else
6945 {
6946 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6947 return "ldw -20(%%r30),%%r1";
6948 else
6949 return "ldw -12(%%r30),%%r1";
6950 }
6951 }
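/* Illustrative 32-bit non-PIC expansion (the 16-byte case noted
   above) when the function has a frame, branching to DEST:

     stw %r1,-12(%r30)
     ldil L'dest,%r1
     be R'dest(%sr4,%r1)
     ldw -12(%r30),%r1

   with the final ldw restoring %r1 from the delay slot. */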
6952
6953 /* This routine handles all the branch-on-bit conditional branch sequences we
6954 might need to generate. It handles nullification of delay slots,
6955 varying length branches, negated branches and all combinations of the
6956 above. It returns the appropriate output template to emit the branch. */
6957
6958 const char *
6959 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6960 {
6961 static char buf[100];
6962 bool useskip;
6963 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6964 int length = get_attr_length (insn);
6965 int xdelay;
6966
6967 /* A conditional branch to the following instruction (e.g. the delay slot) is
6968 asking for a disaster. I do not think this can happen as this pattern
6969 is only used when optimizing; jump optimization should eliminate the
6970 jump. But be prepared just in case. */
6971
6972 if (branch_to_delay_slot_p (insn))
6973 return "nop";
6974
6975 /* If this is a long branch with its delay slot unfilled, set `nullify'
6976 as it can nullify the delay slot and save a nop. */
6977 if (length == 8 && dbr_sequence_length () == 0)
6978 nullify = 1;
6979
6980 /* If this is a short forward conditional branch which did not get
6981 its delay slot filled, the delay slot can still be nullified. */
6982 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6983 nullify = forward_branch_p (insn);
6984
6985 /* A forward branch over a single nullified insn can be done with an
6986 extrs instruction. This avoids a single cycle penalty due to a
6987 mis-predicted branch if we fall through (branch not taken). */
6988 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6989
6990 switch (length)
6991 {
6992
6993 /* All short conditional branches except backwards with an unfilled
6994 delay slot. */
6995 case 4:
6996 if (useskip)
6997 strcpy (buf, "{extrs,|extrw,s,}");
6998 else
6999 strcpy (buf, "bb,");
7000 if (useskip && GET_MODE (operands[0]) == DImode)
7001 strcpy (buf, "extrd,s,*");
7002 else if (GET_MODE (operands[0]) == DImode)
7003 strcpy (buf, "bb,*");
7004 if ((which == 0 && negated)
7005 || (which == 1 && ! negated))
7006 strcat (buf, ">=");
7007 else
7008 strcat (buf, "<");
7009 if (useskip)
7010 strcat (buf, " %0,%1,1,%%r0");
7011 else if (nullify && negated)
7012 {
7013 if (branch_needs_nop_p (insn))
7014 strcat (buf, ",n %0,%1,%3%#");
7015 else
7016 strcat (buf, ",n %0,%1,%3");
7017 }
7018 else if (nullify && ! negated)
7019 {
7020 if (branch_needs_nop_p (insn))
7021 strcat (buf, ",n %0,%1,%2%#");
7022 else
7023 strcat (buf, ",n %0,%1,%2");
7024 }
7025 else if (! nullify && negated)
7026 strcat (buf, " %0,%1,%3");
7027 else if (! nullify && ! negated)
7028 strcat (buf, " %0,%1,%2");
7029 break;
7030
7031 /* All long conditionals. Note a short backward branch with an
7032 unfilled delay slot is treated just like a long backward branch
7033 with an unfilled delay slot. */
7034 case 8:
7035 /* Handle weird backwards branch with a filled delay slot
7036 which is nullified. */
7037 if (dbr_sequence_length () != 0
7038 && ! forward_branch_p (insn)
7039 && nullify)
7040 {
7041 strcpy (buf, "bb,");
7042 if (GET_MODE (operands[0]) == DImode)
7043 strcat (buf, "*");
7044 if ((which == 0 && negated)
7045 || (which == 1 && ! negated))
7046 strcat (buf, "<");
7047 else
7048 strcat (buf, ">=");
7049 if (negated)
7050 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7051 else
7052 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7053 }
7054 /* Handle short backwards branch with an unfilled delay slot.
7055 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7056 taken and untaken branches. */
7057 else if (dbr_sequence_length () == 0
7058 && ! forward_branch_p (insn)
7059 && INSN_ADDRESSES_SET_P ()
7060 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7061 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7062 {
7063 strcpy (buf, "bb,");
7064 if (GET_MODE (operands[0]) == DImode)
7065 strcat (buf, "*");
7066 if ((which == 0 && negated)
7067 || (which == 1 && ! negated))
7068 strcat (buf, ">=");
7069 else
7070 strcat (buf, "<");
7071 if (negated)
7072 strcat (buf, " %0,%1,%3%#");
7073 else
7074 strcat (buf, " %0,%1,%2%#");
7075 }
7076 else
7077 {
7078 if (GET_MODE (operands[0]) == DImode)
7079 strcpy (buf, "extrd,s,*");
7080 else
7081 strcpy (buf, "{extrs,|extrw,s,}");
7082 if ((which == 0 && negated)
7083 || (which == 1 && ! negated))
7084 strcat (buf, "<");
7085 else
7086 strcat (buf, ">=");
7087 if (nullify && negated)
7088 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7089 else if (nullify && ! negated)
7090 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7091 else if (negated)
7092 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7093 else
7094 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7095 }
7096 break;
7097
7098 default:
7099 /* The reversed conditional branch must branch over one additional
7100 instruction if the delay slot is filled and needs to be extracted
7101 by pa_output_lbranch. If the delay slot is empty or this is a
7102 nullified forward branch, the instruction after the reversed
7103 condition branch must be nullified. */
7104 if (dbr_sequence_length () == 0
7105 || (nullify && forward_branch_p (insn)))
7106 {
7107 nullify = 1;
7108 xdelay = 0;
7109 operands[4] = GEN_INT (length);
7110 }
7111 else
7112 {
7113 xdelay = 1;
7114 operands[4] = GEN_INT (length + 4);
7115 }
7116
7117 if (GET_MODE (operands[0]) == DImode)
7118 strcpy (buf, "bb,*");
7119 else
7120 strcpy (buf, "bb,");
7121 if ((which == 0 && negated)
7122 || (which == 1 && !negated))
7123 strcat (buf, "<");
7124 else
7125 strcat (buf, ">=");
7126 if (nullify)
7127 strcat (buf, ",n %0,%1,.+%4");
7128 else
7129 strcat (buf, " %0,%1,.+%4");
7130 output_asm_insn (buf, operands);
7131 return pa_output_lbranch (negated ? operands[3] : operands[2],
7132 insn, xdelay);
7133 }
7134 return buf;
7135 }
7136
7137 /* This routine handles all the branch-on-variable-bit conditional branch
7138 sequences we might need to generate. It handles nullification of delay
7139 slots, varying length branches, negated branches and all combinations
7140 of the above. It returns the appropriate output template to emit the
7141 branch. */
7142
7143 const char *
7144 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7145 int which)
7146 {
7147 static char buf[100];
7148 bool useskip;
7149 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7150 int length = get_attr_length (insn);
7151 int xdelay;
7152
7153 /* A conditional branch to the following instruction (e.g. the delay slot) is
7154 asking for a disaster. I do not think this can happen as this pattern
7155 is only used when optimizing; jump optimization should eliminate the
7156 jump. But be prepared just in case. */
7157
7158 if (branch_to_delay_slot_p (insn))
7159 return "nop";
7160
7161 /* If this is a long branch with its delay slot unfilled, set `nullify'
7162 as it can nullify the delay slot and save a nop. */
7163 if (length == 8 && dbr_sequence_length () == 0)
7164 nullify = 1;
7165
7166 /* If this is a short forward conditional branch which did not get
7167 its delay slot filled, the delay slot can still be nullified. */
7168 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7169 nullify = forward_branch_p (insn);
7170
7171 /* A forward branch over a single nullified insn can be done with an
7172 extrs instruction. This avoids a single cycle penalty due to a
7173 mis-predicted branch if we fall through (branch not taken). */
7174 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7175
7176 switch (length)
7177 {
7178
7179 /* All short conditional branches except backwards with an unfilled
7180 delay slot. */
7181 case 4:
7182 if (useskip)
7183 strcpy (buf, "{vextrs,|extrw,s,}");
7184 else
7185 strcpy (buf, "{bvb,|bb,}");
7186 if (useskip && GET_MODE (operands[0]) == DImode)
7187 strcpy (buf, "extrd,s,*");
7188 else if (GET_MODE (operands[0]) == DImode)
7189 strcpy (buf, "bb,*");
7190 if ((which == 0 && negated)
7191 || (which == 1 && ! negated))
7192 strcat (buf, ">=");
7193 else
7194 strcat (buf, "<");
7195 if (useskip)
7196 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7197 else if (nullify && negated)
7198 {
7199 if (branch_needs_nop_p (insn))
7200 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7201 else
7202 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7203 }
7204 else if (nullify && ! negated)
7205 {
7206 if (branch_needs_nop_p (insn))
7207 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7208 else
7209 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7210 }
7211 else if (! nullify && negated)
7212 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7213 else if (! nullify && ! negated)
7214 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7215 break;
7216
7217 /* All long conditionals. Note a short backward branch with an
7218 unfilled delay slot is treated just like a long backward branch
7219 with an unfilled delay slot. */
7220 case 8:
7221 /* Handle weird backwards branch with a filled delay slot
7222 which is nullified. */
7223 if (dbr_sequence_length () != 0
7224 && ! forward_branch_p (insn)
7225 && nullify)
7226 {
7227 strcpy (buf, "{bvb,|bb,}");
7228 if (GET_MODE (operands[0]) == DImode)
7229 strcat (buf, "*");
7230 if ((which == 0 && negated)
7231 || (which == 1 && ! negated))
7232 strcat (buf, "<");
7233 else
7234 strcat (buf, ">=");
7235 if (negated)
7236 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7237 else
7238 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7239 }
7240 /* Handle short backwards branch with an unfilled delay slot.
7241 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7242 taken and untaken branches. */
7243 else if (dbr_sequence_length () == 0
7244 && ! forward_branch_p (insn)
7245 && INSN_ADDRESSES_SET_P ()
7246 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7247 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7248 {
7249 strcpy (buf, "{bvb,|bb,}");
7250 if (GET_MODE (operands[0]) == DImode)
7251 strcat (buf, "*");
7252 if ((which == 0 && negated)
7253 || (which == 1 && ! negated))
7254 strcat (buf, ">=");
7255 else
7256 strcat (buf, "<");
7257 if (negated)
7258 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7259 else
7260 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7261 }
7262 else
7263 {
7264 strcpy (buf, "{vextrs,|extrw,s,}");
7265 if (GET_MODE (operands[0]) == DImode)
7266 strcpy (buf, "extrd,s,*");
7267 if ((which == 0 && negated)
7268 || (which == 1 && ! negated))
7269 strcat (buf, "<");
7270 else
7271 strcat (buf, ">=");
7272 if (nullify && negated)
7273 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7274 else if (nullify && ! negated)
7275 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7276 else if (negated)
7277 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7278 else
7279 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7280 }
7281 break;
7282
7283 default:
7284 /* The reversed conditional branch must branch over one additional
7285 instruction if the delay slot is filled and needs to be extracted
7286 by pa_output_lbranch. If the delay slot is empty or this is a
7287 nullified forward branch, the instruction after the reversed
7288 condition branch must be nullified. */
7289 if (dbr_sequence_length () == 0
7290 || (nullify && forward_branch_p (insn)))
7291 {
7292 nullify = 1;
7293 xdelay = 0;
7294 operands[4] = GEN_INT (length);
7295 }
7296 else
7297 {
7298 xdelay = 1;
7299 operands[4] = GEN_INT (length + 4);
7300 }
7301
7302 if (GET_MODE (operands[0]) == DImode)
7303 strcpy (buf, "bb,*");
7304 else
7305 strcpy (buf, "{bvb,|bb,}");
7306 if ((which == 0 && negated)
7307 || (which == 1 && !negated))
7308 strcat (buf, "<");
7309 else
7310 strcat (buf, ">=");
7311 if (nullify)
7312 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7313 else
7314 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7315 output_asm_insn (buf, operands);
7316 return pa_output_lbranch (negated ? operands[3] : operands[2],
7317 insn, xdelay);
7318 }
7319 return buf;
7320 }
7321
7322 /* Return the output template for emitting a dbra type insn.
7323
7324 Note it may perform some output operations on its own before
7325 returning the final output string. */
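/* A hypothetical example (editorial, not from the sources): for the GR
   alternative with a short reach (length == 4, no nullification), the
   template returned below expands to something like

	addib,> -1,%r3,L$0042

   which decrements the counter in %r3 and branches while the condition
   holds.  The condition, register, and label are invented.  */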
7326 const char *
7327 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7328 {
7329 int length = get_attr_length (insn);
7330
7331 /* A conditional branch to the following instruction (i.e., the delay slot) is
7332 asking for a disaster. Be prepared! */
7333
7334 if (branch_to_delay_slot_p (insn))
7335 {
7336 if (which_alternative == 0)
7337 return "ldo %1(%0),%0";
7338 else if (which_alternative == 1)
7339 {
7340 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7341 output_asm_insn ("ldw -16(%%r30),%4", operands);
7342 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7343 return "{fldws|fldw} -16(%%r30),%0";
7344 }
7345 else
7346 {
7347 output_asm_insn ("ldw %0,%4", operands);
7348 return "ldo %1(%4),%4\n\tstw %4,%0";
7349 }
7350 }
7351
7352 if (which_alternative == 0)
7353 {
7354 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7355 int xdelay;
7356
7357 /* If this is a long branch with its delay slot unfilled, set `nullify'
7358 as it can nullify the delay slot and save a nop. */
7359 if (length == 8 && dbr_sequence_length () == 0)
7360 nullify = 1;
7361
7362 /* If this is a short forward conditional branch which did not get
7363 its delay slot filled, the delay slot can still be nullified. */
7364 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7365 nullify = forward_branch_p (insn);
7366
7367 switch (length)
7368 {
7369 case 4:
7370 if (nullify)
7371 {
7372 if (branch_needs_nop_p (insn))
7373 return "addib,%C2,n %1,%0,%3%#";
7374 else
7375 return "addib,%C2,n %1,%0,%3";
7376 }
7377 else
7378 return "addib,%C2 %1,%0,%3";
7379
7380 case 8:
7381 /* Handle weird backwards branch with a filled delay slot
7382 which is nullified. */
7383 if (dbr_sequence_length () != 0
7384 && ! forward_branch_p (insn)
7385 && nullify)
7386 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7387 /* Handle short backwards branch with an unfilled delay slot.
7388 Using an addb;nop rather than addi;bl saves 1 cycle for both
7389 taken and untaken branches. */
7390 else if (dbr_sequence_length () == 0
7391 && ! forward_branch_p (insn)
7392 && INSN_ADDRESSES_SET_P ()
7393 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7394 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7395 return "addib,%C2 %1,%0,%3%#";
7396
7397 /* Handle normal cases. */
7398 if (nullify)
7399 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7400 else
7401 return "addi,%N2 %1,%0,%0\n\tb %3";
7402
7403 default:
7404 /* The reversed conditional branch must branch over one additional
7405 instruction if the delay slot is filled and needs to be extracted
7406 by pa_output_lbranch. If the delay slot is empty or this is a
7407 nullified forward branch, the instruction after the reversed
7408 condition branch must be nullified. */
7409 if (dbr_sequence_length () == 0
7410 || (nullify && forward_branch_p (insn)))
7411 {
7412 nullify = 1;
7413 xdelay = 0;
7414 operands[4] = GEN_INT (length);
7415 }
7416 else
7417 {
7418 xdelay = 1;
7419 operands[4] = GEN_INT (length + 4);
7420 }
7421
7422 if (nullify)
7423 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7424 else
7425 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7426
7427 return pa_output_lbranch (operands[3], insn, xdelay);
7428 }
7429
7430 }
7431 /* Deal with gross reload from FP register case. */
7432 else if (which_alternative == 1)
7433 {
7434 /* Move loop counter from FP register to MEM then into a GR,
7435 increment the GR, store the GR into MEM, and finally reload
7436 the FP register from MEM from within the branch's delay slot. */
7437 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7438 operands);
7439 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7440 if (length == 24)
7441 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7442 else if (length == 28)
7443 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7444 else
7445 {
7446 operands[5] = GEN_INT (length - 16);
7447 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7448 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7449 return pa_output_lbranch (operands[3], insn, 0);
7450 }
7451 }
7452 /* Deal with gross reload from memory case. */
7453 else
7454 {
7455 /* Reload loop counter from memory, the store back to memory
7456 happens in the branch's delay slot. */
7457 output_asm_insn ("ldw %0,%4", operands);
7458 if (length == 12)
7459 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7460 else if (length == 16)
7461 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7462 else
7463 {
7464 operands[5] = GEN_INT (length - 4);
7465 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7466 return pa_output_lbranch (operands[3], insn, 0);
7467 }
7468 }
7469 }
7470
7471 /* Return the output template for emitting a movb type insn.
7472
7473 Note it may perform some output operations on its own before
7474 returning the final output string. */
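/* A hypothetical example (editorial, not from the sources): for the GR
   destination alternative with a short reach (length == 4), the template
   returned below expands to roughly

	movb,= %r5,%r3,L$0077

   copying %r5 into %r3 and branching if the moved value satisfies the
   condition.  The condition, registers, and label are invented.  */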
7475 const char *
7476 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7477 int reverse_comparison)
7478 {
7479 int length = get_attr_length (insn);
7480
7481 /* A conditional branch to the following instruction (i.e., the delay slot) is
7482 asking for a disaster. Be prepared! */
7483
7484 if (branch_to_delay_slot_p (insn))
7485 {
7486 if (which_alternative == 0)
7487 return "copy %1,%0";
7488 else if (which_alternative == 1)
7489 {
7490 output_asm_insn ("stw %1,-16(%%r30)", operands);
7491 return "{fldws|fldw} -16(%%r30),%0";
7492 }
7493 else if (which_alternative == 2)
7494 return "stw %1,%0";
7495 else
7496 return "mtsar %r1";
7497 }
7498
7499 /* Support the second variant. */
7500 if (reverse_comparison)
7501 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7502
7503 if (which_alternative == 0)
7504 {
7505 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7506 int xdelay;
7507
7508 /* If this is a long branch with its delay slot unfilled, set `nullify'
7509 as it can nullify the delay slot and save a nop. */
7510 if (length == 8 && dbr_sequence_length () == 0)
7511 nullify = 1;
7512
7513 /* If this is a short forward conditional branch which did not get
7514 its delay slot filled, the delay slot can still be nullified. */
7515 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7516 nullify = forward_branch_p (insn);
7517
7518 switch (length)
7519 {
7520 case 4:
7521 if (nullify)
7522 {
7523 if (branch_needs_nop_p (insn))
7524 return "movb,%C2,n %1,%0,%3%#";
7525 else
7526 return "movb,%C2,n %1,%0,%3";
7527 }
7528 else
7529 return "movb,%C2 %1,%0,%3";
7530
7531 case 8:
7532 /* Handle weird backwards branch with a filled delay slot
7533 which is nullified. */
7534 if (dbr_sequence_length () != 0
7535 && ! forward_branch_p (insn)
7536 && nullify)
7537 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7538
7539 /* Handle short backwards branch with an unfilled delay slot.
7540 Using a movb;nop rather than or;bl saves 1 cycle for both
7541 taken and untaken branches. */
7542 else if (dbr_sequence_length () == 0
7543 && ! forward_branch_p (insn)
7544 && INSN_ADDRESSES_SET_P ()
7545 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7546 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7547 return "movb,%C2 %1,%0,%3%#";
7548 /* Handle normal cases. */
7549 if (nullify)
7550 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7551 else
7552 return "or,%N2 %1,%%r0,%0\n\tb %3";
7553
7554 default:
7555 /* The reversed conditional branch must branch over one additional
7556 instruction if the delay slot is filled and needs to be extracted
7557 by pa_output_lbranch. If the delay slot is empty or this is a
7558 nullified forward branch, the instruction after the reversed
7559 condition branch must be nullified. */
7560 if (dbr_sequence_length () == 0
7561 || (nullify && forward_branch_p (insn)))
7562 {
7563 nullify = 1;
7564 xdelay = 0;
7565 operands[4] = GEN_INT (length);
7566 }
7567 else
7568 {
7569 xdelay = 1;
7570 operands[4] = GEN_INT (length + 4);
7571 }
7572
7573 if (nullify)
7574 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7575 else
7576 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7577
7578 return pa_output_lbranch (operands[3], insn, xdelay);
7579 }
7580 }
7581 /* Deal with gross reload for FP destination register case. */
7582 else if (which_alternative == 1)
7583 {
7584 /* Move source register to MEM, perform the branch test, then
7585 finally load the FP register from MEM from within the branch's
7586 delay slot. */
7587 output_asm_insn ("stw %1,-16(%%r30)", operands);
7588 if (length == 12)
7589 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7590 else if (length == 16)
7591 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7592 else
7593 {
7594 operands[4] = GEN_INT (length - 4);
7595 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7596 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7597 return pa_output_lbranch (operands[3], insn, 0);
7598 }
7599 }
7600 /* Deal with gross reload from memory case. */
7601 else if (which_alternative == 2)
7602 {
7603 /* Reload loop counter from memory, the store back to memory
7604 happens in the branch's delay slot. */
7605 if (length == 8)
7606 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7607 else if (length == 12)
7608 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7609 else
7610 {
7611 operands[4] = GEN_INT (length);
7612 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7613 operands);
7614 return pa_output_lbranch (operands[3], insn, 0);
7615 }
7616 }
7617 /* Handle SAR as a destination. */
7618 else
7619 {
7620 if (length == 8)
7621 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7622 else if (length == 12)
7623 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7624 else
7625 {
7626 operands[4] = GEN_INT (length);
7627 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7628 operands);
7629 return pa_output_lbranch (operands[3], insn, 0);
7630 }
7631 }
7632 }
7633
7634 /* Copy any FP arguments in INSN into integer registers. */
7635 static void
7636 copy_fp_args (rtx_insn *insn)
7637 {
7638 rtx link;
7639 rtx xoperands[2];
7640
7641 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7642 {
7643 int arg_mode, regno;
7644 rtx use = XEXP (link, 0);
7645
7646 if (! (GET_CODE (use) == USE
7647 && GET_CODE (XEXP (use, 0)) == REG
7648 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7649 continue;
7650
7651 arg_mode = GET_MODE (XEXP (use, 0));
7652 regno = REGNO (XEXP (use, 0));
7653
7654 /* Is it a floating point register? */
7655 if (regno >= 32 && regno <= 39)
7656 {
7657 /* Copy the FP register into an integer register via memory. */
7658 if (arg_mode == SFmode)
7659 {
7660 xoperands[0] = XEXP (use, 0);
7661 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7662 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7663 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7664 }
7665 else
7666 {
7667 xoperands[0] = XEXP (use, 0);
7668 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7669 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7670 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7671 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7672 }
7673 }
7674 }
7675 }
7676
7677 /* Compute length of the FP argument copy sequence for INSN. */
7678 static int
7679 length_fp_args (rtx_insn *insn)
7680 {
7681 int length = 0;
7682 rtx link;
7683
7684 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7685 {
7686 int arg_mode, regno;
7687 rtx use = XEXP (link, 0);
7688
7689 if (! (GET_CODE (use) == USE
7690 && GET_CODE (XEXP (use, 0)) == REG
7691 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7692 continue;
7693
7694 arg_mode = GET_MODE (XEXP (use, 0));
7695 regno = REGNO (XEXP (use, 0));
7696
7697 /* Is it a floating point register? */
7698 if (regno >= 32 && regno <= 39)
7699 {
7700 if (arg_mode == SFmode)
7701 length += 8;
7702 else
7703 length += 12;
7704 }
7705 }
7706
7707 return length;
7708 }
7709
7710 /* Return the attribute length for the millicode call instruction INSN.
7711 The length must match the code generated by pa_output_millicode_call.
7712 We include the delay slot in the returned length as it is better to
7713 overestimate the length than to underestimate it. */
7714
7715 int
7716 pa_attr_length_millicode_call (rtx_insn *insn)
7717 {
7718 unsigned long distance = -1;
7719 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7720
7721 if (INSN_ADDRESSES_SET_P ())
7722 {
7723 distance = (total + insn_current_reference_address (insn));
7724 if (distance < total)
7725 distance = -1;
7726 }
7727
7728 if (TARGET_64BIT)
7729 {
7730 if (!TARGET_LONG_CALLS && distance < 7600000)
7731 return 8;
7732
7733 return 20;
7734 }
7735 else if (TARGET_PORTABLE_RUNTIME)
7736 return 24;
7737 else
7738 {
7739 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7740 return 8;
7741
7742 if (!flag_pic)
7743 return 12;
7744
7745 return 24;
7746 }
7747 }
7748
7749 /* INSN is a function call.
7750
7751 CALL_DEST is the routine we are calling. */
7752
7753 const char *
7754 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7755 {
7756 int attr_length = get_attr_length (insn);
7757 int seq_length = dbr_sequence_length ();
7758 rtx xoperands[4];
7759
7760 xoperands[0] = call_dest;
7761
7762 /* Handle the common case where we are sure that the branch will
7763 reach the beginning of the $CODE$ subspace. The within-reach
7764 form of the $$sh_func_adrs call has a length of 28. Because it
7765 has an attribute type of sh_func_adrs, it never has a nonzero
7766 sequence length (i.e., the delay slot is never filled). */
7767 if (!TARGET_LONG_CALLS
7768 && (attr_length == 8
7769 || (attr_length == 28
7770 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7771 {
7772 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7773 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7774 }
7775 else
7776 {
7777 if (TARGET_64BIT)
7778 {
7779 /* It might seem that one insn could be saved by accessing
7780 the millicode function using the linkage table. However,
7781 this doesn't work in shared libraries and other dynamically
7782 loaded objects. Using a pc-relative sequence also avoids
7783 problems related to the implicit use of the gp register. */
7784 xoperands[1] = gen_rtx_REG (Pmode, 1);
7785 xoperands[2] = xoperands[1];
7786 pa_output_pic_pcrel_sequence (xoperands);
7787 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7788 }
7789 else if (TARGET_PORTABLE_RUNTIME)
7790 {
7791 /* Pure portable runtime doesn't allow be/ble; we also don't
7792 have PIC support in the assembler/linker, so this sequence
7793 is needed. */
7794
7795 /* Get the address of our target into %r1. */
7796 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7797 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7798
7799 /* Get our return address into %r31. */
7800 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7801 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7802
7803 /* Jump to our target address in %r1. */
7804 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7805 }
7806 else if (!flag_pic)
7807 {
7808 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7809 if (TARGET_PA_20)
7810 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7811 else
7812 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7813 }
7814 else
7815 {
7816 xoperands[1] = gen_rtx_REG (Pmode, 31);
7817 xoperands[2] = gen_rtx_REG (Pmode, 1);
7818 pa_output_pic_pcrel_sequence (xoperands);
7819
7820 /* Adjust return address. */
7821 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7822
7823 /* Jump to our target address in %r1. */
7824 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7825 }
7826 }
7827
7828 if (seq_length == 0)
7829 output_asm_insn ("nop", xoperands);
7830
7831 return "";
7832 }
7833
7834 /* Return the attribute length of the call instruction INSN. The SIBCALL
7835 flag indicates whether INSN is a regular call or a sibling call. The
7836 length returned must be longer than the code actually generated by
7837 pa_output_call. Since branch shortening is done before delay branch
7838 sequencing, there is no way to determine whether or not the delay
7839 slot will be filled during branch shortening. Even when the delay
7840 slot is filled, we may have to add a nop if the delay slot contains
7841 a branch that can't reach its target. Thus, we always have to include
7842 the delay slot in the length estimate. This used to be done in
7843 pa_adjust_insn_length but we do it here now as some sequences always
7844 fill the delay slot and we can save four bytes in the estimate for
7845 these sequences. */
7846
7847 int
7848 pa_attr_length_call (rtx_insn *insn, int sibcall)
7849 {
7850 int local_call;
7851 rtx call, call_dest;
7852 tree call_decl;
7853 int length = 0;
7854 rtx pat = PATTERN (insn);
7855 unsigned long distance = -1;
7856
7857 gcc_assert (CALL_P (insn));
7858
7859 if (INSN_ADDRESSES_SET_P ())
7860 {
7861 unsigned long total;
7862
7863 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7864 distance = (total + insn_current_reference_address (insn));
7865 if (distance < total)
7866 distance = -1;
7867 }
7868
7869 gcc_assert (GET_CODE (pat) == PARALLEL);
7870
7871 /* Get the call rtx. */
7872 call = XVECEXP (pat, 0, 0);
7873 if (GET_CODE (call) == SET)
7874 call = SET_SRC (call);
7875
7876 gcc_assert (GET_CODE (call) == CALL);
7877
7878 /* Determine if this is a local call. */
7879 call_dest = XEXP (XEXP (call, 0), 0);
7880 call_decl = SYMBOL_REF_DECL (call_dest);
7881 local_call = call_decl && targetm.binds_local_p (call_decl);
7882
7883 /* pc-relative branch. */
7884 if (!TARGET_LONG_CALLS
7885 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7886 || distance < MAX_PCREL17F_OFFSET))
7887 length += 8;
7888
7889 /* 64-bit plabel sequence. */
7890 else if (TARGET_64BIT && !local_call)
7891 length += sibcall ? 28 : 24;
7892
7893 /* non-pic long absolute branch sequence. */
7894 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7895 length += 12;
7896
7897 /* long pc-relative branch sequence. */
7898 else if (TARGET_LONG_PIC_SDIFF_CALL
7899 || (TARGET_GAS && !TARGET_SOM && local_call))
7900 {
7901 length += 20;
7902
7903 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7904 length += 8;
7905 }
7906
7907 /* 32-bit plabel sequence. */
7908 else
7909 {
7910 length += 32;
7911
7912 if (TARGET_SOM)
7913 length += length_fp_args (insn);
7914
7915 if (flag_pic)
7916 length += 4;
7917
7918 if (!TARGET_PA_20)
7919 {
7920 if (!sibcall)
7921 length += 8;
7922
7923 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7924 length += 8;
7925 }
7926 }
7927
7928 return length;
7929 }
7930
7931 /* INSN is a function call.
7932
7933 CALL_DEST is the routine we are calling. */
7934
7935 const char *
7936 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7937 {
7938 int seq_length = dbr_sequence_length ();
7939 tree call_decl = SYMBOL_REF_DECL (call_dest);
7940 int local_call = call_decl && targetm.binds_local_p (call_decl);
7941 rtx xoperands[4];
7942
7943 xoperands[0] = call_dest;
7944
7945 /* Handle the common case where we're sure that the branch will reach
7946 the beginning of the "$CODE$" subspace. This is the beginning of
7947 the current function if we are in a named section. */
7948 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7949 {
7950 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7951 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7952 }
7953 else
7954 {
7955 if (TARGET_64BIT && !local_call)
7956 {
7957 /* ??? As far as I can tell, the HP linker doesn't support the
7958 long pc-relative sequence described in the 64-bit runtime
7959 architecture. So, we use a slightly longer indirect call. */
7960 xoperands[0] = pa_get_deferred_plabel (call_dest);
7961 xoperands[1] = gen_label_rtx ();
7962
7963 /* If this isn't a sibcall, we put the load of %r27 into the
7964 delay slot. We can't do this in a sibcall as we don't
7965 have a second call-clobbered scratch register available.
7966 We don't need to do anything when generating fast indirect
7967 calls. */
7968 if (seq_length != 0 && !sibcall)
7969 {
7970 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7971 optimize, 0, NULL);
7972
7973 /* Now delete the delay insn. */
7974 SET_INSN_DELETED (NEXT_INSN (insn));
7975 seq_length = 0;
7976 }
7977
7978 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7979 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7980 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7981
7982 if (sibcall)
7983 {
7984 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7985 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7986 output_asm_insn ("bve (%%r1)", xoperands);
7987 }
7988 else
7989 {
7990 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7991 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7992 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7993 seq_length = 1;
7994 }
7995 }
7996 else
7997 {
7998 int indirect_call = 0;
7999
8000 /* Emit a long call. There are several different sequences
8001 of increasing length and complexity. In most cases,
8002 they don't allow an instruction in the delay slot. */
8003 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8004 && !TARGET_LONG_PIC_SDIFF_CALL
8005 && !(TARGET_GAS && !TARGET_SOM && local_call)
8006 && !TARGET_64BIT)
8007 indirect_call = 1;
8008
8009 if (seq_length != 0
8010 && !sibcall
8011 && (!TARGET_PA_20
8012 || indirect_call
8013 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
8014 {
8015 /* A non-jump insn in the delay slot. By definition we can
8016 emit this insn before the call (and in fact before argument
8017 relocation). */
8018 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
8019 NULL);
8020
8021 /* Now delete the delay insn. */
8022 SET_INSN_DELETED (NEXT_INSN (insn));
8023 seq_length = 0;
8024 }
8025
8026 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8027 {
8028 /* This is the best sequence for making long calls in
8029 non-pic code. Unfortunately, GNU ld doesn't provide
8030 the stub needed for external calls, and GAS's support
8031 for this with the SOM linker is buggy. It is safe
8032 to use this for local calls. */
8033 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8034 if (sibcall)
8035 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
8036 else
8037 {
8038 if (TARGET_PA_20)
8039 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8040 xoperands);
8041 else
8042 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8043
8044 output_asm_insn ("copy %%r31,%%r2", xoperands);
8045 seq_length = 1;
8046 }
8047 }
8048 else
8049 {
8050 /* The HP assembler and linker can handle relocations for
8051 the difference of two symbols. The HP assembler
8052 recognizes the sequence as a pc-relative call and
8053 the linker provides stubs when needed. */
8054
8055 /* GAS currently can't generate the relocations that
8056 are needed for the SOM linker under HP-UX using this
8057 sequence. The GNU linker doesn't generate the stubs
8058 that are needed for external calls on TARGET_ELF32
8059 with this sequence. For now, we have to use a longer
8060 plabel sequence when using GAS for non local calls. */
8061 if (TARGET_LONG_PIC_SDIFF_CALL
8062 || (TARGET_GAS && !TARGET_SOM && local_call))
8063 {
8064 xoperands[1] = gen_rtx_REG (Pmode, 1);
8065 xoperands[2] = xoperands[1];
8066 pa_output_pic_pcrel_sequence (xoperands);
8067 }
8068 else
8069 {
8070 /* Emit a long plabel-based call sequence. This is
8071 essentially an inline implementation of $$dyncall.
8072 We don't actually try to call $$dyncall as this is
8073 as difficult as calling the function itself. */
8074 xoperands[0] = pa_get_deferred_plabel (call_dest);
8075 xoperands[1] = gen_label_rtx ();
8076
8077 /* Since the call is indirect, FP arguments in registers
8078 need to be copied to the general registers. Then, the
8079 argument relocation stub will copy them back. */
8080 if (TARGET_SOM)
8081 copy_fp_args (insn);
8082
8083 if (flag_pic)
8084 {
8085 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8086 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8087 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8088 }
8089 else
8090 {
8091 output_asm_insn ("addil LR'%0-$global$,%%r27",
8092 xoperands);
8093 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8094 xoperands);
8095 }
8096
8097 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8098 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8099 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8100 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8101
8102 if (!sibcall && !TARGET_PA_20)
8103 {
8104 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8105 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8106 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8107 else
8108 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8109 }
8110 }
8111
8112 if (TARGET_PA_20)
8113 {
8114 if (sibcall)
8115 output_asm_insn ("bve (%%r1)", xoperands);
8116 else
8117 {
8118 if (indirect_call)
8119 {
8120 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8121 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8122 seq_length = 1;
8123 }
8124 else
8125 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8126 }
8127 }
8128 else
8129 {
8130 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8131 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8132 xoperands);
8133
8134 if (sibcall)
8135 {
8136 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8137 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8138 else
8139 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8140 }
8141 else
8142 {
8143 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8144 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8145 else
8146 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8147
8148 if (indirect_call)
8149 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8150 else
8151 output_asm_insn ("copy %%r31,%%r2", xoperands);
8152 seq_length = 1;
8153 }
8154 }
8155 }
8156 }
8157 }
8158
8159 if (seq_length == 0)
8160 output_asm_insn ("nop", xoperands);
8161
8162 return "";
8163 }
8164
8165 /* Return the attribute length of the indirect call instruction INSN.
8166 The length must match the code generated by pa_output_indirect_call.
8167 The returned length includes the delay slot. Currently, the delay
8168 slot of an indirect call sequence is not exposed and it is used by
8169 the sequence itself. */
8170
8171 int
8172 pa_attr_length_indirect_call (rtx_insn *insn)
8173 {
8174 unsigned long distance = -1;
8175 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8176
8177 if (INSN_ADDRESSES_SET_P ())
8178 {
8179 distance = (total + insn_current_reference_address (insn));
8180 if (distance < total)
8181 distance = -1;
8182 }
8183
8184 if (TARGET_64BIT)
8185 return 12;
8186
8187 if (TARGET_FAST_INDIRECT_CALLS)
8188 return 8;
8189
8190 if (TARGET_PORTABLE_RUNTIME)
8191 return 16;
8192
8193 /* Inline version of $$dyncall. */
8194 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8195 return 20;
8196
8197 if (!TARGET_LONG_CALLS
8198 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8199 || distance < MAX_PCREL17F_OFFSET))
8200 return 8;
8201
8202 /* Out of reach; we can use ble. */
8203 if (!flag_pic)
8204 return 12;
8205
8206 /* Inline version of $$dyncall. */
8207 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8208 return 20;
8209
8210 if (!optimize_size)
8211 return 36;
8212
8213 /* Long PIC pc-relative call. */
8214 return 20;
8215 }
8216
8217 const char *
8218 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8219 {
8220 rtx xoperands[4];
8221 int length;
8222
8223 if (TARGET_64BIT)
8224 {
8225 xoperands[0] = call_dest;
8226 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8227 "bve,l (%%r2),%%r2\n\t"
8228 "ldd 24(%0),%%r27", xoperands);
8229 return "";
8230 }
8231
8232 /* First the special case for kernels, level 0 systems, etc. */
8233 if (TARGET_FAST_INDIRECT_CALLS)
8234 {
8235 pa_output_arg_descriptor (insn);
8236 if (TARGET_PA_20)
8237 return "bve,l,n (%%r22),%%r2\n\tnop";
8238 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8239 }
8240
8241 if (TARGET_PORTABLE_RUNTIME)
8242 {
8243 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8244 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8245 pa_output_arg_descriptor (insn);
8246 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8247 }
8248
8249 /* Maybe emit a fast inline version of $$dyncall. */
8250 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8251 {
8252 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8253 "ldw 2(%%r22),%%r19\n\t"
8254 "ldw -2(%%r22),%%r22", xoperands);
8255 pa_output_arg_descriptor (insn);
8256 if (TARGET_NO_SPACE_REGS)
8257 {
8258 if (TARGET_PA_20)
8259 return "bve,l,n (%%r22),%%r2\n\tnop";
8260 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8261 }
8262 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8263 }
8264
8265 /* Now the normal case -- we can reach $$dyncall directly or
8266 we're sure that we can get there via a long-branch stub.
8267
8268 No need to check target flags as the length uniquely identifies
8269 the remaining cases. */
8270 length = pa_attr_length_indirect_call (insn);
8271 if (length == 8)
8272 {
8273 pa_output_arg_descriptor (insn);
8274
8275 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8276 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8277 variant of the B,L instruction can't be used on the SOM target. */
8278 if (TARGET_PA_20 && !TARGET_SOM)
8279 return "b,l,n $$dyncall,%%r2\n\tnop";
8280 else
8281 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8282 }
8283
8284 /* Long millicode call, but we are not generating PIC or portable runtime
8285 code. */
8286 if (length == 12)
8287 {
8288 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8289 pa_output_arg_descriptor (insn);
8290 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8291 }
8292
8293 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8294 pc-relative call sequence is five instructions. The inline PA 2.0
8295 version of $$dyncall is also five instructions. The PA 1.X versions
8296 are longer but still an overall win. */
8297 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8298 {
8299 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8300 "ldw 2(%%r22),%%r19\n\t"
8301 "ldw -2(%%r22),%%r22", xoperands);
8302 if (TARGET_NO_SPACE_REGS)
8303 {
8304 pa_output_arg_descriptor (insn);
8305 if (TARGET_PA_20)
8306 return "bve,l,n (%%r22),%%r2\n\tnop";
8307 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8308 }
8309 if (TARGET_PA_20)
8310 {
8311 pa_output_arg_descriptor (insn);
8312 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8313 }
8314 output_asm_insn ("bl .+8,%%r2\n\t"
8315 "ldo 16(%%r2),%%r2\n\t"
8316 "ldsid (%%r22),%%r1\n\t"
8317 "mtsp %%r1,%%sr0", xoperands);
8318 pa_output_arg_descriptor (insn);
8319 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8320 }
8321
8322 /* We need a long PIC call to $$dyncall. */
8323 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8324 xoperands[1] = gen_rtx_REG (Pmode, 2);
8325 xoperands[2] = gen_rtx_REG (Pmode, 1);
8326 pa_output_pic_pcrel_sequence (xoperands);
8327 pa_output_arg_descriptor (insn);
8328 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8329 }
8330
8331 /* In HPUX 8.0's shared library scheme, special relocations are needed
8332 for function labels if they might be passed to a function
8333 in a shared library (because shared libraries don't live in code
8334 space), and special magic is needed to construct their address. */
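/* A minimal sketch of the round trip (editorial, not from the sources;
   the symbol name "foo" is hypothetical):

	rtx sym = gen_rtx_SYMBOL_REF (Pmode, "foo");
	pa_encode_label (sym);                   -- XSTR (sym, 0) is "@foo"
	pa_strip_name_encoding (XSTR (sym, 0));  -- yields "foo" again

   The '@' prefix marks a function label that needs the special
   relocation; pa_strip_name_encoding below removes it again.  */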
8335
8336 void
8337 pa_encode_label (rtx sym)
8338 {
8339 const char *str = XSTR (sym, 0);
8340 int len = strlen (str) + 1;
8341 char *newstr, *p;
8342
8343 p = newstr = XALLOCAVEC (char, len + 1);
8344 *p++ = '@';
8345 strcpy (p, str);
8346
8347 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8348 }
8349
8350 static void
8351 pa_encode_section_info (tree decl, rtx rtl, int first)
8352 {
8353 int old_referenced = 0;
8354
8355 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8356 old_referenced
8357 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8358
8359 default_encode_section_info (decl, rtl, first);
8360
8361 if (first && TEXT_SPACE_P (decl))
8362 {
8363 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8364 if (TREE_CODE (decl) == FUNCTION_DECL)
8365 pa_encode_label (XEXP (rtl, 0));
8366 }
8367 else if (old_referenced)
8368 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8369 }
8370
8371 /* This is roughly the inverse of pa_encode_section_info. */
8372
8373 static const char *
8374 pa_strip_name_encoding (const char *str)
8375 {
8376 str += (*str == '@');
8377 str += (*str == '*');
8378 return str;
8379 }
8380
8381 /* Returns 1 if OP is a function label involved in a simple addition
8382 with a constant. Used to keep certain patterns from matching
8383 during instruction combination. */
8384 int
8385 pa_is_function_label_plus_const (rtx op)
8386 {
8387 /* Strip off any CONST. */
8388 if (GET_CODE (op) == CONST)
8389 op = XEXP (op, 0);
8390
8391 return (GET_CODE (op) == PLUS
8392 && function_label_operand (XEXP (op, 0), VOIDmode)
8393 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8394 }
8395
8396 /* Output assembly code for a thunk to FUNCTION. */
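/* As a hedged illustration (editorial, not from the sources): with a
   DELTA that fits in 14 bits and a directly reachable target, the body
   emitted below is essentially

	b function
	ldo 8(%r26),%r26

   adjusting the hidden "this" pointer in %r26 from the branch's delay
   slot.  The target name and delta value are hypothetical.  */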
8397
8398 static void
8399 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8400 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8401 tree function)
8402 {
8403 static unsigned int current_thunk_number;
8404 int val_14 = VAL_14_BITS_P (delta);
8405 unsigned int old_last_address = last_address, nbytes = 0;
8406 char label[17];
8407 rtx xoperands[4];
8408
8409 xoperands[0] = XEXP (DECL_RTL (function), 0);
8410 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8411 xoperands[2] = GEN_INT (delta);
8412
8413 final_start_function (emit_barrier (), file, 1);
8414
8415 /* Output the thunk. We know that the function is in the same
8416 translation unit (i.e., the same space) as the thunk, and that
8417 thunks are output after their method. Thus, we don't need an
8418 external branch to reach the function. With SOM and GAS,
8419 functions and thunks are effectively in different sections.
8420 Thus, we can always use an IA-relative branch and the linker
8421 will add a long branch stub if necessary.
8422
8423 However, we have to be careful when generating PIC code on the
8424 SOM port to ensure that the sequence does not transfer to an
8425 import stub for the target function as this could clobber the
8426 return value saved at SP-24. This would also apply to the
8427 32-bit linux port if the multi-space model is implemented. */
8428 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8429 && !(flag_pic && TREE_PUBLIC (function))
8430 && (TARGET_GAS || last_address < 262132))
8431 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8432 && ((targetm_common.have_named_sections
8433 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8434 /* The GNU 64-bit linker has rather poor stub management.
8435 So, we use a long branch from thunks that aren't in
8436 the same section as the target function. */
8437 && ((!TARGET_64BIT
8438 && (DECL_SECTION_NAME (thunk_fndecl)
8439 != DECL_SECTION_NAME (function)))
8440 || ((DECL_SECTION_NAME (thunk_fndecl)
8441 == DECL_SECTION_NAME (function))
8442 && last_address < 262132)))
8443 /* In this case, we need to be able to reach the start of
8444 the stub table even though the function is likely closer
8445 and can be jumped to directly. */
8446 || (targetm_common.have_named_sections
8447 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8448 && DECL_SECTION_NAME (function) == NULL
8449 && total_code_bytes < MAX_PCREL17F_OFFSET)
8450 /* Likewise. */
8451 || (!targetm_common.have_named_sections
8452 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8453 {
8454 if (!val_14)
8455 output_asm_insn ("addil L'%2,%%r26", xoperands);
8456
8457 output_asm_insn ("b %0", xoperands);
8458
8459 if (val_14)
8460 {
8461 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8462 nbytes += 8;
8463 }
8464 else
8465 {
8466 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8467 nbytes += 12;
8468 }
8469 }
8470 else if (TARGET_64BIT)
8471 {
8472 rtx xop[4];
8473
8474 /* We only have one call-clobbered scratch register, so we can't
8475 make use of the delay slot if delta doesn't fit in 14 bits. */
8476 if (!val_14)
8477 {
8478 output_asm_insn ("addil L'%2,%%r26", xoperands);
8479 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8480 }
8481
8482 /* Load function address into %r1. */
8483 xop[0] = xoperands[0];
8484 xop[1] = gen_rtx_REG (Pmode, 1);
8485 xop[2] = xop[1];
8486 pa_output_pic_pcrel_sequence (xop);
8487
8488 if (val_14)
8489 {
8490 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8491 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8492 nbytes += 20;
8493 }
8494 else
8495 {
8496 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8497 nbytes += 24;
8498 }
8499 }
8500 else if (TARGET_PORTABLE_RUNTIME)
8501 {
8502 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8503 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8504
8505 if (!val_14)
8506 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8507
8508 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8509
8510 if (val_14)
8511 {
8512 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8513 nbytes += 16;
8514 }
8515 else
8516 {
8517 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8518 nbytes += 20;
8519 }
8520 }
8521 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8522 {
8523 /* The function is accessible from outside this module. The only
8524 way to avoid an import stub between the thunk and function is to
8525 call the function directly with an indirect sequence similar to
8526 that used by $$dyncall. This is possible because $$dyncall acts
8527 as the import stub in an indirect call. */
8528 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8529 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8530 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8531 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8532 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8533 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8534 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8535 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8536 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8537
8538 if (!val_14)
8539 {
8540 output_asm_insn ("addil L'%2,%%r26", xoperands);
8541 nbytes += 4;
8542 }
8543
8544 if (TARGET_PA_20)
8545 {
8546 output_asm_insn ("bve (%%r22)", xoperands);
8547 nbytes += 36;
8548 }
8549 else if (TARGET_NO_SPACE_REGS)
8550 {
8551 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8552 nbytes += 36;
8553 }
8554 else
8555 {
8556 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8557 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8558 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8559 nbytes += 44;
8560 }
8561
8562 if (val_14)
8563 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8564 else
8565 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8566 }
8567 else if (flag_pic)
8568 {
8569 rtx xop[4];
8570
8571 /* Load function address into %r22. */
8572 xop[0] = xoperands[0];
8573 xop[1] = gen_rtx_REG (Pmode, 1);
8574 xop[2] = gen_rtx_REG (Pmode, 22);
8575 pa_output_pic_pcrel_sequence (xop);
8576
8577 if (!val_14)
8578 output_asm_insn ("addil L'%2,%%r26", xoperands);
8579
8580 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8581
8582 if (val_14)
8583 {
8584 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8585 nbytes += 20;
8586 }
8587 else
8588 {
8589 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8590 nbytes += 24;
8591 }
8592 }
8593 else
8594 {
8595 if (!val_14)
8596 output_asm_insn ("addil L'%2,%%r26", xoperands);
8597
8598 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8599 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8600
8601 if (val_14)
8602 {
8603 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8604 nbytes += 12;
8605 }
8606 else
8607 {
8608 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8609 nbytes += 16;
8610 }
8611 }
8612
8613 final_end_function ();
8614
8615 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8616 {
8617 switch_to_section (data_section);
8618 output_asm_insn (".align 4", xoperands);
8619 ASM_OUTPUT_LABEL (file, label);
8620 output_asm_insn (".word P'%0", xoperands);
8621 }
8622
8623 current_thunk_number++;
8624 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8625 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8626 last_address += nbytes;
8627 if (old_last_address > last_address)
8628 last_address = UINT_MAX;
8629 update_total_code_bytes (nbytes);
8630 }
8631
8632 /* Only direct calls to static functions are allowed to be sibling (tail)
8633 call optimized.
8634
8635 This restriction is necessary because some linker-generated stubs will
8636 store return pointers into rp' in some cases, which might clobber a
8637 live value already in rp'.
8638
8639 In a sibcall the current function and the target function share stack
8640 space. Thus if the path to the current function and the path to the
8641 target function save a value in rp', they save the value into the
8642 same stack slot, which has undesirable consequences.
8643
8644 Because of the deferred binding nature of shared libraries any function
8645 with external scope could be in a different load module and thus require
8646 rp' to be saved when calling that function. So sibcall optimizations
8647 can only be safe for static function.
8648
8649 Note that GCC never needs return value relocations, so we don't have to
8650 worry about static calls with return value relocations (which require
8651 saving rp').
8652
8653 It is safe to perform a sibcall optimization when the target function
8654 will never return. */
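/* An illustrative example (editorial, not from the sources):

	static int f (int x) { return x + 1; }
	int g (int x) { return f (x); }    -- binds locally, may be sibcalled
	extern int h (int);
	int k (int x) { return h (x); }    -- may cross load modules, rejected

   Only calls that bind locally, like the call to f, pass the check
   below on 32-bit targets.  */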
8655 static bool
8656 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8657 {
8658 /* Sibcalls are not ok because the arg pointer register is not a fixed
8659 register. This prevents the sibcall optimization from occurring. In
8660 addition, there are problems with stub placement using GNU ld. This
8661 is because a normal sibcall branch uses a 17-bit relocation while
8662 a regular call branch uses a 22-bit relocation. As a result, more
8663 care needs to be taken in the placement of long-branch stubs. */
8664 if (TARGET_64BIT)
8665 return false;
8666
8667 if (TARGET_PORTABLE_RUNTIME)
8668 return false;
8669
8670 /* Sibcalls are only ok within a translation unit. */
8671 return decl && targetm.binds_local_p (decl);
8672 }
8673
8674 /* ??? Addition is not commutative on the PA due to the weird implicit
8675 space register selection rules for memory addresses. Therefore, we
8676 don't consider a + b == b + a, as this might be inside a MEM. */
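/* For example (an editorial sketch with hypothetical registers): in the
   address (plus (reg %r26) (reg %r25)), the implicit space register is
   selected from the base register, so rewriting the sum as
   (plus (reg %r25) (reg %r26)) inside a MEM could select a different
   space register even though the arithmetic value is unchanged.  */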
8677 static bool
8678 pa_commutative_p (const_rtx x, int outer_code)
8679 {
8680 return (COMMUTATIVE_P (x)
8681 && (TARGET_NO_SPACE_REGS
8682 || (outer_code != UNKNOWN && outer_code != MEM)
8683 || GET_CODE (x) != PLUS));
8684 }
8685
8686 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8687 use in fmpyadd instructions. */
8688 int
8689 pa_fmpyaddoperands (rtx *operands)
8690 {
8691 machine_mode mode = GET_MODE (operands[0]);
8692
8693 /* Must be a floating point mode. */
8694 if (mode != SFmode && mode != DFmode)
8695 return 0;
8696
8697 /* All modes must be the same. */
8698 if (! (mode == GET_MODE (operands[1])
8699 && mode == GET_MODE (operands[2])
8700 && mode == GET_MODE (operands[3])
8701 && mode == GET_MODE (operands[4])
8702 && mode == GET_MODE (operands[5])))
8703 return 0;
8704
8705 /* All operands must be registers. */
8706 if (! (GET_CODE (operands[1]) == REG
8707 && GET_CODE (operands[2]) == REG
8708 && GET_CODE (operands[3]) == REG
8709 && GET_CODE (operands[4]) == REG
8710 && GET_CODE (operands[5]) == REG))
8711 return 0;
8712
8713 /* Only 2 real operands to the addition. One of the input operands must
8714 be the same as the output operand. */
8715 if (! rtx_equal_p (operands[3], operands[4])
8716 && ! rtx_equal_p (operands[3], operands[5]))
8717 return 0;
8718
8719 /* Inout operand of add cannot conflict with any operands from multiply. */
8720 if (rtx_equal_p (operands[3], operands[0])
8721 || rtx_equal_p (operands[3], operands[1])
8722 || rtx_equal_p (operands[3], operands[2]))
8723 return 0;
8724
8725 /* The multiply cannot feed into the addition operands. */
8726 if (rtx_equal_p (operands[4], operands[0])
8727 || rtx_equal_p (operands[5], operands[0]))
8728 return 0;
8729
8730 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8731 if (mode == SFmode
8732 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8733 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8734 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8735 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8736 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8737 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8738 return 0;
8739
8740 /* Passed. Operands are suitable for fmpyadd. */
8741 return 1;
8742 }
8743
8744 #if !defined(USE_COLLECT2)
8745 static void
8746 pa_asm_out_constructor (rtx symbol, int priority)
8747 {
8748 if (!function_label_operand (symbol, VOIDmode))
8749 pa_encode_label (symbol);
8750
8751 #ifdef CTORS_SECTION_ASM_OP
8752 default_ctor_section_asm_out_constructor (symbol, priority);
8753 #else
8754 # ifdef TARGET_ASM_NAMED_SECTION
8755 default_named_section_asm_out_constructor (symbol, priority);
8756 # else
8757 default_stabs_asm_out_constructor (symbol, priority);
8758 # endif
8759 #endif
8760 }
8761
8762 static void
8763 pa_asm_out_destructor (rtx symbol, int priority)
8764 {
8765 if (!function_label_operand (symbol, VOIDmode))
8766 pa_encode_label (symbol);
8767
8768 #ifdef DTORS_SECTION_ASM_OP
8769 default_dtor_section_asm_out_destructor (symbol, priority);
8770 #else
8771 # ifdef TARGET_ASM_NAMED_SECTION
8772 default_named_section_asm_out_destructor (symbol, priority);
8773 # else
8774 default_stabs_asm_out_destructor (symbol, priority);
8775 # endif
8776 #endif
8777 }
8778 #endif
8779
8780 /* This function places uninitialized global data in the bss section.
8781 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8782 function on the SOM port to prevent uninitialized global data from
8783 being placed in the data section. */
8784
8785 void
8786 pa_asm_output_aligned_bss (FILE *stream,
8787 const char *name,
8788 unsigned HOST_WIDE_INT size,
8789 unsigned int align)
8790 {
8791 switch_to_section (bss_section);
8792 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8793
8794 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8795 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8796 #endif
8797
8798 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8799 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8800 #endif
8801
8802 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8803 ASM_OUTPUT_LABEL (stream, name);
8804 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8805 }
8806
8807 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8808 that doesn't allow the alignment of global common storage to be directly
8809 specified. The SOM linker aligns common storage based on the rounded
8810 value of the NUM_BYTES parameter in the .comm directive. It's not
8811 possible to use the .align directive as it doesn't affect the alignment
8812 of the label associated with a .comm directive. */
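/* A hypothetical example of the emitted directive: for NAME "buf",
   SIZE 10, and an 8-byte (64-bit) ALIGN request, the code below emits

	buf	.comm 10

   while a 16-byte alignment request would round NUM_BYTES up and emit
   ".comm 16" instead, since the SOM linker derives the alignment from
   the rounded size.  */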
8813
8814 void
8815 pa_asm_output_aligned_common (FILE *stream,
8816 const char *name,
8817 unsigned HOST_WIDE_INT size,
8818 unsigned int align)
8819 {
8820 unsigned int max_common_align;
8821
8822 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8823 if (align > max_common_align)
8824 {
8825 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8826 "for global common data. Using %u",
8827 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8828 align = max_common_align;
8829 }
8830
8831 switch_to_section (bss_section);
8832
8833 assemble_name (stream, name);
8834 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8835 MAX (size, align / BITS_PER_UNIT));
8836 }
8837
8838 /* We can't use .comm for local common storage as the SOM linker effectively
8839 treats the symbol as universal and uses the same storage for local symbols
8840 with the same name in different object files. The .block directive
8841 reserves an uninitialized block of storage. However, it's not common
8842 storage. Fortunately, GCC never requests common storage with the same
8843 name in any given translation unit. */
8844
8845 void
8846 pa_asm_output_aligned_local (FILE *stream,
8847 const char *name,
8848 unsigned HOST_WIDE_INT size,
8849 unsigned int align)
8850 {
8851 switch_to_section (bss_section);
8852 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8853
8854 #ifdef LOCAL_ASM_OP
8855 fprintf (stream, "%s", LOCAL_ASM_OP);
8856 assemble_name (stream, name);
8857 fprintf (stream, "\n");
8858 #endif
8859
8860 ASM_OUTPUT_LABEL (stream, name);
8861 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8862 }
8863
8864 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8865 use in fmpysub instructions. */
8866 int
8867 pa_fmpysuboperands (rtx *operands)
8868 {
8869 machine_mode mode = GET_MODE (operands[0]);
8870
8871 /* Must be a floating point mode. */
8872 if (mode != SFmode && mode != DFmode)
8873 return 0;
8874
8875 /* All modes must be the same. */
8876 if (! (mode == GET_MODE (operands[1])
8877 && mode == GET_MODE (operands[2])
8878 && mode == GET_MODE (operands[3])
8879 && mode == GET_MODE (operands[4])
8880 && mode == GET_MODE (operands[5])))
8881 return 0;
8882
8883 /* All operands must be registers. */
8884 if (! (GET_CODE (operands[1]) == REG
8885 && GET_CODE (operands[2]) == REG
8886 && GET_CODE (operands[3]) == REG
8887 && GET_CODE (operands[4]) == REG
8888 && GET_CODE (operands[5]) == REG))
8889 return 0;
8890
8891 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8892 operation, so operands[4] must be the same as operands[3]. */
8893 if (! rtx_equal_p (operands[3], operands[4]))
8894 return 0;
8895
8896 /* The multiply cannot feed into the subtraction. */
8897 if (rtx_equal_p (operands[5], operands[0]))
8898 return 0;
8899
8900 /* Inout operand of sub cannot conflict with any operands from multiply. */
8901 if (rtx_equal_p (operands[3], operands[0])
8902 || rtx_equal_p (operands[3], operands[1])
8903 || rtx_equal_p (operands[3], operands[2]))
8904 return 0;
8905
8906 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8907 if (mode == SFmode
8908 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8909 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8910 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8911 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8912 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8913 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8914 return 0;
8915
8916 /* Passed. Operands are suitable for fmpysub. */
8917 return 1;
8918 }
8919
8920 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8921 constants for a MULT embedded inside a memory address. */
8922 int
8923 pa_mem_shadd_constant_p (int val)
8924 {
8925 if (val == 2 || val == 4 || val == 8)
8926 return 1;
8927 else
8928 return 0;
8929 }
8930
8931 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8932 constants for shadd instructions. */
8933 int
8934 pa_shadd_constant_p (int val)
8935 {
8936 if (val == 1 || val == 2 || val == 3)
8937 return 1;
8938 else
8939 return 0;
8940 }
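/* An illustrative tie between the two predicates (editorial, with
   hypothetical registers): the address

	(mem (plus (mult (reg %r19) (const_int 4)) (reg %r20)))

   uses the mem_shadd constant 4, which corresponds to the shadd shift
   count 2, i.e. a scaled-index sequence built around sh2add.  */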
8941
8942 /* Return TRUE if INSN branches forward. */
8943
8944 static bool
8945 forward_branch_p (rtx_insn *insn)
8946 {
8947 rtx lab = JUMP_LABEL (insn);
8948
8949 /* The INSN must have a jump label. */
8950 gcc_assert (lab != NULL_RTX);
8951
8952 if (INSN_ADDRESSES_SET_P ())
8953 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8954
8955 while (insn)
8956 {
8957 if (insn == lab)
8958 return true;
8959 else
8960 insn = NEXT_INSN (insn);
8961 }
8962
8963 return false;
8964 }
8965
8966 /* Output an unconditional move and branch insn. */
8967
8968 const char *
8969 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8970 {
8971 int length = get_attr_length (insn);
8972
8973 /* These are the cases in which we win. */
8974 if (length == 4)
8975 return "mov%I1b,tr %1,%0,%2";
8976
8977 /* None of the following cases win, but they don't lose either. */
8978 if (length == 8)
8979 {
8980 if (dbr_sequence_length () == 0)
8981 {
8982 /* Nothing in the delay slot, fake it by putting the combined
8983 insn (the copy or add) in the delay slot of a bl. */
8984 if (GET_CODE (operands[1]) == CONST_INT)
8985 return "b %2\n\tldi %1,%0";
8986 else
8987 return "b %2\n\tcopy %1,%0";
8988 }
8989 else
8990 {
8991 /* Something in the delay slot, but we've got a long branch. */
8992 if (GET_CODE (operands[1]) == CONST_INT)
8993 return "ldi %1,%0\n\tb %2";
8994 else
8995 return "copy %1,%0\n\tb %2";
8996 }
8997 }
8998
8999 if (GET_CODE (operands[1]) == CONST_INT)
9000 output_asm_insn ("ldi %1,%0", operands);
9001 else
9002 output_asm_insn ("copy %1,%0", operands);
9003 return pa_output_lbranch (operands[2], insn, 1);
9004 }
9005
9006 /* Output an unconditional add and branch insn. */
9007
9008 const char *
9009 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
9010 {
9011 int length = get_attr_length (insn);
9012
9013 /* To make life easy we want operand0 to be the shared input/output
9014 operand and operand1 to be the read-only operand. */
9015 if (operands[0] == operands[1])
9016 operands[1] = operands[2];
9017
9018 /* These are the cases in which we win. */
9019 if (length == 4)
9020 return "add%I1b,tr %1,%0,%3";
9021
9022 /* None of the following cases win, but they don't lose either. */
9023 if (length == 8)
9024 {
9025 if (dbr_sequence_length () == 0)
9026 /* Nothing in the delay slot, fake it by putting the combined
9027 insn (the copy or add) in the delay slot of a bl. */
9028 return "b %3\n\tadd%I1 %1,%0,%0";
9029 else
9030 /* Something in the delay slot, but we've got a long branch. */
9031 return "add%I1 %1,%0,%0\n\tb %3";
9032 }
9033
9034 output_asm_insn ("add%I1 %1,%0,%0", operands);
9035 return pa_output_lbranch (operands[3], insn, 1);
9036 }
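
/* Editorial example with assumed operands (%r26 as the in/out register,
   %r25 as the read-only register, L$0042 as the label); the three cases
   above would emit roughly:

       addb,tr %r25,%r26,L$0042      ; length 4: combined add-and-branch

       b       L$0042                ; length 8, empty delay slot:
       add     %r25,%r26,%r26        ;   add placed in the delay slot

       add     %r25,%r26,%r26        ; length 8, occupied delay slot:
       b       L$0042                ;   add first, then branch

   Anything longer falls back to pa_output_lbranch.  */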
9037
9038 /* We use this hook to perform a PA specific optimization which is difficult
9039 to do in earlier passes. */
9040
9041 static void
9042 pa_reorg (void)
9043 {
9044 remove_useless_addtr_insns (1);
9045
9046 if (pa_cpu < PROCESSOR_8000)
9047 pa_combine_instructions ();
9048 }
9049
9050 /* The PA has a number of odd instructions which can perform multiple
9051 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9052 it may be profitable to combine two instructions into one instruction
9053 with two outputs. It's not profitable PA2.0 machines because the
9054 two outputs would take two slots in the reorder buffers.
9055
9056 This routine finds instructions which can be combined and combines
9057 them. We only support some of the potential combinations, and we
9058 only try common ways to find suitable instructions.
9059
9060 * addb can add two registers or a register and a small integer
9061 and jump to a nearby (+-8k) location. Normally the jump to the
9062 nearby location is conditional on the result of the add, but by
9063 using the "true" condition we can make the jump unconditional.
9064 Thus addb can perform two independent operations in one insn.
9065
9066 * movb is similar to addb in that it can perform a reg->reg
9067 or small immediate->reg copy and jump to a nearby (+-8k) location.
9068
9069 * fmpyadd and fmpysub can perform a FP multiply and either an
9070 FP add or FP sub if the operands of the multiply and add/sub are
9071 independent (there are other minor restrictions). Note both
9072 the fmpy and fadd/fsub can in theory move to better spots according
9073 to data dependencies, but for now we require the fmpy stay at a
9074 fixed location.
9075
9076 * Many of the memory operations can perform pre & post updates
9077 of index registers. GCC's pre/post increment/decrement addressing
9078 is far too simple to take advantage of all the possibilities. This
9079 pass may not be suitable since those insns may not be independent.
9080
9081 * comclr can compare two ints or an int and a register, nullify
9082 the following instruction and zero some other register. This
9083 is more difficult to use as it's harder to find an insn which
9084 will generate a comclr than finding something like an unconditional
9085 branch. (conditional moves & long branches create comclr insns).
9086
9087 * Most arithmetic operations can conditionally skip the next
9088 instruction. They can be viewed as "perform this operation
9089 and conditionally jump to this nearby location" (where nearby
9090 is one insn away). These are difficult to use due to the
9091 branch length restrictions. */
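
/* Editorial example with assumed registers: the movb combination described
   above turns a separate copy and branch such as

       copy    %r4,%r26
       b,n     L$0017

   into a single unconditional move-and-branch

       movb,tr %r4,%r26,L$0017

   saving one insn and the nullified delay slot.  */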
9092
9093 static void
9094 pa_combine_instructions (void)
9095 {
9096 rtx_insn *anchor;
9097
9098 /* This can get expensive since the basic algorithm is on the
9099 order of O(n^2) (or worse). Only do it for -O2 or higher
9100 levels of optimization. */
9101 if (optimize < 2)
9102 return;
9103
9104 /* Walk down the list of insns looking for "anchor" insns which
9105 may be combined with "floating" insns. As the name implies,
9106 "anchor" instructions don't move, while "floating" insns may
9107 move around. */
9108 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9109 rtx_insn *new_rtx = make_insn_raw (par);
9110
9111 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9112 {
9113 enum attr_pa_combine_type anchor_attr;
9114 enum attr_pa_combine_type floater_attr;
9115
9116 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9117 Also ignore any special USE and CLOBBER insns. */
9118 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9119 || GET_CODE (PATTERN (anchor)) == USE
9120 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9121 continue;
9122
9123 anchor_attr = get_attr_pa_combine_type (anchor);
9124 /* See if anchor is an insn suitable for combination. */
9125 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9126 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9127 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9128 && ! forward_branch_p (anchor)))
9129 {
9130 rtx_insn *floater;
9131
9132 for (floater = PREV_INSN (anchor);
9133 floater;
9134 floater = PREV_INSN (floater))
9135 {
9136 if (NOTE_P (floater)
9137 || (NONJUMP_INSN_P (floater)
9138 && (GET_CODE (PATTERN (floater)) == USE
9139 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9140 continue;
9141
9142 /* Anything except a regular INSN will stop our search. */
9143 if (! NONJUMP_INSN_P (floater))
9144 {
9145 floater = NULL;
9146 break;
9147 }
9148
9149 /* See if FLOATER is suitable for combination with the
9150 anchor. */
9151 floater_attr = get_attr_pa_combine_type (floater);
9152 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9153 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9154 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9155 && floater_attr == PA_COMBINE_TYPE_FMPY))
9156 {
9157 /* If ANCHOR and FLOATER can be combined, then we're
9158 done with this pass. */
9159 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9160 SET_DEST (PATTERN (floater)),
9161 XEXP (SET_SRC (PATTERN (floater)), 0),
9162 XEXP (SET_SRC (PATTERN (floater)), 1)))
9163 break;
9164 }
9165
9166 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9167 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9168 {
9169 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9170 {
9171 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9172 SET_DEST (PATTERN (floater)),
9173 XEXP (SET_SRC (PATTERN (floater)), 0),
9174 XEXP (SET_SRC (PATTERN (floater)), 1)))
9175 break;
9176 }
9177 else
9178 {
9179 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9180 SET_DEST (PATTERN (floater)),
9181 SET_SRC (PATTERN (floater)),
9182 SET_SRC (PATTERN (floater))))
9183 break;
9184 }
9185 }
9186 }
9187
9188 /* If we didn't find anything on the backwards scan try forwards. */
9189 if (!floater
9190 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9191 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9192 {
9193 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9194 {
9195 if (NOTE_P (floater)
9196 || (NONJUMP_INSN_P (floater)
9197 && (GET_CODE (PATTERN (floater)) == USE
9198 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9200 continue;
9201
9202 /* Anything except a regular INSN will stop our search. */
9203 if (! NONJUMP_INSN_P (floater))
9204 {
9205 floater = NULL;
9206 break;
9207 }
9208
9209 /* See if FLOATER is suitable for combination with the
9210 anchor. */
9211 floater_attr = get_attr_pa_combine_type (floater);
9212 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9213 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9214 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9215 && floater_attr == PA_COMBINE_TYPE_FMPY))
9216 {
9217 /* If ANCHOR and FLOATER can be combined, then we're
9218 done with this pass. */
9219 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9220 SET_DEST (PATTERN (floater)),
9221 XEXP (SET_SRC (PATTERN (floater)),
9222 0),
9223 XEXP (SET_SRC (PATTERN (floater)),
9224 1)))
9225 break;
9226 }
9227 }
9228 }
9229
9230 /* FLOATER will be nonzero if we found a suitable floating
9231 insn for combination with ANCHOR. */
9232 if (floater
9233 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9234 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9235 {
9236 /* Emit the new instruction and delete the old anchor. */
9237 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9238 copy_rtx (PATTERN (floater)));
9239 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9240 emit_insn_before (temp, anchor);
9241
9242 SET_INSN_DELETED (anchor);
9243
9244 /* Emit a special USE insn for FLOATER, then delete
9245 the floating insn. */
9246 temp = copy_rtx (PATTERN (floater));
9247 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9248 delete_insn (floater);
9249
9250 continue;
9251 }
9252 else if (floater
9253 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9254 {
9255 /* Emit the new_jump instruction and delete the old anchor. */
9256 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9257 copy_rtx (PATTERN (floater)));
9258 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9259 temp = emit_jump_insn_before (temp, anchor);
9260
9261 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9262 SET_INSN_DELETED (anchor);
9263
9264 /* Emit a special USE insn for FLOATER, then delete
9265 the floating insn. */
9266 temp = copy_rtx (PATTERN (floater));
9267 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9268 delete_insn (floater);
9269 continue;
9270 }
9271 }
9272 }
9273 }
9274
9275 static int
9276 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9277 int reversed, rtx dest,
9278 rtx src1, rtx src2)
9279 {
9280 int insn_code_number;
9281 rtx_insn *start, *end;
9282
9283 /* Create a PARALLEL with the patterns of ANCHOR and
9284 FLOATER, try to recognize it, then test constraints
9285 for the resulting pattern.
9286
9287 If the pattern doesn't match or the constraints
9288 aren't met keep searching for a suitable floater
9289 insn. */
9290 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9291 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9292 INSN_CODE (new_rtx) = -1;
9293 insn_code_number = recog_memoized (new_rtx);
9294 basic_block bb = BLOCK_FOR_INSN (anchor);
9295 if (insn_code_number < 0
9296 || (extract_insn (new_rtx),
9297 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9298 return 0;
9299
9300 if (reversed)
9301 {
9302 start = anchor;
9303 end = floater;
9304 }
9305 else
9306 {
9307 start = floater;
9308 end = anchor;
9309 }
9310
9311 /* There are up to three operands to consider: one
9312 output and two inputs.
9313
9314 The output must not be used between FLOATER & ANCHOR
9315 exclusive. The inputs must not be set between
9316 FLOATER and ANCHOR exclusive. */
9317
9318 if (reg_used_between_p (dest, start, end))
9319 return 0;
9320
9321 if (reg_set_between_p (src1, start, end))
9322 return 0;
9323
9324 if (reg_set_between_p (src2, start, end))
9325 return 0;
9326
9327 /* If we get here, then everything is good. */
9328 return 1;
9329 }
9330
9331 /* Return nonzero if references for INSN are delayed.
9332
9333 Millicode insns are actually function calls with some special
9334 constraints on arguments and register usage.
9335
9336 Millicode calls always expect their arguments in the integer argument
9337 registers, and always return their result in %r29 (ret1). They
9338 are expected to clobber their arguments, %r1, %r29, and the return
9339 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9340
9341 This function tells reorg that the references to arguments and
9342 millicode calls do not appear to happen until after the millicode call.
9343 This allows reorg to put insns which set the argument registers into the
9344 delay slot of the millicode call -- thus they act more like traditional
9345 CALL_INSNs.
9346
9347 Note we cannot consider side effects of the insn to be delayed because
9348 the branch and link insn will clobber the return pointer. If we happened
9349 to use the return pointer in the delay slot of the call, then we lose.
9350
9351 get_attr_type will try to recognize the given insn, so make sure to
9352 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9353 in particular. */
9354 int
9355 pa_insn_refs_are_delayed (rtx_insn *insn)
9356 {
9357 return ((NONJUMP_INSN_P (insn)
9358 && GET_CODE (PATTERN (insn)) != SEQUENCE
9359 && GET_CODE (PATTERN (insn)) != USE
9360 && GET_CODE (PATTERN (insn)) != CLOBBER
9361 && get_attr_type (insn) == TYPE_MILLI));
9362 }
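
/* Editorial example with assumed registers: because argument references
   are reported as delayed, reorg may fill a millicode call's delay slot
   with an argument setup insn, e.g.

       copy    %r5,%r26
       bl      $$mulI,%r31
       copy    %r4,%r25          ; executes in the delay slot

   with the result coming back in %r29 as described above.  */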
9363
9364 /* Promote the return value, but not the arguments. */
9365
9366 static machine_mode
9367 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9368 machine_mode mode,
9369 int *punsignedp ATTRIBUTE_UNUSED,
9370 const_tree fntype ATTRIBUTE_UNUSED,
9371 int for_return)
9372 {
9373 if (for_return == 0)
9374 return mode;
9375 return promote_mode (type, mode, punsignedp);
9376 }
9377
9378 /* On the HP-PA the value is found in register(s) 28(-29), unless
9379 the mode is SF or DF. Then the value is returned in fr4 (32).
9380
9381 This must perform the same promotions as PROMOTE_MODE, else promoting
9382 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9383
9384 Small structures must be returned in a PARALLEL on PA64 in order
9385 to match the HP Compiler ABI. */
9386
9387 static rtx
9388 pa_function_value (const_tree valtype,
9389 const_tree func ATTRIBUTE_UNUSED,
9390 bool outgoing ATTRIBUTE_UNUSED)
9391 {
9392 machine_mode valmode;
9393
9394 if (AGGREGATE_TYPE_P (valtype)
9395 || TREE_CODE (valtype) == COMPLEX_TYPE
9396 || TREE_CODE (valtype) == VECTOR_TYPE)
9397 {
9398 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9399
9400 /* Handle aggregates that fit exactly in a word or double word. */
9401 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9402 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9403
9404 if (TARGET_64BIT)
9405 {
9406 /* Aggregates with a size less than or equal to 128 bits are
9407 returned in GR 28(-29). They are left justified. The pad
9408 bits are undefined. Larger aggregates are returned in
9409 memory. */
9410 rtx loc[2];
9411 int i, offset = 0;
9412 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9413
9414 for (i = 0; i < ub; i++)
9415 {
9416 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9417 gen_rtx_REG (DImode, 28 + i),
9418 GEN_INT (offset));
9419 offset += 8;
9420 }
9421
9422 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9423 }
9424 else if (valsize > UNITS_PER_WORD)
9425 {
9426 /* Aggregates 5 to 8 bytes in size are returned in general
9427 registers r28-r29 in the same manner as other non
9428 floating-point objects. The data is right-justified and
9429 zero-extended to 64 bits. This is opposite to the normal
9430 justification used on big endian targets and requires
9431 special treatment. */
9432 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9433 gen_rtx_REG (DImode, 28), const0_rtx);
9434 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9435 }
9436 }
9437
9438 if ((INTEGRAL_TYPE_P (valtype)
9439 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9440 || POINTER_TYPE_P (valtype))
9441 valmode = word_mode;
9442 else
9443 valmode = TYPE_MODE (valtype);
9444
9445 if (TREE_CODE (valtype) == REAL_TYPE
9446 && !AGGREGATE_TYPE_P (valtype)
9447 && TYPE_MODE (valtype) != TFmode
9448 && !TARGET_SOFT_FLOAT)
9449 return gen_rtx_REG (valmode, 32);
9450
9451 return gen_rtx_REG (valmode, 28);
9452 }
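
/* Editorial sketch, not part of the original source: a simplified model of
   the scalar case above.  Hardware-FP scalars other than TFmode return in
   fr4 (register 32); everything else, including soft-float values, returns
   in gr28: */

static int
pa_example_scalar_value_regno (int is_float, int is_tfmode, int soft_float)
{
  if (is_float && !is_tfmode && !soft_float)
    return 32;          /* fr4 */
  return 28;            /* gr28, with gr29 used for double-word values */
}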
9453
9454 /* Implement the TARGET_LIBCALL_VALUE hook. */
9455
9456 static rtx
9457 pa_libcall_value (machine_mode mode,
9458 const_rtx fun ATTRIBUTE_UNUSED)
9459 {
9460 if (! TARGET_SOFT_FLOAT
9461 && (mode == SFmode || mode == DFmode))
9462 return gen_rtx_REG (mode, 32);
9463 else
9464 return gen_rtx_REG (mode, 28);
9465 }
9466
9467 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9468
9469 static bool
9470 pa_function_value_regno_p (const unsigned int regno)
9471 {
9472 if (regno == 28
9473 || (! TARGET_SOFT_FLOAT && regno == 32))
9474 return true;
9475
9476 return false;
9477 }
9478
9479 /* Update the data in CUM to advance over an argument
9480 of mode MODE and data type TYPE.
9481 (TYPE is null for libcalls where that information may not be available.) */
9482
9483 static void
9484 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9485 const_tree type, bool named ATTRIBUTE_UNUSED)
9486 {
9487 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9488 int arg_size = pa_function_arg_size (mode, type);
9489
9490 cum->nargs_prototype--;
9491 cum->words += (arg_size
9492 + ((cum->words & 01)
9493 && type != NULL_TREE
9494 && arg_size > 1));
9495 }
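
/* Editorial sketch, not part of the original source: a standalone model of
   the advance rule above.  A multi-word argument that would start on an
   odd word is padded by one slot, so words = 1 with arg_size = 2 advances
   to 4 rather than 3: */

static int
pa_example_advance_words (int words, int arg_size, int have_type)
{
  return words + arg_size + ((words & 1) && have_type && arg_size > 1);
}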
9496
9497 /* Return the location of a parameter that is passed in a register or NULL
9498 if the parameter has any component that is passed in memory.
9499
9500 This is new code and will be pushed into the net sources after
9501 further testing.
9502
9503 ??? We might want to restructure this so that it looks more like other
9504 ports. */
9505 static rtx
9506 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9507 const_tree type, bool named ATTRIBUTE_UNUSED)
9508 {
9509 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9510 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9511 int alignment = 0;
9512 int arg_size;
9513 int fpr_reg_base;
9514 int gpr_reg_base;
9515 rtx retval;
9516
9517 if (mode == VOIDmode)
9518 return NULL_RTX;
9519
9520 arg_size = pa_function_arg_size (mode, type);
9521
9522 /* If this arg would be passed partially or totally on the stack, then
9523 this routine should return zero. pa_arg_partial_bytes will
9524 handle arguments which are split between regs and stack slots if
9525 the ABI mandates split arguments. */
9526 if (!TARGET_64BIT)
9527 {
9528 /* The 32-bit ABI does not split arguments. */
9529 if (cum->words + arg_size > max_arg_words)
9530 return NULL_RTX;
9531 }
9532 else
9533 {
9534 if (arg_size > 1)
9535 alignment = cum->words & 1;
9536 if (cum->words + alignment >= max_arg_words)
9537 return NULL_RTX;
9538 }
9539
9540 /* The 32bit ABIs and the 64bit ABIs are rather different,
9541 particularly in their handling of FP registers. We might
9542 be able to cleverly share code between them, but I'm not
9543 going to bother in the hope that splitting them up results
9544 in code that is more easily understood. */
9545
9546 if (TARGET_64BIT)
9547 {
9548 /* Advance the base registers to their current locations.
9549
9550 Remember, gprs grow towards smaller register numbers while
9551 fprs grow to higher register numbers. Also remember that
9552 although FP regs are 32-bit addressable, we pretend that
9553 the registers are 64-bits wide. */
9554 gpr_reg_base = 26 - cum->words;
9555 fpr_reg_base = 32 + cum->words;
9556
9557 /* Arguments wider than one word and small aggregates need special
9558 treatment. */
9559 if (arg_size > 1
9560 || mode == BLKmode
9561 || (type && (AGGREGATE_TYPE_P (type)
9562 || TREE_CODE (type) == COMPLEX_TYPE
9563 || TREE_CODE (type) == VECTOR_TYPE)))
9564 {
9565 /* Double-extended precision (80-bit), quad-precision (128-bit)
9566 and aggregates including complex numbers are aligned on
9567 128-bit boundaries. The first eight 64-bit argument slots
9568 are associated one-to-one, with general registers r26
9569 through r19, and also with floating-point registers fr4
9570 through fr11. Arguments larger than one word are always
9571 passed in general registers.
9572
9573 Using a PARALLEL with a word mode register results in left
9574 justified data on a big-endian target. */
9575
9576 rtx loc[8];
9577 int i, offset = 0, ub = arg_size;
9578
9579 /* Align the base register. */
9580 gpr_reg_base -= alignment;
9581
9582 ub = MIN (ub, max_arg_words - cum->words - alignment);
9583 for (i = 0; i < ub; i++)
9584 {
9585 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9586 gen_rtx_REG (DImode, gpr_reg_base),
9587 GEN_INT (offset));
9588 gpr_reg_base -= 1;
9589 offset += 8;
9590 }
9591
9592 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9593 }
9594 }
9595 else
9596 {
9597 /* If the argument is larger than a word, then we know precisely
9598 which registers we must use. */
9599 if (arg_size > 1)
9600 {
9601 if (cum->words)
9602 {
9603 gpr_reg_base = 23;
9604 fpr_reg_base = 38;
9605 }
9606 else
9607 {
9608 gpr_reg_base = 25;
9609 fpr_reg_base = 34;
9610 }
9611
9612 /* Structures 5 to 8 bytes in size are passed in the general
9613 registers in the same manner as other non floating-point
9614 objects. The data is right-justified and zero-extended
9615 to 64 bits. This is opposite to the normal justification
9616 used on big endian targets and requires special treatment.
9617 We now define BLOCK_REG_PADDING to pad these objects.
9618 Aggregates, complex and vector types are passed in the same
9619 manner as structures. */
9620 if (mode == BLKmode
9621 || (type && (AGGREGATE_TYPE_P (type)
9622 || TREE_CODE (type) == COMPLEX_TYPE
9623 || TREE_CODE (type) == VECTOR_TYPE)))
9624 {
9625 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9626 gen_rtx_REG (DImode, gpr_reg_base),
9627 const0_rtx);
9628 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9629 }
9630 }
9631 else
9632 {
9633 /* We have a single word (32 bits). A simple computation
9634 will get us the register #s we need. */
9635 gpr_reg_base = 26 - cum->words;
9636 fpr_reg_base = 32 + 2 * cum->words;
9637 }
9638 }
9639
9640 /* Determine if the argument needs to be passed in both general and
9641 floating point registers. */
9642 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9643 /* If we are doing soft-float with portable runtime, then there
9644 is no need to worry about FP regs. */
9645 && !TARGET_SOFT_FLOAT
9646 /* The parameter must be some kind of scalar float, else we just
9647 pass it in integer registers. */
9648 && GET_MODE_CLASS (mode) == MODE_FLOAT
9649 /* The target function must not have a prototype. */
9650 && cum->nargs_prototype <= 0
9651 /* libcalls do not need to pass items in both FP and general
9652 registers. */
9653 && type != NULL_TREE
9654 /* All this hair applies to "outgoing" args only. This includes
9655 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9656 && !cum->incoming)
9657 /* Also pass outgoing floating arguments in both registers in indirect
9658 calls with the 32 bit ABI and the HP assembler since there is no
9659 way to specify argument locations in static functions. */
9660 || (!TARGET_64BIT
9661 && !TARGET_GAS
9662 && !cum->incoming
9663 && cum->indirect
9664 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9665 {
9666 retval
9667 = gen_rtx_PARALLEL
9668 (mode,
9669 gen_rtvec (2,
9670 gen_rtx_EXPR_LIST (VOIDmode,
9671 gen_rtx_REG (mode, fpr_reg_base),
9672 const0_rtx),
9673 gen_rtx_EXPR_LIST (VOIDmode,
9674 gen_rtx_REG (mode, gpr_reg_base),
9675 const0_rtx)));
9676 }
9677 else
9678 {
9679 /* See if we should pass this parameter in a general register. */
9680 if (TARGET_SOFT_FLOAT
9681 /* Indirect calls in the normal 32bit ABI require all arguments
9682 to be passed in general registers. */
9683 || (!TARGET_PORTABLE_RUNTIME
9684 && !TARGET_64BIT
9685 && !TARGET_ELF32
9686 && cum->indirect)
9687 /* If the parameter is not a scalar floating-point parameter,
9688 then it belongs in GPRs. */
9689 || GET_MODE_CLASS (mode) != MODE_FLOAT
9690 /* Structure with single SFmode field belongs in GPR. */
9691 || (type && AGGREGATE_TYPE_P (type)))
9692 retval = gen_rtx_REG (mode, gpr_reg_base);
9693 else
9694 retval = gen_rtx_REG (mode, fpr_reg_base);
9695 }
9696 return retval;
9697 }
9698
9699 /* Arguments larger than one word are double word aligned. */
9700
9701 static unsigned int
9702 pa_function_arg_boundary (machine_mode mode, const_tree type)
9703 {
9704 bool singleword = (type
9705 ? (integer_zerop (TYPE_SIZE (type))
9706 || !TREE_CONSTANT (TYPE_SIZE (type))
9707 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9708 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9709
9710 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9711 }
9712
9713 /* If this arg would be passed totally in registers or totally on the stack,
9714 then this routine should return zero. */
9715
9716 static int
9717 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9718 tree type, bool named ATTRIBUTE_UNUSED)
9719 {
9720 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9721 unsigned int max_arg_words = 8;
9722 unsigned int offset = 0;
9723
9724 if (!TARGET_64BIT)
9725 return 0;
9726
9727 if (pa_function_arg_size (mode, type) > 1 && (cum->words & 1))
9728 offset = 1;
9729
9730 if (cum->words + offset + pa_function_arg_size (mode, type) <= max_arg_words)
9731 /* Arg fits fully into registers. */
9732 return 0;
9733 else if (cum->words + offset >= max_arg_words)
9734 /* Arg fully on the stack. */
9735 return 0;
9736 else
9737 /* Arg is split. */
9738 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9739 }
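
/* Editorial sketch, not part of the original source: a standalone model of
   the 64-bit split computation above.  With eight argument words, a
   four-word argument starting at word 6 gets (8 - 6) * UNITS_PER_WORD
   bytes in registers and the remainder on the stack: */

static int
pa_example_partial_bytes (int words, int arg_size, int units_per_word)
{
  int max_arg_words = 8;
  int offset = (arg_size > 1 && (words & 1)) ? 1 : 0;

  if (words + offset + arg_size <= max_arg_words)
    return 0;                           /* fits fully in registers */
  if (words + offset >= max_arg_words)
    return 0;                           /* entirely on the stack */
  return (max_arg_words - words - offset) * units_per_word;
}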
9740
9741
9742 /* A get_unnamed_section callback for switching to the text section.
9743
9744 This function is only used with SOM. Because we don't support
9745 named subspaces, we can only create a new subspace or switch back
9746 to the default text subspace. */
9747
9748 static void
9749 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9750 {
9751 gcc_assert (TARGET_SOM);
9752 if (TARGET_GAS)
9753 {
9754 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9755 {
9756 /* We only want to emit a .nsubspa directive once at the
9757 start of the function. */
9758 cfun->machine->in_nsubspa = 1;
9759
9760 /* Create a new subspace for the text. This provides
9761 better stub placement and one-only functions. */
9762 if (cfun->decl
9763 && DECL_ONE_ONLY (cfun->decl)
9764 && !DECL_WEAK (cfun->decl))
9765 {
9766 output_section_asm_op ("\t.SPACE $TEXT$\n"
9767 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9768 "ACCESS=44,SORT=24,COMDAT");
9769 return;
9770 }
9771 }
9772 else
9773 {
9774 /* There isn't a current function or the body of the current
9775 function has been completed. So, we are changing to the
9776 text section to output debugging information. Thus, we
9777 need to forget that we are in the text section so that
9778 varasm.c will call us when text_section is selected again. */
9779 gcc_assert (!cfun || !cfun->machine
9780 || cfun->machine->in_nsubspa == 2);
9781 in_section = NULL;
9782 }
9783 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9784 return;
9785 }
9786 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9787 }
9788
9789 /* A get_unnamed_section callback for switching to comdat data
9790 sections. This function is only used with SOM. */
9791
9792 static void
9793 som_output_comdat_data_section_asm_op (const void *data)
9794 {
9795 in_section = NULL;
9796 output_section_asm_op (data);
9797 }
9798
9799 /* Implement TARGET_ASM_INIT_SECTIONS. */
9800
9801 static void
9802 pa_som_asm_init_sections (void)
9803 {
9804 text_section
9805 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9806
9807 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9808 is not being generated. */
9809 som_readonly_data_section
9810 = get_unnamed_section (0, output_section_asm_op,
9811 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9812
9813 /* When secondary definitions are not supported, SOM makes readonly
9814 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9815 the comdat flag. */
9816 som_one_only_readonly_data_section
9817 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9818 "\t.SPACE $TEXT$\n"
9819 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9820 "ACCESS=0x2c,SORT=16,COMDAT");
9821
9823 /* When secondary definitions are not supported, SOM makes data one-only
9824 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9825 som_one_only_data_section
9826 = get_unnamed_section (SECTION_WRITE,
9827 som_output_comdat_data_section_asm_op,
9828 "\t.SPACE $PRIVATE$\n"
9829 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9830 "ACCESS=31,SORT=24,COMDAT");
9831
9832 if (flag_tm)
9833 som_tm_clone_table_section
9834 = get_unnamed_section (0, output_section_asm_op,
9835 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9836
9837 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9838 which reference data within the $TEXT$ space (for example constant
9839 strings in the $LIT$ subspace).
9840
9841 The assemblers (GAS and HP as) both have problems with handling
9842 the difference of two symbols which is the other correct way to
9843 reference constant data during PIC code generation.
9844
9845 So, there's no way to reference constant data which is in the
9846 $TEXT$ space during PIC generation. Instead place all constant
9847 data into the $PRIVATE$ subspace (this reduces sharing, but it
9848 works correctly). */
9849 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9850
9851 /* We must not have a reference to an external symbol defined in a
9852 shared library in a readonly section, else the SOM linker will
9853 complain.
9854
9855 So, we force exception information into the data section. */
9856 exception_section = data_section;
9857 }
9858
9859 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9860
9861 static section *
9862 pa_som_tm_clone_table_section (void)
9863 {
9864 return som_tm_clone_table_section;
9865 }
9866
9867 /* On hpux10, the linker will give an error if we have a reference
9868 in the read-only data section to a symbol defined in a shared
9869 library. Therefore, expressions that might require a reloc can
9870 not be placed in the read-only data section. */
9871
9872 static section *
9873 pa_select_section (tree exp, int reloc,
9874 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9875 {
9876 if (TREE_CODE (exp) == VAR_DECL
9877 && TREE_READONLY (exp)
9878 && !TREE_THIS_VOLATILE (exp)
9879 && DECL_INITIAL (exp)
9880 && (DECL_INITIAL (exp) == error_mark_node
9881 || TREE_CONSTANT (DECL_INITIAL (exp)))
9882 && !reloc)
9883 {
9884 if (TARGET_SOM
9885 && DECL_ONE_ONLY (exp)
9886 && !DECL_WEAK (exp))
9887 return som_one_only_readonly_data_section;
9888 else
9889 return readonly_data_section;
9890 }
9891 else if (CONSTANT_CLASS_P (exp) && !reloc)
9892 return readonly_data_section;
9893 else if (TARGET_SOM
9894 && TREE_CODE (exp) == VAR_DECL
9895 && DECL_ONE_ONLY (exp)
9896 && !DECL_WEAK (exp))
9897 return som_one_only_data_section;
9898 else
9899 return data_section;
9900 }
9901
9902 /* Implement pa_reloc_rw_mask. */
9903
9904 static int
9905 pa_reloc_rw_mask (void)
9906 {
9907 /* We force (const (plus (symbol) (const_int))) to memory when the
9908 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9909 handle this construct in read-only memory and we want to avoid
9910 this for ELF. So, we always force an RTX needing relocation to
9911 the data section. */
9912 return 3;
9913 }
9914
9915 static void
9916 pa_globalize_label (FILE *stream, const char *name)
9917 {
9918 /* We only handle DATA objects here, functions are globalized in
9919 ASM_DECLARE_FUNCTION_NAME. */
9920 if (! FUNCTION_NAME_P (name))
9921 {
9922 fputs ("\t.EXPORT ", stream);
9923 assemble_name (stream, name);
9924 fputs (",DATA\n", stream);
9925 }
9926 }
9927
9928 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9929
9930 static rtx
9931 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9932 int incoming ATTRIBUTE_UNUSED)
9933 {
9934 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9935 }
9936
9937 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9938
9939 bool
9940 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9941 {
9942 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9943 PA64 ABI says that objects larger than 128 bits are returned in memory.
9944 Note, int_size_in_bytes can return -1 if the size of the object is
9945 variable or larger than the maximum value that can be expressed as
9946 a HOST_WIDE_INT. It can also return zero for an empty type. The
9947 simplest way to handle variable and empty types is to pass them in
9948 memory. This avoids problems in defining the boundaries of argument
9949 slots, allocating registers, etc. */
9950 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9951 || int_size_in_bytes (type) <= 0);
9952 }
9953
9954 /* Structure to hold declaration and name of external symbols that are
9955 emitted by GCC. We generate a vector of these symbols and output them
9956 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9957 This avoids putting out names that are never really used. */
9958
9959 typedef struct GTY(()) extern_symbol
9960 {
9961 tree decl;
9962 const char *name;
9963 } extern_symbol;
9964
9965 /* Define gc'd vector type for extern_symbol. */
9966
9967 /* Vector of extern_symbol pointers. */
9968 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9969
9970 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9971 /* Mark DECL (name NAME) as an external reference (assembler output
9972 file FILE). This saves the names to output at the end of the file
9973 if actually referenced. */
9974
9975 void
9976 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9977 {
9978 gcc_assert (file == asm_out_file);
9979 extern_symbol p = {decl, name};
9980 vec_safe_push (extern_symbols, p);
9981 }
9982 #endif
9983
9984 /* Output text required at the end of an assembler file.
9985 This includes deferred plabels and .import directives for
9986 all external symbols that were actually referenced. */
9987
9988 static void
9989 pa_file_end (void)
9990 {
9991 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9992 unsigned int i;
9993 extern_symbol *p;
9994
9995 if (!NO_DEFERRED_PROFILE_COUNTERS)
9996 output_deferred_profile_counters ();
9997 #endif
9998
9999 output_deferred_plabels ();
10000
10001 #ifdef ASM_OUTPUT_EXTERNAL_REAL
10002 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10003 {
10004 tree decl = p->decl;
10005
10006 if (!TREE_ASM_WRITTEN (decl)
10007 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10008 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10009 }
10010
10011 vec_free (extern_symbols);
10012 #endif
10013
10014 if (NEED_INDICATE_EXEC_STACK)
10015 file_end_indicate_exec_stack ();
10016 }
10017
10018 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10019
10020 static bool
10021 pa_can_change_mode_class (machine_mode from, machine_mode to,
10022 reg_class_t rclass)
10023 {
10024 if (from == to)
10025 return true;
10026
10027 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10028 return true;
10029
10030 /* Reject changes to/from modes with zero size. */
10031 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
10032 return false;
10033
10034 /* Reject changes to/from complex and vector modes. */
10035 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10036 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10037 return false;
10038
10039 /* There is no way to load QImode or HImode values directly from memory
10040 to a FP register. SImode loads to the FP registers are not zero
10041 extended. On the 64-bit target, this conflicts with the definition
10042 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
10043 different sizes in the floating-point registers. */
10044 if (MAYBE_FP_REG_CLASS_P (rclass))
10045 return false;
10046
10047 /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
10048 in specific sets of registers. Thus, we cannot allow changing
10049 to a larger mode when it's larger than a word. */
10050 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10051 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10052 return false;
10053
10054 return true;
10055 }
10056
10057 /* Implement TARGET_MODES_TIEABLE_P.
10058
10059 We should return FALSE for QImode and HImode because these modes
10060 are not ok in the floating-point registers. However, this prevents
10061 tying these modes to SImode and DImode in the general registers.
10062 So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
10063 TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
10064 in the floating-point registers. */
10065
10066 static bool
10067 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10068 {
10069 /* Don't tie modes in different classes. */
10070 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10071 return false;
10072
10073 return true;
10074 }
10075
10076 \f
10077 /* Length in units of the trampoline instruction code. */
10078
10079 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10080
10081
10082 /* Output assembler code for a block containing the constant parts
10083 of a trampoline, leaving space for the variable parts.
10084
10085 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10086 and then branches to the specified routine.
10087
10088 This code template is copied from text segment to stack location
10089 and then patched with pa_trampoline_init to contain valid values,
10090 and then entered as a subroutine.
10091
10092 It is best to keep this as small as possible to avoid having to
10093 flush multiple lines in the cache. */
10094
10095 static void
10096 pa_asm_trampoline_template (FILE *f)
10097 {
10098 if (!TARGET_64BIT)
10099 {
10100 fputs ("\tldw 36(%r22),%r21\n", f);
10101 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10102 if (ASSEMBLER_DIALECT == 0)
10103 fputs ("\tdepi 0,31,2,%r21\n", f);
10104 else
10105 fputs ("\tdepwi 0,31,2,%r21\n", f);
10106 fputs ("\tldw 4(%r21),%r19\n", f);
10107 fputs ("\tldw 0(%r21),%r21\n", f);
10108 if (TARGET_PA_20)
10109 {
10110 fputs ("\tbve (%r21)\n", f);
10111 fputs ("\tldw 40(%r22),%r29\n", f);
10112 fputs ("\t.word 0\n", f);
10113 fputs ("\t.word 0\n", f);
10114 }
10115 else
10116 {
10117 fputs ("\tldsid (%r21),%r1\n", f);
10118 fputs ("\tmtsp %r1,%sr0\n", f);
10119 fputs ("\tbe 0(%sr0,%r21)\n", f);
10120 fputs ("\tldw 40(%r22),%r29\n", f);
10121 }
10122 fputs ("\t.word 0\n", f);
10123 fputs ("\t.word 0\n", f);
10124 fputs ("\t.word 0\n", f);
10125 fputs ("\t.word 0\n", f);
10126 }
10127 else
10128 {
10129 fputs ("\t.dword 0\n", f);
10130 fputs ("\t.dword 0\n", f);
10131 fputs ("\t.dword 0\n", f);
10132 fputs ("\t.dword 0\n", f);
10133 fputs ("\tmfia %r31\n", f);
10134 fputs ("\tldd 24(%r31),%r1\n", f);
10135 fputs ("\tldd 24(%r1),%r27\n", f);
10136 fputs ("\tldd 16(%r1),%r1\n", f);
10137 fputs ("\tbve (%r1)\n", f);
10138 fputs ("\tldd 32(%r31),%r31\n", f);
10139 fputs ("\t.dword 0 ; fptr\n", f);
10140 fputs ("\t.dword 0 ; static link\n", f);
10141 }
10142 }
10143
10144 /* Emit RTL insns to initialize the variable parts of a trampoline.
10145 FNADDR is an RTX for the address of the function's pure code.
10146 CXT is an RTX for the static chain value for the function.
10147
10148 Move the function address to the trampoline template at offset 36.
10149 Move the static chain value to trampoline template at offset 40.
10150 Move the trampoline address to trampoline template at offset 44.
10151 Move r19 to trampoline template at offset 48. The latter two
10152 words create a plabel for the indirect call to the trampoline.
10153
10154 A similar sequence is used for the 64-bit port but the plabel is
10155 at the beginning of the trampoline.
10156
10157 Finally, the cache entries for the trampoline code are flushed.
10158 This is necessary to ensure that the trampoline instruction sequence
10159 is written to memory prior to any attempts at prefetching the code
10160 sequence. */
10161
10162 static void
10163 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10164 {
10165 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10166 rtx start_addr = gen_reg_rtx (Pmode);
10167 rtx end_addr = gen_reg_rtx (Pmode);
10168 rtx line_length = gen_reg_rtx (Pmode);
10169 rtx r_tramp, tmp;
10170
10171 emit_block_move (m_tramp, assemble_trampoline_template (),
10172 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10173 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10174
10175 if (!TARGET_64BIT)
10176 {
10177 tmp = adjust_address (m_tramp, Pmode, 36);
10178 emit_move_insn (tmp, fnaddr);
10179 tmp = adjust_address (m_tramp, Pmode, 40);
10180 emit_move_insn (tmp, chain_value);
10181
10182 /* Create a fat pointer for the trampoline. */
10183 tmp = adjust_address (m_tramp, Pmode, 44);
10184 emit_move_insn (tmp, r_tramp);
10185 tmp = adjust_address (m_tramp, Pmode, 48);
10186 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10187
10188 /* fdc and fic only use registers for the address to flush,
10189 they do not accept integer displacements. We align the
10190 start and end addresses to the beginning of their respective
10191 cache lines to minimize the number of lines flushed. */
10192 emit_insn (gen_andsi3 (start_addr, r_tramp,
10193 GEN_INT (-MIN_CACHELINE_SIZE)));
10194 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10195 TRAMPOLINE_CODE_SIZE-1));
10196 emit_insn (gen_andsi3 (end_addr, tmp,
10197 GEN_INT (-MIN_CACHELINE_SIZE)));
10198 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10199 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10200 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10201 gen_reg_rtx (Pmode),
10202 gen_reg_rtx (Pmode)));
10203 }
10204 else
10205 {
10206 tmp = adjust_address (m_tramp, Pmode, 56);
10207 emit_move_insn (tmp, fnaddr);
10208 tmp = adjust_address (m_tramp, Pmode, 64);
10209 emit_move_insn (tmp, chain_value);
10210
10211 /* Create a fat pointer for the trampoline. */
10212 tmp = adjust_address (m_tramp, Pmode, 16);
10213 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10214 r_tramp, 32)));
10215 tmp = adjust_address (m_tramp, Pmode, 24);
10216 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10217
10218 /* fdc and fic only use registers for the address to flush,
10219 they do not accept integer displacements. We align the
10220 start and end addresses to the beginning of their respective
10221 cache lines to minimize the number of lines flushed. */
10222 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10223 emit_insn (gen_anddi3 (start_addr, tmp,
10224 GEN_INT (-MIN_CACHELINE_SIZE)));
10225 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10226 TRAMPOLINE_CODE_SIZE - 1));
10227 emit_insn (gen_anddi3 (end_addr, tmp,
10228 GEN_INT (-MIN_CACHELINE_SIZE)));
10229 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10230 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10231 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10232 gen_reg_rtx (Pmode),
10233 gen_reg_rtx (Pmode)));
10234 }
10235
10236 #ifdef HAVE_ENABLE_EXECUTE_STACK
10237 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10238 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10239 #endif
10240 }
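
/* Editorial sketch, not part of the original source: the 32-bit, non-PA2.0
   trampoline initialized above can be pictured as nine instruction words
   followed by the four data words patched in at offsets 36 through 48
   (the PA2.0 template pads its shorter code to the same data offsets): */

struct pa_example_tramp32
{
  unsigned int code[9];         /* instruction template, bytes 0-35 */
  unsigned int fnaddr;          /* offset 36: target function address */
  unsigned int static_chain;    /* offset 40: static chain value */
  unsigned int plabel_pc;       /* offset 44: trampoline address */
  unsigned int plabel_gp;       /* offset 48: %r19, completing the plabel */
};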
10241
10242 /* Perform any machine-specific adjustment in the address of the trampoline.
10243 ADDR contains the address that was passed to pa_trampoline_init.
10244 Adjust the trampoline address to point to the plabel at offset 44. */
10245
10246 static rtx
10247 pa_trampoline_adjust_address (rtx addr)
10248 {
10249 if (!TARGET_64BIT)
10250 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10251 return addr;
10252 }
10253
10254 static rtx
10255 pa_delegitimize_address (rtx orig_x)
10256 {
10257 rtx x = delegitimize_mem_from_attrs (orig_x);
10258
10259 if (GET_CODE (x) == LO_SUM
10260 && GET_CODE (XEXP (x, 1)) == UNSPEC
10261 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10262 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10263 return x;
10264 }
10265 \f
10266 static rtx
10267 pa_internal_arg_pointer (void)
10268 {
10269 /* The argument pointer and the hard frame pointer are the same in
10270 the 32-bit runtime, so we don't need a copy. */
10271 if (TARGET_64BIT)
10272 return copy_to_reg (virtual_incoming_args_rtx);
10273 else
10274 return virtual_incoming_args_rtx;
10275 }
10276
10277 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10278 Frame pointer elimination is automatically handled. */
10279
10280 static bool
10281 pa_can_eliminate (const int from, const int to)
10282 {
10283 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10284 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10285 return false;
10286
10287 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10288 ? ! frame_pointer_needed
10289 : true);
10290 }
10291
10292 /* Define the offset between two registers, FROM to be eliminated and its
10293 replacement TO, at the start of a routine. */
10294 HOST_WIDE_INT
10295 pa_initial_elimination_offset (int from, int to)
10296 {
10297 HOST_WIDE_INT offset;
10298
10299 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10300 && to == STACK_POINTER_REGNUM)
10301 offset = -pa_compute_frame_size (get_frame_size (), 0);
10302 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10303 offset = 0;
10304 else
10305 gcc_unreachable ();
10306
10307 return offset;
10308 }
10309
10310 static void
10311 pa_conditional_register_usage (void)
10312 {
10313 int i;
10314
10315 if (!TARGET_64BIT && !TARGET_PA_11)
10316 {
10317 for (i = 56; i <= FP_REG_LAST; i++)
10318 fixed_regs[i] = call_used_regs[i] = 1;
10319 for (i = 33; i < 56; i += 2)
10320 fixed_regs[i] = call_used_regs[i] = 1;
10321 }
10322 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10323 {
10324 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10325 fixed_regs[i] = call_used_regs[i] = 1;
10326 }
10327 if (flag_pic)
10328 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10329 }
10330
10331 /* Target hook for c_mode_for_suffix. */
10332
10333 static machine_mode
10334 pa_c_mode_for_suffix (char suffix)
10335 {
10336 if (HPUX_LONG_DOUBLE_LIBRARY)
10337 {
10338 if (suffix == 'q')
10339 return TFmode;
10340 }
10341
10342 return VOIDmode;
10343 }
10344
10345 /* Target hook for function_section. */
10346
10347 static section *
10348 pa_function_section (tree decl, enum node_frequency freq,
10349 bool startup, bool exit)
10350 {
10351 /* Put functions in text section if target doesn't have named sections. */
10352 if (!targetm_common.have_named_sections)
10353 return text_section;
10354
10355 /* Force nested functions into the same section as the containing
10356 function. */
10357 if (decl
10358 && DECL_SECTION_NAME (decl) == NULL
10359 && DECL_CONTEXT (decl) != NULL_TREE
10360 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10361 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10362 return function_section (DECL_CONTEXT (decl));
10363
10364 /* Otherwise, use the default function section. */
10365 return default_function_section (decl, freq, startup, exit);
10366 }
10367
10368 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10369
10370 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10371 that need more than three instructions to load prior to reload. This
10372 limit is somewhat arbitrary. It takes three instructions to load a
10373 CONST_INT from memory but two are memory accesses. It may be better
10374 to increase the allowed range for CONST_INTS. We may also be able
10375 to handle CONST_DOUBLES. */
10376
10377 static bool
10378 pa_legitimate_constant_p (machine_mode mode, rtx x)
10379 {
10380 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10381 return false;
10382
10383 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10384 return false;
10385
10386 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10387 legitimate constants. The other variants can't be handled by
10388 the move patterns after reload starts. */
10389 if (tls_referenced_p (x))
10390 return false;
10391
10392 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10393 return false;
10394
10395 if (TARGET_64BIT
10396 && HOST_BITS_PER_WIDE_INT > 32
10397 && GET_CODE (x) == CONST_INT
10398 && !reload_in_progress
10399 && !reload_completed
10400 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10401 && !pa_cint_ok_for_move (UINTVAL (x)))
10402 return false;
10403
10404 if (function_label_operand (x, mode))
10405 return false;
10406
10407 return true;
10408 }
10409
10410 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10411
10412 static unsigned int
10413 pa_section_type_flags (tree decl, const char *name, int reloc)
10414 {
10415 unsigned int flags;
10416
10417 flags = default_section_type_flags (decl, name, reloc);
10418
10419 /* Function labels are placed in the constant pool. This can
10420 cause a section conflict if decls are put in ".data.rel.ro"
10421 or ".data.rel.ro.local" using the __attribute__ construct. */
10422 if (strcmp (name, ".data.rel.ro") == 0
10423 || strcmp (name, ".data.rel.ro.local") == 0)
10424 flags |= SECTION_WRITE | SECTION_RELRO;
10425
10426 return flags;
10427 }
10428
10429 /* pa_legitimate_address_p recognizes an RTL expression that is a
10430 valid memory address for an instruction. The MODE argument is the
10431 machine mode for the MEM expression that wants to use this address.
10432
10433 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10434 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10435 available with floating point loads and stores, and integer loads.
10436 We get better code by allowing indexed addresses in the initial
10437 RTL generation.
10438
10439 The acceptance of indexed addresses as legitimate implies that we
10440 must provide patterns for doing indexed integer stores, or the move
10441 expanders must force the address of an indexed store to a register.
10442 We have adopted the latter approach.
10443
10444 Another function of pa_legitimate_address_p is to ensure that
10445 the base register is a valid pointer for indexed instructions.
10446 On targets that have non-equivalent space registers, we have to
10447 know at the time of assembler output which register in a REG+REG
10448 pair is the base register. The REG_POINTER flag is sometimes lost
10449 in reload and the following passes, so it can't be relied on during
10450 code generation. Thus, we either have to canonicalize the order
10451 of the registers in REG+REG indexed addresses, or treat REG+REG
10452 addresses separately and provide patterns for both permutations.
10453
10454 The latter approach requires several hundred additional lines of
10455 code in pa.md. The downside to canonicalizing is that a PLUS
10456 in the wrong order can't combine to form a scaled indexed
10457 memory operand. As we won't need to canonicalize the operands if
10458 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10459
10460 We initially break out scaled indexed addresses in canonical order
10461 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10462 scaled indexed addresses during RTL generation. However, fold_rtx
10463 has its own opinion on how the operands of a PLUS should be ordered.
10464 If one of the operands is equivalent to a constant, it will make
10465 that operand the second operand. As the base register is likely to
10466 be equivalent to a SYMBOL_REF, we have made it the second operand.
10467
10468 pa_legitimate_address_p accepts REG+REG as legitimate when the
10469 operands are in the order INDEX+BASE on targets with non-equivalent
10470 space registers, and in any order on targets with equivalent space
10471 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10472
10473 We treat a SYMBOL_REF as legitimate if it is part of the current
10474 function's constant-pool, because such addresses can actually be
10475 output as REG+SMALLINT. */
10476
10477 static bool
10478 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10479 {
10480 if ((REG_P (x)
10481 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10482 : REG_OK_FOR_BASE_P (x)))
10483 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10484 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10485 && REG_P (XEXP (x, 0))
10486 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10487 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10488 return true;
10489
10490 if (GET_CODE (x) == PLUS)
10491 {
10492 rtx base, index;
10493
10494 /* For REG+REG, the base register should be in XEXP (x, 1),
10495 so check it first. */
10496 if (REG_P (XEXP (x, 1))
10497 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10498 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10499 base = XEXP (x, 1), index = XEXP (x, 0);
10500 else if (REG_P (XEXP (x, 0))
10501 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10502 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10503 base = XEXP (x, 0), index = XEXP (x, 1);
10504 else
10505 return false;
10506
10507 if (GET_CODE (index) == CONST_INT)
10508 {
10509 if (INT_5_BITS (index))
10510 return true;
10511
10512 /* When INT14_OK_STRICT is false, a secondary reload is needed
10513 to adjust the displacement of SImode and DImode floating point
10514 instructions but this may fail when the register also needs
10515 reloading. So, we return false when STRICT is true. We
10516 also reject long displacements for float mode addresses since
10517 the majority of accesses will use floating point instructions
10518 that don't support 14-bit offsets. */
10519 if (!INT14_OK_STRICT
10520 && (strict || !(reload_in_progress || reload_completed))
10521 && mode != QImode
10522 && mode != HImode)
10523 return false;
10524
10525 return base14_operand (index, mode);
10526 }
10527
10528 if (!TARGET_DISABLE_INDEXING
10529 /* Only accept the "canonical" INDEX+BASE operand order
10530 on targets with non-equivalent space registers. */
10531 && (TARGET_NO_SPACE_REGS
10532 ? REG_P (index)
10533 : (base == XEXP (x, 1) && REG_P (index)
10534 && (reload_completed
10535 || (reload_in_progress && HARD_REGISTER_P (base))
10536 || REG_POINTER (base))
10537 && (reload_completed
10538 || (reload_in_progress && HARD_REGISTER_P (index))
10539 || !REG_POINTER (index))))
10540 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10541 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10542 : REG_OK_FOR_INDEX_P (index))
10543 && borx_reg_operand (base, Pmode)
10544 && borx_reg_operand (index, Pmode))
10545 return true;
10546
10547 if (!TARGET_DISABLE_INDEXING
10548 && GET_CODE (index) == MULT
10549 /* Only accept base operands with the REG_POINTER flag prior to
10550 reload on targets with non-equivalent space registers. */
10551 && (TARGET_NO_SPACE_REGS
10552 || (base == XEXP (x, 1)
10553 && (reload_completed
10554 || (reload_in_progress && HARD_REGISTER_P (base))
10555 || REG_POINTER (base))))
10556 && REG_P (XEXP (index, 0))
10557 && GET_MODE (XEXP (index, 0)) == Pmode
10558 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10559 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10560 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10561 && GET_CODE (XEXP (index, 1)) == CONST_INT
10562 && INTVAL (XEXP (index, 1))
10563 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10564 && borx_reg_operand (base, Pmode))
10565 return true;
10566
10567 return false;
10568 }
10569
10570 if (GET_CODE (x) == LO_SUM)
10571 {
10572 rtx y = XEXP (x, 0);
10573
10574 if (GET_CODE (y) == SUBREG)
10575 y = SUBREG_REG (y);
10576
10577 if (REG_P (y)
10578 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10579 : REG_OK_FOR_BASE_P (y)))
10580 {
10581 /* Needed for -fPIC */
10582 if (mode == Pmode
10583 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10584 return true;
10585
10586 if (!INT14_OK_STRICT
10587 && (strict || !(reload_in_progress || reload_completed))
10588 && mode != QImode
10589 && mode != HImode)
10590 return false;
10591
10592 if (CONSTANT_P (XEXP (x, 1)))
10593 return true;
10594 }
10595 return false;
10596 }
10597
10598 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10599 return true;
10600
10601 return false;
10602 }
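
/* Editorial examples with assumed registers: the address forms accepted
   above correspond to memory operands such as

       ldw     28(%r3),%r19          ; REG + 14-bit displacement
       ldwx    %r2(%r3),%r19         ; REG + REG, unscaled index
       ldwx,s  %r2(%r3),%r19         ; REG + (REG * 4), scaled index

   where %r3 is the base register and %r2 the index.  */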
10603
10604 /* Look for machine dependent ways to make the invalid address AD a
10605 valid address.
10606
10607 For the PA, transform:
10608
10609 memory(X + <large int>)
10610
10611 into:
10612
10613 if (<large int> & mask) >= 16
10614 Y = (<large int> & ~mask) + mask + 1 Round up.
10615 else
10616 Y = (<large int> & ~mask) Round down.
10617 Z = X + Y
10618 memory (Z + (<large int> - Y));
10619
10620 This makes reload inheritance and reload_cse work better since Z
10621 can be reused.
10622
10623 There may be more opportunities to improve code with this hook. */
10624
10625 rtx
10626 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10627 int opnum, int type,
10628 int ind_levels ATTRIBUTE_UNUSED)
10629 {
10630 long offset, newoffset, mask;
10631 rtx new_rtx, temp = NULL_RTX;
10632
10633 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10634 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10635
10636 if (optimize && GET_CODE (ad) == PLUS)
10637 temp = simplify_binary_operation (PLUS, Pmode,
10638 XEXP (ad, 0), XEXP (ad, 1));
10639
10640 new_rtx = temp ? temp : ad;
10641
10642 if (optimize
10643 && GET_CODE (new_rtx) == PLUS
10644 && GET_CODE (XEXP (new_rtx, 0)) == REG
10645 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10646 {
10647 offset = INTVAL (XEXP ((new_rtx), 1));
10648
10649 /* Choose rounding direction. Round up if we are >= halfway. */
10650 if ((offset & mask) >= ((mask + 1) / 2))
10651 newoffset = (offset & ~mask) + mask + 1;
10652 else
10653 newoffset = offset & ~mask;
10654
10655 /* Ensure that long displacements are aligned. */
10656 if (mask == 0x3fff
10657 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10658 || (TARGET_64BIT && (mode) == DImode)))
10659 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10660
10661 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10662 {
10663 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10664 GEN_INT (newoffset));
10665 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10666 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10667 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10668 opnum, (enum reload_type) type);
10669 return ad;
10670 }
10671 }
10672
10673 return NULL_RTX;
10674 }
10675
/* Output an address table (jump table).  The table is aligned to a
   4-byte boundary on non-SOM targets and is bracketed with the
   .begin_brtab/.end_brtab directives when assembling with GAS.  */
10677
10678 void
10679 pa_output_addr_vec (rtx lab, rtx body)
10680 {
10681 int idx, vlen = XVECLEN (body, 0);
10682
10683 if (!TARGET_SOM)
10684 fputs ("\t.align 4\n", asm_out_file);
10685 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10686 if (TARGET_GAS)
10687 fputs ("\t.begin_brtab\n", asm_out_file);
10688 for (idx = 0; idx < vlen; idx++)
10689 {
10690 ASM_OUTPUT_ADDR_VEC_ELT
10691 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10692 }
10693 if (TARGET_GAS)
10694 fputs ("\t.end_brtab\n", asm_out_file);
10695 }
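/* For illustration only -- the exact label spelling and directives vary
   by target and assembler, but with GAS a three-entry table looks
   roughly like:

	.align 4
   L$0040:
	.begin_brtab
	.word L$0041
	.word L$0042
	.word L$0043
	.end_brtab  */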
10696
/* Output an address difference table (pc-relative jump table).  Each
   entry is the difference between a case label and the table's base
   label; the table is bracketed with .begin_brtab/.end_brtab when
   assembling with GAS.  */
10698
10699 void
10700 pa_output_addr_diff_vec (rtx lab, rtx body)
10701 {
10702 rtx base = XEXP (XEXP (body, 0), 0);
10703 int idx, vlen = XVECLEN (body, 1);
10704
10705 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10706 if (TARGET_GAS)
10707 fputs ("\t.begin_brtab\n", asm_out_file);
10708 for (idx = 0; idx < vlen; idx++)
10709 {
10710 ASM_OUTPUT_ADDR_DIFF_ELT
10711 (asm_out_file,
10712 body,
10713 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10714 CODE_LABEL_NUMBER (base));
10715 }
10716 if (TARGET_GAS)
10717 fputs ("\t.end_brtab\n", asm_out_file);
10718 }
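/* As above, the exact syntax varies by target, but each entry is
   emitted as a label difference, roughly:

	.word L$0041-L$0040  */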
10719
10720 /* This is a helper function for the other atomic operations. This function
10721 emits a loop that contains SEQ that iterates until a compare-and-swap
10722 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10723 a set of instructions that takes a value from OLD_REG as an input and
10724 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10725 set to the current contents of MEM. After SEQ, a compare-and-swap will
10726 attempt to update MEM with NEW_REG. The function returns true when the
10727 loop was generated successfully. */
10728
10729 static bool
10730 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10731 {
10732 machine_mode mode = GET_MODE (mem);
10733 rtx_code_label *label;
10734 rtx cmp_reg, success, oldval;
10735
10736 /* The loop we want to generate looks like
10737
10738 cmp_reg = mem;
10739 label:
10740 old_reg = cmp_reg;
10741 seq;
10742 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;
10745
10746 Note that we only do the plain load from memory once. Subsequent
10747 iterations use the value loaded by the compare-and-swap pattern. */
10748
10749 label = gen_label_rtx ();
10750 cmp_reg = gen_reg_rtx (mode);
10751
10752 emit_move_insn (cmp_reg, mem);
10753 emit_label (label);
10754 emit_move_insn (old_reg, cmp_reg);
10755 if (seq)
10756 emit_insn (seq);
10757
10758 success = NULL_RTX;
10759 oldval = cmp_reg;
10760 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10761 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10762 MEMMODEL_RELAXED))
10763 return false;
10764
10765 if (oldval != cmp_reg)
10766 emit_move_insn (cmp_reg, oldval);
10767
10768 /* Mark this jump predicted not taken. */
10769 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10770 GET_MODE (success), 1, label,
10771 profile_probability::guessed_never ());
10772 return true;
10773 }
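/* In C terms, the emitted loop behaves roughly like the sketch below
   (SEQ stands for the caller-supplied instructions; all names are
   illustrative):

     old = *mem;
     do
       newval = SEQ (old);
     while (!__atomic_compare_exchange_n (mem, &old, newval, false,
					  __ATOMIC_SEQ_CST,
					  __ATOMIC_RELAXED));

   On failure, __atomic_compare_exchange_n writes the value observed in
   *MEM back into OLD, which corresponds to the cmp_reg update above.  */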
10774
/* This function tries to implement an atomic exchange operation using a
   compare-and-swap loop.  VAL is written to *MEM.  The previous contents
   of *MEM are returned, using TARGET if possible.  No memory model is
   required since a compare-and-swap loop is sequentially consistent.  */
10779
10780 rtx
10781 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10782 {
10783 machine_mode mode = GET_MODE (mem);
10784
10785 if (can_compare_and_swap_p (mode, true))
10786 {
10787 if (!target || !register_operand (target, mode))
10788 target = gen_reg_rtx (mode);
10789 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10790 return target;
10791 }
10792
10793 return NULL_RTX;
10794 }
10795
/* Implement TARGET_CALLEE_COPIES.  The callee is responsible for copying
   arguments passed by hidden reference in the 32-bit HP runtime.  Users
   can override this behavior for better compatibility with OpenMP at the
   risk of library incompatibilities.  Arguments are always passed by
   value in the 64-bit HP runtime.  */
10801
10802 static bool
10803 pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
10804 machine_mode mode ATTRIBUTE_UNUSED,
10805 const_tree type ATTRIBUTE_UNUSED,
10806 bool named ATTRIBUTE_UNUSED)
10807 {
10808 return !TARGET_CALLER_COPIES;
10809 }
10810
10811 /* Implement TARGET_HARD_REGNO_NREGS. */
10812
10813 static unsigned int
10814 pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
10815 {
10816 return PA_HARD_REGNO_NREGS (regno, mode);
10817 }
10818
10819 /* Implement TARGET_HARD_REGNO_MODE_OK. */
10820
10821 static bool
10822 pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10823 {
10824 return PA_HARD_REGNO_MODE_OK (regno, mode);
10825 }
10826
/* Implement TARGET_STARTING_FRAME_OFFSET.

   On the 32-bit ports, we reserve one slot for the previous frame
   pointer and one fill slot.  The fill slot is for compatibility
   with HP compiled programs.  On the 64-bit ports, we reserve one
   slot for the previous frame pointer.  Both layouts come to 8
   bytes, so a single constant offset suffices.  */
10833
10834 static HOST_WIDE_INT
10835 pa_starting_frame_offset (void)
10836 {
10837 return 8;
10838 }
10839
/* Figure out the size in words of the function argument.  The size
   returned by this function should always be greater than zero because
   we pass variable-sized and zero-sized objects by reference.  */
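/* For example, a DFmode argument (8 bytes) occupies CEIL (8, 4) == 2
   words when UNITS_PER_WORD is 4, and a single word when UNITS_PER_WORD
   is 8.  */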
10843
10844 HOST_WIDE_INT
10845 pa_function_arg_size (machine_mode mode, const_tree type)
10846 {
10847 HOST_WIDE_INT size;
10848
10849 size = mode != BLKmode ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
10850 return CEIL (size, UNITS_PER_WORD);
10851 }
10852
10853 #include "gt-pa.h"