/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "opts.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
                            const_tree, bool);
static pad_direction pa_function_arg_padding (machine_mode, const_tree);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        machine_mode,
                                        secondary_reload_info *);
static bool pa_secondary_memory_needed (machine_mode,
                                        reg_class_t, reg_class_t);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
                                              machine_mode, int *,
                                              const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
static bool pa_callee_copies (cumulative_args_t, machine_mode,
                              const_tree, bool);
static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
static bool pa_modes_tieable_p (machine_mode, machine_mode);
static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES pa_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P pa_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
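
/* Illustrative example (not from the original source): with
   -mfixed-range=fr4-fr31, the loop above marks fr4 through fr31 as both
   fixed and call-used, and if that range covers every FP register the
   final check also sets MASK_DISABLE_FPREGS.  A comma-separated list
   such as -mfixed-range=fr4-fr17,fr20-fr31 is handled one range per
   iteration of the while loop.  */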

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
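
/* Usage sketch (illustrative, for HP-UX configurations with the
   long-double library):

     __float128 x = ...;   -- same representation as long double
     __float128 y = __builtin_fabsq (x);         -- libcall to _U_Qfabs
     __float128 z = __builtin_copysignq (x, y);  -- libcall to _U_Qfcopysign

   __builtin_infq and __builtin_huge_valq take no arguments; they are
   expanded inline by pa_expand_builtin below, which materializes the
   infinity from the constant pool instead of emitting a libcall.  */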

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = const_double_from_real_value (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
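
/* Illustrative examples: 42 fits in 14 signed bits, so ldo works;
   0x12345000 has its low 11 bits clear and does not change sign when
   widened, so ldil works; 0x3c00 is a contiguous run of ones, so zdepi
   works.  A value such as 0x12345678 passes none of the three tests and
   needs more than one instruction.  */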
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}
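
/* Worked example (illustrative): for ival = 0x12345800, the low 11 bits
   are zero and bits 31 and up are all zero, so x == 0 and ldil can load
   it.  For ival = 0x80000000 with a 64-bit HOST_WIDE_INT, x is
   0x80000000, which is neither 0 nor all-ones from bit 31 upward, so the
   value would change sign when widened and ldil cannot be used.  */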

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
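
/* Worked example (illustrative): for x = 0x3e0, a contiguous run of
   ones, lsb_mask is 0x20 and t = (0x3e + 0x20) & ~0x1f = 0x40, a power
   of two, so the test succeeds.  For x = 0x21, two separated bits,
   lsb_mask is 1 and t = (0x2 + 0x1) & ~0 = 3, not a power of two, so
   the test fails.  */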

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
      0....01....1
      1....10....0
      1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
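
/* In both predicates above, "mask += mask & -mask" adds the least
   significant set bit to the (possibly complemented) mask; the result is
   a power of two (or zero) exactly when the set bits formed a single
   contiguous run.  Illustrative example for pa_ior_mask_p: mask = 0x1e0
   gives 0x1e0 + 0x20 = 0x200, a power of two, so depi can set those
   bits; mask = 0x1e1 gives 0x1e1 + 1 = 0x1e2, not a power of two, so it
   is rejected.  */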
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
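
/* Illustrative sketch of the RTL built above for an ordinary SYMBOL_REF
   `sym' (not emitted verbatim; modes abbreviated):

     (set tmp_reg (plus pic_offset_table_rtx (high sym)))
     (set reg (mem (lo_sum tmp_reg (unspec [sym] UNSPEC_DLTIND14R))))

   i.e. the address of `sym' is loaded from its DLT slot, addressed
   relative to the PIC register.  For a function label, the same load
   yields the plabel address and one further (mem reg) dereference gives
   the function descriptor address.  */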

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   These respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
           || GET_CODE (x) == MULT)
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && ((GET_CODE (x) == ASHIFT
               && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
              || (GET_CODE (x) == MULT
                  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}
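
/* For example, (ashift (reg) (const_int 2)) and (mult (reg) (const_int 4))
   both satisfy this predicate; each one describes the same scale-by-4
   operation in, respectively, shift-add (shNadd) and scaled-index form.  */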

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
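
/* Worked example of the rounding above (illustrative): for a MODE_INT
   reference with offset 0x12345, the mask is 0x3fff and the masked part
   0x2345 is at least halfway to the next boundary, so Y rounds up to
   0x14000.  Z = X + 0x14000 then becomes the shared base, and the
   residual displacement 0x12345 - 0x14000 = -7355 fits in the 14-bit
   displacement field of the final memory reference.  */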

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
         as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
        shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;
      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_ASHIFT (Pmode, reg2,
                                                      GEN_INT (shift_val)),
                                      reg1));
    }

  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
         as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
        shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_ASHIFT (Pmode,
                                              XEXP (XEXP (XEXP (x, 0), 0), 0),
                                              GEN_INT (shift_val)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));
          val /= (1 << shift_val);

          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_ASHIFT (Pmode, reg1,
                                                    GEN_INT (shift_val)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode,
                            gen_rtx_PLUS (Pmode,
                                          gen_rtx_ASHIFT (Pmode, reg2,
                                                          GEN_INT (shift_val)),
                                          reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_ASHIFT (Pmode, reg1,
                                                      GEN_INT (shift_val)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (mem_shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095)
            {
              int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

              /* If we were given a MULT, we must fix the constant
                 as we're going to create the ASHIFT form.  */
              if (GET_CODE (XEXP (x, 0)) == MULT)
                shift_val = exact_log2 (shift_val);

              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_ASHIFT (Pmode,
                                                         reg2,
                                                         GEN_INT (shift_val)),
                                         reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1))
                      % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
            {
              int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

              /* If we were given a MULT, we must fix the constant
                 as we're going to create the ASHIFT form.  */
              if (GET_CODE (XEXP (x, 0)) == MULT)
                shift_val = exact_log2 (shift_val);

              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_ASHIFT (Pmode, regx2,
                                                         GEN_INT (shift_val)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST addresses are the cheapest (cost 1),
   a HIGH of a symbolic constant costs 2, and anything else costs 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
                int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
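
/* Illustrative cost computation from the MULT case above: a DImode
   multiply on a 32-bit target has factor = 8 / 4 = 2, so it costs
   2 * 2 * COSTS_N_INSNS (8) when TARGET_PA_11 and the FP registers are
   usable, and 2 * 2 * COSTS_N_INSNS (20) otherwise.  */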

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
1659 if (!TARGET_NO_SPACE_REGS
1660 && !cse_not_expected
1661 && GET_CODE (operand1) == MEM
1662 && GET_CODE (XEXP (operand1, 0)) == PLUS
1663 && REG_P (XEXP (XEXP (operand1, 0), 0))
1664 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1665 operand1
1666 = replace_equiv_address (operand1,
1667 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1668
1669 if (scratch_reg
1670 && reload_in_progress && GET_CODE (operand0) == REG
1671 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1672 operand0 = reg_equiv_mem (REGNO (operand0));
1673 else if (scratch_reg
1674 && reload_in_progress && GET_CODE (operand0) == SUBREG
1675 && GET_CODE (SUBREG_REG (operand0)) == REG
1676 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1677 {
1678 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1679 the code which tracks sets/uses for delete_output_reload. */
1680 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1681 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1682 SUBREG_BYTE (operand0));
1683 operand0 = alter_subreg (&temp, true);
1684 }
1685
1686 if (scratch_reg
1687 && reload_in_progress && GET_CODE (operand1) == REG
1688 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1689 operand1 = reg_equiv_mem (REGNO (operand1));
1690 else if (scratch_reg
1691 && reload_in_progress && GET_CODE (operand1) == SUBREG
1692 && GET_CODE (SUBREG_REG (operand1)) == REG
1693 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1694 {
1695 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1696 the code which tracks sets/uses for delete_output_reload. */
1697 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1698 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1699 SUBREG_BYTE (operand1));
1700 operand1 = alter_subreg (&temp, true);
1701 }
1702
1703 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1704 && ((tem = find_replacement (&XEXP (operand0, 0)))
1705 != XEXP (operand0, 0)))
1706 operand0 = replace_equiv_address (operand0, tem);
1707
1708 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1709 && ((tem = find_replacement (&XEXP (operand1, 0)))
1710 != XEXP (operand1, 0)))
1711 operand1 = replace_equiv_address (operand1, tem);
1712
1713 /* Handle secondary reloads for loads/stores of FP registers from
1714 REG+D addresses where D does not fit in 5 or 14 bits, including
1715 (subreg (mem (addr))) cases, and reloads for other unsupported
1716 memory operands. */
1717 if (scratch_reg
1718 && FP_REG_P (operand0)
1719 && (MEM_P (operand1)
1720 || (GET_CODE (operand1) == SUBREG
1721 && MEM_P (XEXP (operand1, 0)))))
1722 {
1723 rtx op1 = operand1;
1724
1725 if (GET_CODE (op1) == SUBREG)
1726 op1 = XEXP (op1, 0);
1727
1728 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1729 {
1730 if (!(TARGET_PA_20
1731 && !TARGET_ELF32
1732 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1733 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1734 {
1735 /* SCRATCH_REG will hold an address and maybe the actual data.
1736 We want it in WORD_MODE regardless of what mode it was
1737 originally given to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1739
1740 /* D might not fit in 14 bits either; for such cases load D
1741 into scratch reg. */
1742 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1743 {
1744 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1745 emit_move_insn (scratch_reg,
1746 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1747 Pmode,
1748 XEXP (XEXP (op1, 0), 0),
1749 scratch_reg));
1750 }
1751 else
1752 emit_move_insn (scratch_reg, XEXP (op1, 0));
1753 emit_insn (gen_rtx_SET (operand0,
1754 replace_equiv_address (op1, scratch_reg)));
1755 return 1;
1756 }
1757 }
1758 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1759 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1760 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1761 {
1762 /* Load memory address into SCRATCH_REG. */
1763 scratch_reg = force_mode (word_mode, scratch_reg);
1764 emit_move_insn (scratch_reg, XEXP (op1, 0));
1765 emit_insn (gen_rtx_SET (operand0,
1766 replace_equiv_address (op1, scratch_reg)));
1767 return 1;
1768 }
1769 }
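/* Editorial illustration (not from the original sources): reloading
   a DFmode value from 20000(%r30) into an FP register fits neither
   the 5-bit nor the 14-bit displacement form, so the code above first
   loads 20000 into scratch_reg, adds the base register to it, and
   performs the FP load through the register-indirect address left in
   scratch_reg.  */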
1770 else if (scratch_reg
1771 && FP_REG_P (operand1)
1772 && (MEM_P (operand0)
1773 || (GET_CODE (operand0) == SUBREG
1774 && MEM_P (XEXP (operand0, 0)))))
1775 {
1776 rtx op0 = operand0;
1777
1778 if (GET_CODE (op0) == SUBREG)
1779 op0 = XEXP (op0, 0);
1780
1781 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1782 {
1783 if (!(TARGET_PA_20
1784 && !TARGET_ELF32
1785 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1786 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1787 {
1788 /* SCRATCH_REG will hold an address and maybe the actual data.
1789 We want it in WORD_MODE regardless of what mode it was
1790 originally given to us. */
1791 scratch_reg = force_mode (word_mode, scratch_reg);
1792
1793 /* D might not fit in 14 bits either; for such cases load D
1794 into scratch reg. */
1795 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1796 {
1797 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1798 emit_move_insn (scratch_reg,
1799 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1800 Pmode,
1801 XEXP (XEXP (op0, 0), 0),
1802 scratch_reg));
1803 }
1804 else
1805 emit_move_insn (scratch_reg, XEXP (op0, 0));
1806 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1807 operand1));
1808 return 1;
1809 }
1810 }
1811 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1812 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1813 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1814 {
1815 /* Load memory address into SCRATCH_REG. */
1816 scratch_reg = force_mode (word_mode, scratch_reg);
1817 emit_move_insn (scratch_reg, XEXP (op0, 0));
1818 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1819 operand1));
1820 return 1;
1821 }
1822 }
1823 /* Handle secondary reloads for loads of FP registers from constant
1824 expressions by forcing the constant into memory. For the most part,
1825 this is only necessary for SImode and DImode.
1826
1827 Use scratch_reg to hold the address of the memory location. */
1828 else if (scratch_reg
1829 && CONSTANT_P (operand1)
1830 && FP_REG_P (operand0))
1831 {
1832 rtx const_mem, xoperands[2];
1833
1834 if (operand1 == CONST0_RTX (mode))
1835 {
1836 emit_insn (gen_rtx_SET (operand0, operand1));
1837 return 1;
1838 }
1839
1840 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1841 it in WORD_MODE regardless of what mode it was originally given
1842 to us. */
1843 scratch_reg = force_mode (word_mode, scratch_reg);
1844
1845 /* Force the constant into memory and put the address of the
1846 memory location into scratch_reg. */
1847 const_mem = force_const_mem (mode, operand1);
1848 xoperands[0] = scratch_reg;
1849 xoperands[1] = XEXP (const_mem, 0);
1850 pa_emit_move_sequence (xoperands, Pmode, 0);
1851
1852 /* Now load the destination register. */
1853 emit_insn (gen_rtx_SET (operand0,
1854 replace_equiv_address (const_mem, scratch_reg)));
1855 return 1;
1856 }
1857 /* Handle secondary reloads for SAR. These occur when trying to load
1858 the SAR from memory or a constant. */
1859 else if (scratch_reg
1860 && GET_CODE (operand0) == REG
1861 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1862 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1863 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1864 {
1865 /* D might not fit in 14 bits either; for such cases load D into
1866 scratch reg. */
1867 if (GET_CODE (operand1) == MEM
1868 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1869 {
1870 /* We are reloading the address into the scratch register, so we
1871 want to make sure the scratch register is a full register. */
1872 scratch_reg = force_mode (word_mode, scratch_reg);
1873
1874 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1875 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1876 0)),
1877 Pmode,
1878 XEXP (XEXP (operand1, 0),
1879 0),
1880 scratch_reg));
1881
1882 /* Now we are going to load the scratch register from memory,
1883 we want to load it in the same width as the original MEM,
1884 which must be the same as the width of the ultimate destination,
1885 OPERAND0. */
1886 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1887
1888 emit_move_insn (scratch_reg,
1889 replace_equiv_address (operand1, scratch_reg));
1890 }
1891 else
1892 {
1893 /* We want to load the scratch register using the same mode as
1894 the ultimate destination. */
1895 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1896
1897 emit_move_insn (scratch_reg, operand1);
1898 }
1899
1900 /* And emit the insn to set the ultimate destination. We know that
1901 the scratch register has the same mode as the destination at this
1902 point. */
1903 emit_move_insn (operand0, scratch_reg);
1904 return 1;
1905 }
1906
1907 /* Handle the most common case: storing into a register. */
1908 if (register_operand (operand0, mode))
1909 {
1910 /* Legitimize TLS symbol references. This happens for references
1911 that aren't legitimate constants. */
1912 if (PA_SYMBOL_REF_TLS_P (operand1))
1913 operand1 = legitimize_tls_address (operand1);
1914
1915 if (register_operand (operand1, mode)
1916 || (GET_CODE (operand1) == CONST_INT
1917 && pa_cint_ok_for_move (UINTVAL (operand1)))
1918 || (operand1 == CONST0_RTX (mode))
1919 || (GET_CODE (operand1) == HIGH
1920 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1921 /* Only `general_operands' can come here, so MEM is ok. */
1922 || GET_CODE (operand1) == MEM)
1923 {
1924 /* Various sets are created during RTL generation which don't
1925 have the REG_POINTER flag correctly set. After the CSE pass,
1926 instruction recognition can fail if we don't consistently
1927 set this flag when performing register copies. This should
1928 also improve the opportunities for creating insns that use
1929 unscaled indexing. */
1930 if (REG_P (operand0) && REG_P (operand1))
1931 {
1932 if (REG_POINTER (operand1)
1933 && !REG_POINTER (operand0)
1934 && !HARD_REGISTER_P (operand0))
1935 copy_reg_pointer (operand0, operand1);
1936 }
1937
1938 /* When MEMs are broken out, the REG_POINTER flag doesn't
1939 get set. In some cases, we can set the REG_POINTER flag
1940 from the declaration for the MEM. */
1941 if (REG_P (operand0)
1942 && GET_CODE (operand1) == MEM
1943 && !REG_POINTER (operand0))
1944 {
1945 tree decl = MEM_EXPR (operand1);
1946
1947 /* Set the register pointer flag and register alignment
1948 if the declaration for this memory reference is a
1949 pointer type. */
1950 if (decl)
1951 {
1952 tree type;
1953
1954 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1955 tree operand 1. */
1956 if (TREE_CODE (decl) == COMPONENT_REF)
1957 decl = TREE_OPERAND (decl, 1);
1958
1959 type = TREE_TYPE (decl);
1960 type = strip_array_types (type);
1961
1962 if (POINTER_TYPE_P (type))
1963 mark_reg_pointer (operand0, BITS_PER_UNIT);
1964 }
1965 }
1966
1967 emit_insn (gen_rtx_SET (operand0, operand1));
1968 return 1;
1969 }
1970 }
1971 else if (GET_CODE (operand0) == MEM)
1972 {
1973 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1974 && !(reload_in_progress || reload_completed))
1975 {
1976 rtx temp = gen_reg_rtx (DFmode);
1977
1978 emit_insn (gen_rtx_SET (temp, operand1));
1979 emit_insn (gen_rtx_SET (operand0, temp));
1980 return 1;
1981 }
1982 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1983 {
1984 /* Run this case quickly. */
1985 emit_insn (gen_rtx_SET (operand0, operand1));
1986 return 1;
1987 }
1988 if (! (reload_in_progress || reload_completed))
1989 {
1990 operands[0] = validize_mem (operand0);
1991 operands[1] = operand1 = force_reg (mode, operand1);
1992 }
1993 }
1994
1995 /* Simplify the source if we need to.
1996 Note we do have to handle function labels here, even though we do
1997 not consider them legitimate constants. Loop optimizations can
1998 call the emit_move_xxx routines with one as a source. */
1999 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
2000 || (GET_CODE (operand1) == HIGH
2001 && symbolic_operand (XEXP (operand1, 0), mode))
2002 || function_label_operand (operand1, VOIDmode)
2003 || tls_referenced_p (operand1))
2004 {
2005 int ishighonly = 0;
2006
2007 if (GET_CODE (operand1) == HIGH)
2008 {
2009 ishighonly = 1;
2010 operand1 = XEXP (operand1, 0);
2011 }
2012 if (symbolic_operand (operand1, mode))
2013 {
2014 /* Argh. The assembler and linker can't handle arithmetic
2015 involving plabels.
2016
2017 So we force the plabel into memory, load operand0 from
2018 the memory location, then add in the constant part. */
2019 if ((GET_CODE (operand1) == CONST
2020 && GET_CODE (XEXP (operand1, 0)) == PLUS
2021 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2022 VOIDmode))
2023 || function_label_operand (operand1, VOIDmode))
2024 {
2025 rtx temp, const_part;
2026
2027 /* Figure out what (if any) scratch register to use. */
2028 if (reload_in_progress || reload_completed)
2029 {
2030 scratch_reg = scratch_reg ? scratch_reg : operand0;
2031 /* SCRATCH_REG will hold an address and maybe the actual
2032 data. We want it in WORD_MODE regardless of what mode it
2033 was originally given to us. */
2034 scratch_reg = force_mode (word_mode, scratch_reg);
2035 }
2036 else if (flag_pic)
2037 scratch_reg = gen_reg_rtx (Pmode);
2038
2039 if (GET_CODE (operand1) == CONST)
2040 {
2041 /* Save away the constant part of the expression. */
2042 const_part = XEXP (XEXP (operand1, 0), 1);
2043 gcc_assert (GET_CODE (const_part) == CONST_INT);
2044
2045 /* Force the function label into memory. */
2046 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2047 }
2048 else
2049 {
2050 /* No constant part. */
2051 const_part = NULL_RTX;
2052
2053 /* Force the function label into memory. */
2054 temp = force_const_mem (mode, operand1);
2055 }
2056
2057
2058 /* Get the address of the memory location. PIC-ify it if
2059 necessary. */
2060 temp = XEXP (temp, 0);
2061 if (flag_pic)
2062 temp = legitimize_pic_address (temp, mode, scratch_reg);
2063
2064 /* Put the address of the memory location into our destination
2065 register. */
2066 operands[1] = temp;
2067 pa_emit_move_sequence (operands, mode, scratch_reg);
2068
2069 /* Now load from the memory location into our destination
2070 register. */
2071 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2072 pa_emit_move_sequence (operands, mode, scratch_reg);
2073
2074 /* And add back in the constant part. */
2075 if (const_part != NULL_RTX)
2076 expand_inc (operand0, const_part);
2077
2078 return 1;
2079 }
2080
2081 if (flag_pic)
2082 {
2083 rtx_insn *insn;
2084 rtx temp;
2085
2086 if (reload_in_progress || reload_completed)
2087 {
2088 temp = scratch_reg ? scratch_reg : operand0;
2089 /* TEMP will hold an address and maybe the actual
2090 data. We want it in WORD_MODE regardless of what mode it
2091 was originally given to us. */
2092 temp = force_mode (word_mode, temp);
2093 }
2094 else
2095 temp = gen_reg_rtx (Pmode);
2096
2097 /* Force (const (plus (symbol) (const_int))) to memory
2098 if the const_int will not fit in 14 bits. Although
2099 this requires a relocation, the instruction sequence
2100 needed to load the value is shorter. */
2101 if (GET_CODE (operand1) == CONST
2102 && GET_CODE (XEXP (operand1, 0)) == PLUS
2103 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2104 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2105 {
2106 rtx x, m = force_const_mem (mode, operand1);
2107
2108 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2109 x = replace_equiv_address (m, x);
2110 insn = emit_move_insn (operand0, x);
2111 }
2112 else
2113 {
2114 operands[1] = legitimize_pic_address (operand1, mode, temp);
2115 if (REG_P (operand0) && REG_P (operands[1]))
2116 copy_reg_pointer (operand0, operands[1]);
2117 insn = emit_move_insn (operand0, operands[1]);
2118 }
2119
2120 /* Put a REG_EQUAL note on this insn. */
2121 set_unique_reg_note (insn, REG_EQUAL, operand1);
2122 }
2123 /* On the HPPA, references to data space are supposed to use dp,
2124 register 27, but showing it in the RTL inhibits various cse
2125 and loop optimizations. */
2126 else
2127 {
2128 rtx temp, set;
2129
2130 if (reload_in_progress || reload_completed)
2131 {
2132 temp = scratch_reg ? scratch_reg : operand0;
2133 /* TEMP will hold an address and maybe the actual
2134 data. We want it in WORD_MODE regardless of what mode it
2135 was originally given to us. */
2136 temp = force_mode (word_mode, temp);
2137 }
2138 else
2139 temp = gen_reg_rtx (mode);
2140
2141 /* Loading a SYMBOL_REF into a register makes that register
2142 safe to be used as the base in an indexed address.
2143
2144 Don't mark hard registers though. That loses. */
2145 if (GET_CODE (operand0) == REG
2146 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2147 mark_reg_pointer (operand0, BITS_PER_UNIT);
2148 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2149 mark_reg_pointer (temp, BITS_PER_UNIT);
2150
2151 if (ishighonly)
2152 set = gen_rtx_SET (operand0, temp);
2153 else
2154 set = gen_rtx_SET (operand0,
2155 gen_rtx_LO_SUM (mode, temp, operand1));
2156
2157 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2158 emit_insn (set);
2159
2160 }
2161 return 1;
2162 }
2163 else if (tls_referenced_p (operand1))
2164 {
2165 rtx tmp = operand1;
2166 rtx addend = NULL;
2167
2168 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2169 {
2170 addend = XEXP (XEXP (tmp, 0), 1);
2171 tmp = XEXP (XEXP (tmp, 0), 0);
2172 }
2173
2174 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2175 tmp = legitimize_tls_address (tmp);
2176 if (addend)
2177 {
2178 tmp = gen_rtx_PLUS (mode, tmp, addend);
2179 tmp = force_operand (tmp, operands[0]);
2180 }
2181 operands[1] = tmp;
2182 }
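/* Editorial example: a TLS reference such as (const (plus (symbol_ref
   "x") (const_int 4))) is handled by splitting off the addend, passing
   only the SYMBOL_REF to legitimize_tls_address, and re-applying the
   constant 4 with a PLUS forced into operands[0] afterwards.  */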
2183 else if (GET_CODE (operand1) != CONST_INT
2184 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2185 {
2186 rtx temp;
2187 rtx_insn *insn;
2188 rtx op1 = operand1;
2189 HOST_WIDE_INT value = 0;
2190 HOST_WIDE_INT insv = 0;
2191 int insert = 0;
2192
2193 if (GET_CODE (operand1) == CONST_INT)
2194 value = INTVAL (operand1);
2195
2196 if (TARGET_64BIT
2197 && GET_CODE (operand1) == CONST_INT
2198 && HOST_BITS_PER_WIDE_INT > 32
2199 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2200 {
2201 HOST_WIDE_INT nval;
2202
2203 /* Extract the low order 32 bits of the value and sign extend.
2204 If the new value is the same as the original value, we can
2205 use the original value as-is. If the new value is
2206 different, we use it and insert the most-significant 32-bits
2207 of the original value into the final result. */
2208 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2209 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2210 if (value != nval)
2211 {
2212 #if HOST_BITS_PER_WIDE_INT > 32
2213 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2214 #endif
2215 insert = 1;
2216 value = nval;
2217 operand1 = GEN_INT (nval);
2218 }
2219 }
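/* Editorial trace: for value = 0x100000000, the low-order 32 bits
   sign-extend to nval = 0, which differs from the original value, so
   the code records insv = 1 (the high 32 bits), loads 0 first, and
   later inserts 1 into bits 32..63 of the destination.  */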
2220
2221 if (reload_in_progress || reload_completed)
2222 temp = scratch_reg ? scratch_reg : operand0;
2223 else
2224 temp = gen_reg_rtx (mode);
2225
2226 /* We don't directly split DImode constants on 32-bit targets
2227 because PLUS uses an 11-bit immediate and the insn sequence
2228 generated is not as efficient as the one using HIGH/LO_SUM. */
2229 if (GET_CODE (operand1) == CONST_INT
2230 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2231 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2232 && !insert)
2233 {
2234 /* Directly break constant into high and low parts. This
2235 provides better optimization opportunities because various
2236 passes recognize constants split with PLUS but not LO_SUM.
2237 We use a 14-bit signed low part except when the addition
2238 of 0x4000 to the high part might change the sign of the
2239 high part. */
2240 HOST_WIDE_INT low = value & 0x3fff;
2241 HOST_WIDE_INT high = value & ~ 0x3fff;
2242
2243 if (low >= 0x2000)
2244 {
2245 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2246 high += 0x2000;
2247 else
2248 high += 0x4000;
2249 }
2250
2251 low = value - high;
2252
2253 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2254 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2255 }
2256 else
2257 {
2258 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2259 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2260 }
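/* Editorial example of the split above: value = 0x12fff gives
   low = 0x2fff and high = 0x10000; since low >= 0x2000, high is
   bumped to 0x14000 and the final low part 0x12fff - 0x14000 =
   -0x1001 fits the 14-bit signed range of -8192..8191.  */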
2261
2262 insn = emit_move_insn (operands[0], operands[1]);
2263
2264 /* Now insert the most significant 32 bits of the value
2265 into the register. When we don't have a second register
2266 available, it could take up to nine instructions to load
2267 a 64-bit integer constant. Prior to reload, we force
2268 constants that would take more than three instructions
2269 to load to the constant pool. During and after reload,
2270 we have to handle all possible values. */
2271 if (insert)
2272 {
2273 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2274 register and the value to be inserted is outside the
2275 range that can be loaded with three depdi instructions. */
2276 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2277 {
2278 operand1 = GEN_INT (insv);
2279
2280 emit_insn (gen_rtx_SET (temp,
2281 gen_rtx_HIGH (mode, operand1)));
2282 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2283 if (mode == DImode)
2284 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2285 const0_rtx, temp));
2286 else
2287 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2288 const0_rtx, temp));
2289 }
2290 else
2291 {
2292 int len = 5, pos = 27;
2293
2294 /* Insert the bits using the depdi instruction. */
2295 while (pos >= 0)
2296 {
2297 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2298 HOST_WIDE_INT sign = v5 < 0;
2299
2300 /* Left extend the insertion. */
2301 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2302 while (pos > 0 && (insv & 1) == sign)
2303 {
2304 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2305 len += 1;
2306 pos -= 1;
2307 }
2308
2309 if (mode == DImode)
2310 insn = emit_insn (gen_insvdi (operand0,
2311 GEN_INT (len),
2312 GEN_INT (pos),
2313 GEN_INT (v5)));
2314 else
2315 insn = emit_insn (gen_insvsi (operand0,
2316 GEN_INT (len),
2317 GEN_INT (pos),
2318 GEN_INT (v5)));
2319
2320 len = pos > 0 && pos < 5 ? pos : 5;
2321 pos -= len;
2322 }
2323 }
2324 }
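/* Editorial trace of the depdi loop: when insv is a small value such
   as 5, v5 sign-extends to 5 with sign bit 0 and the remaining bits
   are all zero, so the field widens from 5 bits at pos 27 to a single
   32-bit insertion at pos 0 -- one insv covers the whole upper half. */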
2325
2326 set_unique_reg_note (insn, REG_EQUAL, op1);
2327
2328 return 1;
2329 }
2330 }
2331 /* Now have insn-emit do whatever it normally does. */
2332 return 0;
2333 }
2334
2335 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2336 it will need a link/runtime reloc). */
2337
2338 int
2339 pa_reloc_needed (tree exp)
2340 {
2341 int reloc = 0;
2342
2343 switch (TREE_CODE (exp))
2344 {
2345 case ADDR_EXPR:
2346 return 1;
2347
2348 case POINTER_PLUS_EXPR:
2349 case PLUS_EXPR:
2350 case MINUS_EXPR:
2351 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2352 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2353 break;
2354
2355 CASE_CONVERT:
2356 case NON_LVALUE_EXPR:
2357 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2358 break;
2359
2360 case CONSTRUCTOR:
2361 {
2362 tree value;
2363 unsigned HOST_WIDE_INT ix;
2364
2365 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2366 if (value)
2367 reloc |= pa_reloc_needed (value);
2368 }
2369 break;
2370
2371 case ERROR_MARK:
2372 break;
2373
2374 default:
2375 break;
2376 }
2377 return reloc;
2378 }
2379
2380 \f
2381 /* Return the best assembler insn template
2382 for moving operands[1] into operands[0] as a fullword. */
2383 const char *
2384 pa_singlemove_string (rtx *operands)
2385 {
2386 HOST_WIDE_INT intval;
2387
2388 if (GET_CODE (operands[0]) == MEM)
2389 return "stw %r1,%0";
2390 if (GET_CODE (operands[1]) == MEM)
2391 return "ldw %1,%0";
2392 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2393 {
2394 long i;
2395
2396 gcc_assert (GET_MODE (operands[1]) == SFmode);
2397
2398 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2399 bit pattern. */
2400 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2401
2402 operands[1] = GEN_INT (i);
2403 /* Fall through to CONST_INT case. */
2404 }
2405 if (GET_CODE (operands[1]) == CONST_INT)
2406 {
2407 intval = INTVAL (operands[1]);
2408
2409 if (VAL_14_BITS_P (intval))
2410 return "ldi %1,%0";
2411 else if ((intval & 0x7ff) == 0)
2412 return "ldil L'%1,%0";
2413 else if (pa_zdepi_cint_p (intval))
2414 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2415 else
2416 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2417 }
2418 return "copy %1,%0";
2419 }
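/* Editorial examples of the template selection above: 42 fits in 14
   bits and yields "ldi 42,%0"; 0x12345800 has its low 11 bits clear
   and yields the ldil form; a contiguous mask such as 0x3ffc0
   satisfies pa_zdepi_cint_p and uses the zdepi/depwi,z form;
   everything else needs the two-insn ldil/ldo pair.  */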
2420 \f
2421
2422 /* Compute position (in OP[1]) and width (in OP[2])
2423 useful for copying IMM to a register using the zdepi
2424 instruction. Store the immediate value to insert in OP[0]. */
2425 static void
2426 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2427 {
2428 int lsb, len;
2429
2430 /* Find the least significant set bit in IMM. */
2431 for (lsb = 0; lsb < 32; lsb++)
2432 {
2433 if ((imm & 1) != 0)
2434 break;
2435 imm >>= 1;
2436 }
2437
2438 /* Choose variants based on *sign* of the 5-bit field. */
2439 if ((imm & 0x10) == 0)
2440 len = (lsb <= 28) ? 4 : 32 - lsb;
2441 else
2442 {
2443 /* Find the width of the bitstring in IMM. */
2444 for (len = 5; len < 32 - lsb; len++)
2445 {
2446 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2447 break;
2448 }
2449
2450 /* Sign extend IMM as a 5-bit value. */
2451 imm = (imm & 0xf) - 0x10;
2452 }
2453
2454 op[0] = imm;
2455 op[1] = 31 - lsb;
2456 op[2] = len;
2457 }
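/* Editorial worked example: imm = 0x3ffc0 is a run of twelve 1s
   starting at bit 6, so lsb = 6; the shifted value 0xfff has bit 4
   set, len grows to 12, and imm sign-extends to -1.  The result is
   op[0] = -1, op[1] = 31 - 6 = 25, op[2] = 12: deposit a 12-bit
   all-ones field ending at big-endian bit position 25.  */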
2458
2459 /* Compute position (in OP[1]) and width (in OP[2])
2460 useful for copying IMM to a register using the depdi,z
2461 instruction. Store the immediate value to insert in OP[0]. */
2462
2463 static void
2464 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2465 {
2466 int lsb, len, maxlen;
2467
2468 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2469
2470 /* Find the least significant set bit in IMM. */
2471 for (lsb = 0; lsb < maxlen; lsb++)
2472 {
2473 if ((imm & 1) != 0)
2474 break;
2475 imm >>= 1;
2476 }
2477
2478 /* Choose variants based on *sign* of the 5-bit field. */
2479 if ((imm & 0x10) == 0)
2480 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2481 else
2482 {
2483 /* Find the width of the bitstring in IMM. */
2484 for (len = 5; len < maxlen - lsb; len++)
2485 {
2486 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2487 break;
2488 }
2489
2490 /* Extend length if host is narrow and IMM is negative. */
2491 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2492 len += 32;
2493
2494 /* Sign extend IMM as a 5-bit value. */
2495 imm = (imm & 0xf) - 0x10;
2496 }
2497
2498 op[0] = imm;
2499 op[1] = 63 - lsb;
2500 op[2] = len;
2501 }
2502
2503 /* Output assembler code to perform a doubleword move insn
2504 with operands OPERANDS. */
2505
2506 const char *
2507 pa_output_move_double (rtx *operands)
2508 {
2509 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2510 rtx latehalf[2];
2511 rtx addreg0 = 0, addreg1 = 0;
2512 int highonly = 0;
2513
2514 /* First classify both operands. */
2515
2516 if (REG_P (operands[0]))
2517 optype0 = REGOP;
2518 else if (offsettable_memref_p (operands[0]))
2519 optype0 = OFFSOP;
2520 else if (GET_CODE (operands[0]) == MEM)
2521 optype0 = MEMOP;
2522 else
2523 optype0 = RNDOP;
2524
2525 if (REG_P (operands[1]))
2526 optype1 = REGOP;
2527 else if (CONSTANT_P (operands[1]))
2528 optype1 = CNSTOP;
2529 else if (offsettable_memref_p (operands[1]))
2530 optype1 = OFFSOP;
2531 else if (GET_CODE (operands[1]) == MEM)
2532 optype1 = MEMOP;
2533 else
2534 optype1 = RNDOP;
2535
2536 /* Check for the cases that the operand constraints are not
2537 supposed to allow. */
2538 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2539
2540 /* Handle copies between general and floating registers. */
2541
2542 if (optype0 == REGOP && optype1 == REGOP
2543 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2544 {
2545 if (FP_REG_P (operands[0]))
2546 {
2547 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2548 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2549 return "{fldds|fldd} -16(%%sp),%0";
2550 }
2551 else
2552 {
2553 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2554 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2555 return "{ldws|ldw} -12(%%sp),%R0";
2556 }
2557 }
2558
2559 /* Handle auto decrementing and incrementing loads and stores
2560 specifically, since the structure of the function doesn't work
2561 for them without major modification. Do it better once we teach
2562 this port about the general inc/dec addressing of the PA.
2563 (This was written by tege. Chide him if it doesn't work.) */
2564
2565 if (optype0 == MEMOP)
2566 {
2567 /* We have to output the address syntax ourselves, since print_operand
2568 doesn't deal with the addresses we want to use. Fix this later. */
2569
2570 rtx addr = XEXP (operands[0], 0);
2571 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2572 {
2573 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2574
2575 operands[0] = XEXP (addr, 0);
2576 gcc_assert (GET_CODE (operands[1]) == REG
2577 && GET_CODE (operands[0]) == REG);
2578
2579 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2580
2581 /* No overlap between high target register and address
2582 register. (We do this in a non-obvious way to
2583 save a register file writeback) */
2584 if (GET_CODE (addr) == POST_INC)
2585 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2586 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2587 }
2588 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2589 {
2590 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2591
2592 operands[0] = XEXP (addr, 0);
2593 gcc_assert (GET_CODE (operands[1]) == REG
2594 && GET_CODE (operands[0]) == REG);
2595
2596 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2597 /* No overlap between high target register and address
2598 register. (We do this in a non-obvious way to save a
2599 register file writeback) */
2600 if (GET_CODE (addr) == PRE_INC)
2601 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2602 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2603 }
2604 }
2605 if (optype1 == MEMOP)
2606 {
2607 /* We have to output the address syntax ourselves, since print_operand
2608 doesn't deal with the addresses we want to use. Fix this later. */
2609
2610 rtx addr = XEXP (operands[1], 0);
2611 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2612 {
2613 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2614
2615 operands[1] = XEXP (addr, 0);
2616 gcc_assert (GET_CODE (operands[0]) == REG
2617 && GET_CODE (operands[1]) == REG);
2618
2619 if (!reg_overlap_mentioned_p (high_reg, addr))
2620 {
2621 /* No overlap between high target register and address
2622 register. (We do this in a non-obvious way to
2623 save a register file writeback) */
2624 if (GET_CODE (addr) == POST_INC)
2625 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2626 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2627 }
2628 else
2629 {
2630 /* This is an undefined situation. We should load into the
2631 address register *and* update that register. Probably
2632 we don't need to handle this at all. */
2633 if (GET_CODE (addr) == POST_INC)
2634 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2635 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2636 }
2637 }
2638 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2639 {
2640 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2641
2642 operands[1] = XEXP (addr, 0);
2643 gcc_assert (GET_CODE (operands[0]) == REG
2644 && GET_CODE (operands[1]) == REG);
2645
2646 if (!reg_overlap_mentioned_p (high_reg, addr))
2647 {
2648 /* No overlap between high target register and address
2649 register. (We do this in a non-obvious way to
2650 save a register file writeback) */
2651 if (GET_CODE (addr) == PRE_INC)
2652 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2653 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2654 }
2655 else
2656 {
2657 /* This is an undefined situation. We should load into the
2658 address register *and* update that register. Probably
2659 we don't need to handle this at all. */
2660 if (GET_CODE (addr) == PRE_INC)
2661 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2662 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2663 }
2664 }
2665 else if (GET_CODE (addr) == PLUS
2666 && GET_CODE (XEXP (addr, 0)) == MULT)
2667 {
2668 rtx xoperands[4];
2669
2670 /* Load address into left half of destination register. */
2671 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2672 xoperands[1] = XEXP (addr, 1);
2673 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2674 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2675 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2676 xoperands);
2677 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2678 }
2679 else if (GET_CODE (addr) == PLUS
2680 && REG_P (XEXP (addr, 0))
2681 && REG_P (XEXP (addr, 1)))
2682 {
2683 rtx xoperands[3];
2684
2685 /* Load address into left half of destination register. */
2686 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2687 xoperands[1] = XEXP (addr, 0);
2688 xoperands[2] = XEXP (addr, 1);
2689 output_asm_insn ("{addl|add,l} %1,%2,%0",
2690 xoperands);
2691 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2692 }
2693 }
2694
2695 /* If an operand is an unoffsettable memory ref, find a register
2696 we can increment temporarily to make it refer to the second word. */
2697
2698 if (optype0 == MEMOP)
2699 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2700
2701 if (optype1 == MEMOP)
2702 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2703
2704 /* Ok, we can do one word at a time. Normally we do the low-numbered
2705 word first; if the first move would clobber the source of the
2706 second, we do the high-numbered word first instead (see below).
2707 In either case, set up in LATEHALF the operands to use
2708 for the high-numbered word and in some cases alter the
2709 operands in OPERANDS to be suitable for the low-numbered word. */
2710
2711 if (optype0 == REGOP)
2712 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2713 else if (optype0 == OFFSOP)
2714 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2715 else
2716 latehalf[0] = operands[0];
2717
2718 if (optype1 == REGOP)
2719 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2720 else if (optype1 == OFFSOP)
2721 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2722 else if (optype1 == CNSTOP)
2723 {
2724 if (GET_CODE (operands[1]) == HIGH)
2725 {
2726 operands[1] = XEXP (operands[1], 0);
2727 highonly = 1;
2728 }
2729 split_double (operands[1], &operands[1], &latehalf[1]);
2730 }
2731 else
2732 latehalf[1] = operands[1];
2733
2734 /* If the first move would clobber the source of the second one,
2735 do them in the other order.
2736
2737 This can happen in two cases:
2738
2739 mem -> register where the first half of the destination register
2740 is the same register used in the memory's address. Reload
2741 can create such insns.
2742
2743 mem in this case will be either register indirect or register
2744 indirect plus a valid offset.
2745
2746 register -> register move where REGNO(dst) == REGNO(src) + 1
2747 someone (Tim/Tege?) claimed this can happen for parameter loads.
2748
2749 Handle mem -> register case first. */
2750 if (optype0 == REGOP
2751 && (optype1 == MEMOP || optype1 == OFFSOP)
2752 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2753 {
2754 /* Do the late half first. */
2755 if (addreg1)
2756 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2757 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2758
2759 /* Then clobber. */
2760 if (addreg1)
2761 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2762 return pa_singlemove_string (operands);
2763 }
2764
2765 /* Now handle register -> register case. */
2766 if (optype0 == REGOP && optype1 == REGOP
2767 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2768 {
2769 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2770 return pa_singlemove_string (operands);
2771 }
2772
2773 /* Normal case: do the two words, low-numbered first. */
2774
2775 output_asm_insn (pa_singlemove_string (operands), operands);
2776
2777 /* Make any unoffsettable addresses point at high-numbered word. */
2778 if (addreg0)
2779 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2780 if (addreg1)
2781 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2782
2783 /* Do high-numbered word. */
2784 if (highonly)
2785 output_asm_insn ("ldil L'%1,%0", latehalf);
2786 else
2787 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2788
2789 /* Undo the adds we just did. */
2790 if (addreg0)
2791 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2792 if (addreg1)
2793 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2794
2795 return "";
2796 }
2797 \f
2798 const char *
2799 pa_output_fp_move_double (rtx *operands)
2800 {
2801 if (FP_REG_P (operands[0]))
2802 {
2803 if (FP_REG_P (operands[1])
2804 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2805 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2806 else
2807 output_asm_insn ("fldd%F1 %1,%0", operands);
2808 }
2809 else if (FP_REG_P (operands[1]))
2810 {
2811 output_asm_insn ("fstd%F0 %1,%0", operands);
2812 }
2813 else
2814 {
2815 rtx xoperands[2];
2816
2817 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2818
2819 /* This is a pain. You have to be prepared to deal with an
2820 arbitrary address here including pre/post increment/decrement.
2821
2822 So avoid this in the MD. */
2823 gcc_assert (GET_CODE (operands[0]) == REG);
2824
2825 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2826 xoperands[0] = operands[0];
2827 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2828 }
2829 return "";
2830 }
2831 \f
2832 /* Return a REG that occurs in ADDR with coefficient 1.
2833 ADDR can be effectively incremented by incrementing REG. */
2834
2835 static rtx
2836 find_addr_reg (rtx addr)
2837 {
2838 while (GET_CODE (addr) == PLUS)
2839 {
2840 if (GET_CODE (XEXP (addr, 0)) == REG)
2841 addr = XEXP (addr, 0);
2842 else if (GET_CODE (XEXP (addr, 1)) == REG)
2843 addr = XEXP (addr, 1);
2844 else if (CONSTANT_P (XEXP (addr, 0)))
2845 addr = XEXP (addr, 1);
2846 else if (CONSTANT_P (XEXP (addr, 1)))
2847 addr = XEXP (addr, 0);
2848 else
2849 gcc_unreachable ();
2850 }
2851 gcc_assert (GET_CODE (addr) == REG);
2852 return addr;
2853 }
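/* Editorial example: for (plus (plus (reg %r3) (reg %r4))
   (const_int 8)), the constant operand is skipped, the inner PLUS is
   entered, and %r3 is returned; bumping %r3 by 4 then makes the whole
   address refer to the second word.  */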
2854
2855 /* Emit code to perform a block move.
2856
2857 OPERANDS[0] is the destination pointer as a REG, clobbered.
2858 OPERANDS[1] is the source pointer as a REG, clobbered.
2859 OPERANDS[2] is a register for temporary storage.
2860 OPERANDS[3] is a register for temporary storage.
2861 OPERANDS[4] is the size as a CONST_INT
2862 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2863 OPERANDS[6] is another temporary register. */
2864
2865 const char *
2866 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2867 {
2868 int align = INTVAL (operands[5]);
2869 unsigned long n_bytes = INTVAL (operands[4]);
2870
2871 /* We can't move more than a word at a time because the PA
2872 has no integer move insns longer than a word. (Could use fp mem ops?) */
2873 if (align > (TARGET_64BIT ? 8 : 4))
2874 align = (TARGET_64BIT ? 8 : 4);
2875
2876 /* Note that we know each loop below will execute at least twice
2877 (else we would have open-coded the copy). */
2878 switch (align)
2879 {
2880 case 8:
2881 /* Pre-adjust the loop counter. */
2882 operands[4] = GEN_INT (n_bytes - 16);
2883 output_asm_insn ("ldi %4,%2", operands);
2884
2885 /* Copying loop. */
2886 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2887 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2888 output_asm_insn ("std,ma %3,8(%0)", operands);
2889 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2890 output_asm_insn ("std,ma %6,8(%0)", operands);
2891
2892 /* Handle the residual. There could be up to 15 bytes of
2893 residual to copy! */
2894 if (n_bytes % 16 != 0)
2895 {
2896 operands[4] = GEN_INT (n_bytes % 8);
2897 if (n_bytes % 16 >= 8)
2898 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2899 if (n_bytes % 8 != 0)
2900 output_asm_insn ("ldd 0(%1),%6", operands);
2901 if (n_bytes % 16 >= 8)
2902 output_asm_insn ("std,ma %3,8(%0)", operands);
2903 if (n_bytes % 8 != 0)
2904 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2905 }
2906 return "";
2907
2908 case 4:
2909 /* Pre-adjust the loop counter. */
2910 operands[4] = GEN_INT (n_bytes - 8);
2911 output_asm_insn ("ldi %4,%2", operands);
2912
2913 /* Copying loop. */
2914 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2915 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2916 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2917 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2918 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2919
2920 /* Handle the residual. There could be up to 7 bytes of
2921 residual to copy! */
2922 if (n_bytes % 8 != 0)
2923 {
2924 operands[4] = GEN_INT (n_bytes % 4);
2925 if (n_bytes % 8 >= 4)
2926 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2927 if (n_bytes % 4 != 0)
2928 output_asm_insn ("ldw 0(%1),%6", operands);
2929 if (n_bytes % 8 >= 4)
2930 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2931 if (n_bytes % 4 != 0)
2932 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2933 }
2934 return "";
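/* Editorial trace for align == 4, n_bytes == 23: the counter starts
   at 15 and the loop above copies 16 bytes in two iterations; the
   residual of 7 is handled with one extra word copy (7 >= 4) and a
   final 3-byte {stbys|stby},e store using operands[4] == 3.  */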
2935
2936 case 2:
2937 /* Pre-adjust the loop counter. */
2938 operands[4] = GEN_INT (n_bytes - 4);
2939 output_asm_insn ("ldi %4,%2", operands);
2940
2941 /* Copying loop. */
2942 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2943 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2944 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2945 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2946 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2947
2948 /* Handle the residual. */
2949 if (n_bytes % 4 != 0)
2950 {
2951 if (n_bytes % 4 >= 2)
2952 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2953 if (n_bytes % 2 != 0)
2954 output_asm_insn ("ldb 0(%1),%6", operands);
2955 if (n_bytes % 4 >= 2)
2956 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2957 if (n_bytes % 2 != 0)
2958 output_asm_insn ("stb %6,0(%0)", operands);
2959 }
2960 return "";
2961
2962 case 1:
2963 /* Pre-adjust the loop counter. */
2964 operands[4] = GEN_INT (n_bytes - 2);
2965 output_asm_insn ("ldi %4,%2", operands);
2966
2967 /* Copying loop. */
2968 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2969 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2970 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2971 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2972 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2973
2974 /* Handle the residual. */
2975 if (n_bytes % 2 != 0)
2976 {
2977 output_asm_insn ("ldb 0(%1),%3", operands);
2978 output_asm_insn ("stb %3,0(%0)", operands);
2979 }
2980 return "";
2981
2982 default:
2983 gcc_unreachable ();
2984 }
2985 }
2986
2987 /* Count the number of insns necessary to handle this block move.
2988
2989 Basic structure is the same as pa_output_block_move, except that
2990 we count insns rather than emit them. */
2991
2992 static int
2993 compute_movmem_length (rtx_insn *insn)
2994 {
2995 rtx pat = PATTERN (insn);
2996 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2997 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2998 unsigned int n_insns = 0;
2999
3000 /* We can't move more than a word at a time because the PA
3001 has no integer move insns longer than a word. (Could use fp mem ops?) */
3002 if (align > (TARGET_64BIT ? 8 : 4))
3003 align = (TARGET_64BIT ? 8 : 4);
3004
3005 /* The basic copying loop. */
3006 n_insns = 6;
3007
3008 /* Residuals. */
3009 if (n_bytes % (2 * align) != 0)
3010 {
3011 if ((n_bytes % (2 * align)) >= align)
3012 n_insns += 2;
3013
3014 if ((n_bytes % align) != 0)
3015 n_insns += 2;
3016 }
3017
3018 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3019 return n_insns * 4;
3020 }
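/* Editorial example: with align == 4 and n_bytes == 23, the loop
   costs 6 insns, the word residual (23 % 8 >= 4) adds 2, and the byte
   residual (23 % 4 != 0) adds 2 more, giving 10 insns or 40 bytes.  */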
3021
3022 /* Emit code to perform a block clear.
3023
3024 OPERANDS[0] is the destination pointer as a REG, clobbered.
3025 OPERANDS[1] is a register for temporary storage.
3026 OPERANDS[2] is the size as a CONST_INT
3027 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3028
3029 const char *
3030 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3031 {
3032 int align = INTVAL (operands[3]);
3033 unsigned long n_bytes = INTVAL (operands[2]);
3034
3035 /* We can't clear more than a word at a time because the PA
3036 has no integer move insns longer than a word. */
3037 if (align > (TARGET_64BIT ? 8 : 4))
3038 align = (TARGET_64BIT ? 8 : 4);
3039
3040 /* Note that we know each loop below will execute at least twice
3041 (else we would have open-coded the copy). */
3042 switch (align)
3043 {
3044 case 8:
3045 /* Pre-adjust the loop counter. */
3046 operands[2] = GEN_INT (n_bytes - 16);
3047 output_asm_insn ("ldi %2,%1", operands);
3048
3049 /* Loop. */
3050 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3051 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3052 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3053
3054 /* Handle the residual. There could be up to 15 bytes of
3055 residual to clear! */
3056 if (n_bytes % 16 != 0)
3057 {
3058 operands[2] = GEN_INT (n_bytes % 8);
3059 if (n_bytes % 16 >= 8)
3060 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3061 if (n_bytes % 8 != 0)
3062 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3063 }
3064 return "";
3065
3066 case 4:
3067 /* Pre-adjust the loop counter. */
3068 operands[2] = GEN_INT (n_bytes - 8);
3069 output_asm_insn ("ldi %2,%1", operands);
3070
3071 /* Loop. */
3072 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3073 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3074 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3075
3076 /* Handle the residual. There could be up to 7 bytes of
3077 residual to clear! */
3078 if (n_bytes % 8 != 0)
3079 {
3080 operands[2] = GEN_INT (n_bytes % 4);
3081 if (n_bytes % 8 >= 4)
3082 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3083 if (n_bytes % 4 != 0)
3084 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3085 }
3086 return "";
3087
3088 case 2:
3089 /* Pre-adjust the loop counter. */
3090 operands[2] = GEN_INT (n_bytes - 4);
3091 output_asm_insn ("ldi %2,%1", operands);
3092
3093 /* Loop. */
3094 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3095 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3096 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3097
3098 /* Handle the residual. */
3099 if (n_bytes % 4 != 0)
3100 {
3101 if (n_bytes % 4 >= 2)
3102 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3103 if (n_bytes % 2 != 0)
3104 output_asm_insn ("stb %%r0,0(%0)", operands);
3105 }
3106 return "";
3107
3108 case 1:
3109 /* Pre-adjust the loop counter. */
3110 operands[2] = GEN_INT (n_bytes - 2);
3111 output_asm_insn ("ldi %2,%1", operands);
3112
3113 /* Loop. */
3114 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3115 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3116 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3117
3118 /* Handle the residual. */
3119 if (n_bytes % 2 != 0)
3120 output_asm_insn ("stb %%r0,0(%0)", operands);
3121
3122 return "";
3123
3124 default:
3125 gcc_unreachable ();
3126 }
3127 }
3128
3129 /* Count the number of insns necessary to handle this block clear.
3130
3131 Basic structure is the same as pa_output_block_clear, except that
3132 we count insns rather than emit them. */
3133
3134 static int
3135 compute_clrmem_length (rtx_insn *insn)
3136 {
3137 rtx pat = PATTERN (insn);
3138 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3139 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3140 unsigned int n_insns = 0;
3141
3142 /* We can't clear more than a word at a time because the PA
3143 has no integer move insns longer than a word. */
3144 if (align > (TARGET_64BIT ? 8 : 4))
3145 align = (TARGET_64BIT ? 8 : 4);
3146
3147 /* The basic loop. */
3148 n_insns = 4;
3149
3150 /* Residuals. */
3151 if (n_bytes % (2 * align) != 0)
3152 {
3153 if ((n_bytes % (2 * align)) >= align)
3154 n_insns++;
3155
3156 if ((n_bytes % align) != 0)
3157 n_insns++;
3158 }
3159
3160 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3161 return n_insns * 4;
3162 }
3163 \f
3164
3165 const char *
3166 pa_output_and (rtx *operands)
3167 {
3168 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3169 {
3170 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3171 int ls0, ls1, ms0, p, len;
3172
3173 for (ls0 = 0; ls0 < 32; ls0++)
3174 if ((mask & (1 << ls0)) == 0)
3175 break;
3176
3177 for (ls1 = ls0; ls1 < 32; ls1++)
3178 if ((mask & (1 << ls1)) != 0)
3179 break;
3180
3181 for (ms0 = ls1; ms0 < 32; ms0++)
3182 if ((mask & (1 << ms0)) == 0)
3183 break;
3184
3185 gcc_assert (ms0 == 32);
3186
3187 if (ls1 == 32)
3188 {
3189 len = ls0;
3190
3191 gcc_assert (len);
3192
3193 operands[2] = GEN_INT (len);
3194 return "{extru|extrw,u} %1,31,%2,%0";
3195 }
3196 else
3197 {
3198 /* We could use this `depi' for the case above as well, but `depi'
3199 requires one more register file access than an `extru'. */
3200
3201 p = 31 - ls0;
3202 len = ls1 - ls0;
3203
3204 operands[2] = GEN_INT (p);
3205 operands[3] = GEN_INT (len);
3206 return "{depi|depwi} 0,%2,%3,%0";
3207 }
3208 }
3209 else
3210 return "and %1,%2,%0";
3211 }
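/* Editorial examples: mask 0x000000ff reaches the ls1 == 32 case and
   keeps the low 8 bits with "{extru|extrw,u} %1,31,8,%0"; mask
   0xffff00ff instead clears the 8-bit hole at bits 8..15 with
   "{depi|depwi} 0,23,8,%0".  */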
3212
3213 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3214 storing the result in operands[0]. */
3215 const char *
3216 pa_output_64bit_and (rtx *operands)
3217 {
3218 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3219 {
3220 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3221 int ls0, ls1, ms0, p, len;
3222
3223 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3224 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3225 break;
3226
3227 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3228 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3229 break;
3230
3231 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3232 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3233 break;
3234
3235 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3236
3237 if (ls1 == HOST_BITS_PER_WIDE_INT)
3238 {
3239 len = ls0;
3240
3241 gcc_assert (len);
3242
3243 operands[2] = GEN_INT (len);
3244 return "extrd,u %1,63,%2,%0";
3245 }
3246 else
3247 {
3248 /* We could use this `depdi' for the case above as well, but `depdi'
3249 requires one more register file access than an `extrd'. */
3250
3251 p = 63 - ls0;
3252 len = ls1 - ls0;
3253
3254 operands[2] = GEN_INT (p);
3255 operands[3] = GEN_INT (len);
3256 return "depdi 0,%2,%3,%0";
3257 }
3258 }
3259 else
3260 return "and %1,%2,%0";
3261 }
3262
3263 const char *
3264 pa_output_ior (rtx *operands)
3265 {
3266 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3267 int bs0, bs1, p, len;
3268
3269 if (INTVAL (operands[2]) == 0)
3270 return "copy %1,%0";
3271
3272 for (bs0 = 0; bs0 < 32; bs0++)
3273 if ((mask & (1 << bs0)) != 0)
3274 break;
3275
3276 for (bs1 = bs0; bs1 < 32; bs1++)
3277 if ((mask & (1 << bs1)) == 0)
3278 break;
3279
3280 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3281
3282 p = 31 - bs0;
3283 len = bs1 - bs0;
3284
3285 operands[2] = GEN_INT (p);
3286 operands[3] = GEN_INT (len);
3287 return "{depi|depwi} -1,%2,%3,%0";
3288 }
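/* Editorial example: mask 0x00ff0000 gives bs0 = 16 and bs1 = 24,
   hence p = 15 and len = 8, and the returned template expands to
   "{depi|depwi} -1,15,8,%0", setting exactly bits 16..23.  */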
3289
3290 /* Return a string to perform a bitwise inclusive-or of operands[1]
3291 with operands[2], storing the result in operands[0]. */
3292 const char *
3293 pa_output_64bit_ior (rtx *operands)
3294 {
3295 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3296 int bs0, bs1, p, len;
3297
3298 if (INTVAL (operands[2]) == 0)
3299 return "copy %1,%0";
3300
3301 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3302 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3303 break;
3304
3305 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3306 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3307 break;
3308
3309 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3310 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3311
3312 p = 63 - bs0;
3313 len = bs1 - bs0;
3314
3315 operands[2] = GEN_INT (p);
3316 operands[3] = GEN_INT (len);
3317 return "depdi -1,%2,%3,%0";
3318 }
3319 \f
3320 /* Target hook for assembling integer objects. This code handles
3321 aligned SI and DI integers specially since function references
3322 must be preceded by P%. */
3323
3324 static bool
3325 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3326 {
3327 bool result;
3328 tree decl = NULL;
3329
3330 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to call
3331 assemble_external and set the SYMBOL_REF_DECL to NULL before calling
3332 output_addr_const. Otherwise, output_addr_const may call
3333 assemble_external in the midst of outputting the assembler code for the SYMBOL_REF.
3334 We restore the SYMBOL_REF_DECL after the output is done. */
3335 if (GET_CODE (x) == SYMBOL_REF)
3336 {
3337 decl = SYMBOL_REF_DECL (x);
3338 if (decl)
3339 {
3340 assemble_external (decl);
3341 SET_SYMBOL_REF_DECL (x, NULL);
3342 }
3343 }
3344
3345 if (size == UNITS_PER_WORD
3346 && aligned_p
3347 && function_label_operand (x, VOIDmode))
3348 {
3349 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3350
3351 /* We don't want an OPD when generating fast indirect calls. */
3352 if (!TARGET_FAST_INDIRECT_CALLS)
3353 fputs ("P%", asm_out_file);
3354
3355 output_addr_const (asm_out_file, x);
3356 fputc ('\n', asm_out_file);
3357 result = true;
3358 }
3359 else
3360 result = default_assemble_integer (x, size, aligned_p);
3361
3362 if (decl)
3363 SET_SYMBOL_REF_DECL (x, decl);
3364
3365 return result;
3366 }
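/* Editorial example: a word-sized, aligned reference to function foo
   is emitted as "\t.word\tP%foo" (".dword" when UNITS_PER_WORD is 8);
   the P% marker asks the linker for a function descriptor (plabel)
   rather than the raw code address, which is exactly what fast
   indirect calls must avoid.  */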
3367 \f
3368 /* Output an ascii string. */
3369 void
3370 pa_output_ascii (FILE *file, const char *p, int size)
3371 {
3372 int i;
3373 int chars_output;
3374 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3375
3376 /* The HP assembler can only take strings of 256 characters at one
3377 time. This is a limitation on input line length, *not* the
3378 length of the string. Sigh. Even worse, it seems that the
3379 restriction is in number of input characters (see \xnn &
3380 \whatever). So we have to do this very carefully. */
3381
3382 fputs ("\t.STRING \"", file);
3383
3384 chars_output = 0;
3385 for (i = 0; i < size; i += 4)
3386 {
3387 int co = 0;
3388 int io = 0;
3389 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3390 {
3391 register unsigned int c = (unsigned char) p[i + io];
3392
3393 if (c == '\"' || c == '\\')
3394 partial_output[co++] = '\\';
3395 if (c >= ' ' && c < 0177)
3396 partial_output[co++] = c;
3397 else
3398 {
3399 unsigned int hexd;
3400 partial_output[co++] = '\\';
3401 partial_output[co++] = 'x';
3402 hexd = c / 16 + '0';
3403 if (hexd > '9')
3404 hexd -= '9' - 'a' + 1;
3405 partial_output[co++] = hexd;
3406 hexd = c % 16 + '0';
3407 if (hexd > '9')
3408 hexd -= '9' - 'a' + 1;
3409 partial_output[co++] = hexd;
3410 }
3411 }
3412 if (chars_output + co > 243)
3413 {
3414 fputs ("\"\n\t.STRING \"", file);
3415 chars_output = 0;
3416 }
3417 fwrite (partial_output, 1, (size_t) co, file);
3418 chars_output += co;
3419 co = 0;
3420 }
3421 fputs ("\"\n", file);
3422 }
3423
3424 /* Try to rewrite floating point comparisons & branches to avoid
3425 useless add,tr insns.
3426
3427 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3428 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3429 first attempt to remove useless add,tr insns. It is zero
3430 for the second pass as reorg sometimes leaves bogus REG_DEAD
3431 notes lying around.
3432
3433 When CHECK_NOTES is zero we can only eliminate add,tr insns
3434 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3435 instructions. */
3436 static void
3437 remove_useless_addtr_insns (int check_notes)
3438 {
3439 rtx_insn *insn;
3440 static int pass = 0;
3441
3442 /* This is fairly cheap, so always run it when optimizing. */
3443 if (optimize > 0)
3444 {
3445 int fcmp_count = 0;
3446 int fbranch_count = 0;
3447
3448 /* Walk all the insns in this function looking for fcmp & fbranch
3449 instructions. Keep track of how many of each we find. */
3450 for (insn = get_insns (); insn; insn = next_insn (insn))
3451 {
3452 rtx tmp;
3453
3454 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3455 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3456 continue;
3457
3458 tmp = PATTERN (insn);
3459
3460 /* It must be a set. */
3461 if (GET_CODE (tmp) != SET)
3462 continue;
3463
3464 /* If the destination is CCFP, then we've found an fcmp insn. */
3465 tmp = SET_DEST (tmp);
3466 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3467 {
3468 fcmp_count++;
3469 continue;
3470 }
3471
3472 tmp = PATTERN (insn);
3473 /* If this is an fbranch instruction, bump the fbranch counter. */
3474 if (GET_CODE (tmp) == SET
3475 && SET_DEST (tmp) == pc_rtx
3476 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3477 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3478 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3479 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3480 {
3481 fbranch_count++;
3482 continue;
3483 }
3484 }
3485
3486
3487 /* Find all floating point compare + branch insns. If possible,
3488 reverse the comparison & the branch to avoid add,tr insns. */
3489 for (insn = get_insns (); insn; insn = next_insn (insn))
3490 {
3491 rtx tmp;
3492 rtx_insn *next;
3493
3494 /* Ignore anything that isn't an INSN. */
3495 if (! NONJUMP_INSN_P (insn))
3496 continue;
3497
3498 tmp = PATTERN (insn);
3499
3500 /* It must be a set. */
3501 if (GET_CODE (tmp) != SET)
3502 continue;
3503
3504 /* The destination must be CCFP, which is register zero. */
3505 tmp = SET_DEST (tmp);
3506 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3507 continue;
3508
3509 /* INSN should be a set of CCFP.
3510
3511 See if the result of this insn is used in a reversed FP
3512 conditional branch. If so, reverse our condition and
3513 the branch. Doing so avoids useless add,tr insns. */
3514 next = next_insn (insn);
3515 while (next)
3516 {
3517 /* Jumps, calls and labels stop our search. */
3518 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3519 break;
3520
3521 /* As does another fcmp insn. */
3522 if (NONJUMP_INSN_P (next)
3523 && GET_CODE (PATTERN (next)) == SET
3524 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3525 && REGNO (SET_DEST (PATTERN (next))) == 0)
3526 break;
3527
3528 next = next_insn (next);
3529 }
3530
3531 /* Is NEXT a branch? */
3532 if (next && JUMP_P (next))
3533 {
3534 rtx pattern = PATTERN (next);
3535
3536 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3537 and CCFP dies, then reverse our conditional and the branch
3538 to avoid the add,tr. */
3539 if (GET_CODE (pattern) == SET
3540 && SET_DEST (pattern) == pc_rtx
3541 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3542 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3543 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3544 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3545 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3546 && (fcmp_count == fbranch_count
3547 || (check_notes
3548 && find_regno_note (next, REG_DEAD, 0))))
3549 {
3550 /* Reverse the branch. */
3551 tmp = XEXP (SET_SRC (pattern), 1);
3552 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3553 XEXP (SET_SRC (pattern), 2) = tmp;
3554 INSN_CODE (next) = -1;
3555
3556 /* Reverse our condition. */
3557 tmp = PATTERN (insn);
3558 PUT_CODE (XEXP (tmp, 1),
3559 (reverse_condition_maybe_unordered
3560 (GET_CODE (XEXP (tmp, 1)))));
3561 }
3562 }
3563 }
3564 }
3565
3566 pass = !pass;
3567
3568 }
3569 \f
3570 /* You may have trouble believing this, but this is the 32 bit HP-PA
3571 stack layout. Wow.
3572
3573 Offset Contents
3574
3575 Variable arguments (optional; any number may be allocated)
3576
3577 SP-(4*(N+9)) arg word N
3578 : :
3579 SP-56 arg word 5
3580 SP-52 arg word 4
3581
3582 Fixed arguments (must be allocated; may remain unused)
3583
3584 SP-48 arg word 3
3585 SP-44 arg word 2
3586 SP-40 arg word 1
3587 SP-36 arg word 0
3588
3589 Frame Marker
3590
3591 SP-32 External Data Pointer (DP)
3592 SP-28 External sr4
3593 SP-24 External/stub RP (RP')
3594 SP-20 Current RP
3595 SP-16 Static Link
3596 SP-12 Clean up
3597 SP-8 Calling Stub RP (RP'')
3598 SP-4 Previous SP
3599
3600 Top of Frame
3601
3602 SP-0 Stack Pointer (points to next available address)
3603
3604 */
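/* A quick check of the formula above: arg word 4 lives at
 SP-(4*(4+9)) = SP-52, and arg word 5 at SP-(4*(5+9)) = SP-56,
 matching the table entries. */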
3605
3606 /* This function saves registers as follows. Registers marked with ' are
3607 this function's registers (as opposed to the previous function's).
3608 If a frame pointer isn't needed, r4 is saved as a general register;
3609 the space for the frame pointer is still allocated, though, to keep
3610 things simple.
3611
3612
3613 Top of Frame
3614
3615 SP (FP') Previous FP
3616 SP + 4 Alignment filler (sigh)
3617 SP + 8 Space for locals reserved here.
3618 .
3619 .
3620 .
3621 SP + n All call saved registers used.
3622 .
3623 .
3624 .
3625 SP + o All call saved fp registers used.
3626 .
3627 .
3628 .
3629 SP + p (SP') points to next available address.
3630
3631 */
3632
3633 /* Global variables set by output_function_prologue(). */
3634 /* Size of frame. Need to know this to emit return insns from
3635 leaf procedures. */
3636 static HOST_WIDE_INT actual_fsize, local_fsize;
3637 static int save_fregs;
3638
3639 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3640 Handle case where DISP > 8k by using the add_high_const patterns.
3641
3642 Note that in the DISP > 8k case, we will leave the high part of the address
3643 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
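/* Illustrative sketch (not emitted literally; the exact assembly
 depends on the assembler dialect): a store of %r3 at displacement
 0x5000 from %r30 does not fit in 14 bits, so the final case below
 produces code along the lines of

 addil L'0x5000,%r30 ; %r1 = %r30 + left portion of 0x5000
 stw %r3,R'0x5000(%r1) ; store using the right portion

 leaving the high part of the address in %r1 as noted above. */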
3644
3645 static void
3646 store_reg (int reg, HOST_WIDE_INT disp, int base)
3647 {
3648 rtx dest, src, basereg;
3649 rtx_insn *insn;
3650
3651 src = gen_rtx_REG (word_mode, reg);
3652 basereg = gen_rtx_REG (Pmode, base);
3653 if (VAL_14_BITS_P (disp))
3654 {
3655 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3656 insn = emit_move_insn (dest, src);
3657 }
3658 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3659 {
3660 rtx delta = GEN_INT (disp);
3661 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3662
3663 emit_move_insn (tmpreg, delta);
3664 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3665 if (DO_FRAME_NOTES)
3666 {
3667 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3668 gen_rtx_SET (tmpreg,
3669 gen_rtx_PLUS (Pmode, basereg, delta)));
3670 RTX_FRAME_RELATED_P (insn) = 1;
3671 }
3672 dest = gen_rtx_MEM (word_mode, tmpreg);
3673 insn = emit_move_insn (dest, src);
3674 }
3675 else
3676 {
3677 rtx delta = GEN_INT (disp);
3678 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3679 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3680
3681 emit_move_insn (tmpreg, high);
3682 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3683 insn = emit_move_insn (dest, src);
3684 if (DO_FRAME_NOTES)
3685 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3686 gen_rtx_SET (gen_rtx_MEM (word_mode,
3687 gen_rtx_PLUS (word_mode,
3688 basereg,
3689 delta)),
3690 src));
3691 }
3692
3693 if (DO_FRAME_NOTES)
3694 RTX_FRAME_RELATED_P (insn) = 1;
3695 }
3696
3697 /* Emit RTL to store REG at the memory location specified by BASE and then
3698 add MOD to BASE. MOD must be <= 8k. */
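/* On 32-bit targets this matches the stwm idiom: for instance,
 "stwm %r3,128(%r30)" (illustrative) stores %r3 at *%r30 and then
 post-increments %r30 by 128 in a single instruction. */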
3699
3700 static void
3701 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3702 {
3703 rtx basereg, srcreg, delta;
3704 rtx_insn *insn;
3705
3706 gcc_assert (VAL_14_BITS_P (mod));
3707
3708 basereg = gen_rtx_REG (Pmode, base);
3709 srcreg = gen_rtx_REG (word_mode, reg);
3710 delta = GEN_INT (mod);
3711
3712 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3713 if (DO_FRAME_NOTES)
3714 {
3715 RTX_FRAME_RELATED_P (insn) = 1;
3716
3717 /* RTX_FRAME_RELATED_P must be set on each frame related set
3718 in a parallel with more than one element. */
3719 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3720 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3721 }
3722 }
3723
3724 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3725 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3726 whether to add a frame note or not.
3727
3728 In the DISP > 8k case, we leave the high part of the address in %r1.
3729 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3730
3731 static void
3732 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3733 {
3734 rtx_insn *insn;
3735
3736 if (VAL_14_BITS_P (disp))
3737 {
3738 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3739 plus_constant (Pmode,
3740 gen_rtx_REG (Pmode, base), disp));
3741 }
3742 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3743 {
3744 rtx basereg = gen_rtx_REG (Pmode, base);
3745 rtx delta = GEN_INT (disp);
3746 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3747
3748 emit_move_insn (tmpreg, delta);
3749 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3750 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3751 if (DO_FRAME_NOTES)
3752 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3753 gen_rtx_SET (tmpreg,
3754 gen_rtx_PLUS (Pmode, basereg, delta)));
3755 }
3756 else
3757 {
3758 rtx basereg = gen_rtx_REG (Pmode, base);
3759 rtx delta = GEN_INT (disp);
3760 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3761
3762 emit_move_insn (tmpreg,
3763 gen_rtx_PLUS (Pmode, basereg,
3764 gen_rtx_HIGH (Pmode, delta)));
3765 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3766 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3767 }
3768
3769 if (DO_FRAME_NOTES && note)
3770 RTX_FRAME_RELATED_P (insn) = 1;
3771 }
3772
3773 HOST_WIDE_INT
3774 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3775 {
3776 int freg_saved = 0;
3777 int i, j;
3778
3779 /* The code in pa_expand_prologue and pa_expand_epilogue must
3780 be consistent with the rounding and size calculation done here.
3781 Change them at the same time. */
3782
3783 /* We do our own stack alignment. First, round the size of the
3784 stack locals up to a word boundary. */
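/* For instance, with 4-byte words a 10-byte local area rounds
 up to 12 bytes. */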
3785 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3786
3787 /* Space for previous frame pointer + filler. If any frame is
3788 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3789 waste some space here for the sake of HP compatibility. The
3790 first slot is only used when the frame pointer is needed. */
3791 if (size || frame_pointer_needed)
3792 size += STARTING_FRAME_OFFSET;
3793
3794 /* If the current function calls __builtin_eh_return, then we need
3795 to allocate stack space for registers that will hold data for
3796 the exception handler. */
3797 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3798 {
3799 unsigned int i;
3800
3801 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3802 continue;
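/* The loop body is empty; the loop only counts the EH data
 registers so a word can be reserved for each. */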
3803 size += i * UNITS_PER_WORD;
3804 }
3805
3806 /* Account for space used by the callee general register saves. */
3807 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3808 if (df_regs_ever_live_p (i))
3809 size += UNITS_PER_WORD;
3810
3811 /* Account for space used by the callee floating point register saves. */
3812 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3813 if (df_regs_ever_live_p (i)
3814 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3815 {
3816 freg_saved = 1;
3817
3818 /* We always save both halves of the FP register, so always
3819 increment the frame size by 8 bytes. */
3820 size += 8;
3821 }
3822
3823 /* If any of the floating registers are saved, account for the
3824 alignment needed for the floating point register save block. */
3825 if (freg_saved)
3826 {
3827 size = (size + 7) & ~7;
3828 if (fregs_live)
3829 *fregs_live = 1;
3830 }
3831
3832 /* The various ABIs include space for the outgoing parameters in the
3833 size of the current function's stack frame. We don't need to align
3834 for the outgoing arguments as their alignment is set by the final
3835 rounding for the frame as a whole. */
3836 size += crtl->outgoing_args_size;
3837
3838 /* Allocate space for the fixed frame marker. This space must be
3839 allocated for any function that makes calls or allocates
3840 stack space. */
3841 if (!crtl->is_leaf || size)
3842 size += TARGET_64BIT ? 48 : 32;
3843
3844 /* Finally, round to the preferred stack boundary. */
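/* For instance, assuming the usual 64-byte preferred boundary on
 32-bit HP-PA, a 100-byte frame rounds up to 128 bytes. */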
3845 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3846 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3847 }
3848
3849 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3850 of memory. If any fpu reg is used in the function, we allocate
3851 such a block here, at the bottom of the frame, just in case it's needed.
3852
3853 If this function is a leaf procedure, then we may choose not
3854 to do a "save" insn. The decision about whether or not
3855 to do this is made in regclass.c. */
3856
3857 static void
3858 pa_output_function_prologue (FILE *file)
3859 {
3860 /* The function's label and associated .PROC must never be
3861 separated and must be output *after* any profiling declarations
3862 to avoid changing spaces/subspaces within a procedure. */
3863 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3864 fputs ("\t.PROC\n", file);
3865
3866 /* pa_expand_prologue does the dirty work now. We just need
3867 to output the assembler directives which denote the start
3868 of a function. */
3869 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3870 if (crtl->is_leaf)
3871 fputs (",NO_CALLS", file);
3872 else
3873 fputs (",CALLS", file);
3874 if (rp_saved)
3875 fputs (",SAVE_RP", file);
3876
3877 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3878 at the beginning of the frame and that it is used as the frame
3879 pointer for the frame. We do this because our current frame
3880 layout doesn't conform to that specified in the HP runtime
3881 documentation and we need a way to indicate to programs such as
3882 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3883 isn't used by HP compilers but is supported by the assembler.
3884 However, SAVE_SP is supposed to indicate that the previous stack
3885 pointer has been saved in the frame marker. */
3886 if (frame_pointer_needed)
3887 fputs (",SAVE_SP", file);
3888
3889 /* Pass on information about the number of callee register saves
3890 performed in the prologue.
3891
3892 The compiler is supposed to pass the highest register number
3893 saved; the assembler then has to adjust that number before
3894 entering it into the unwind descriptor (to account for any
3895 caller saved registers with lower register numbers than the
3896 first callee saved register). */
3897 if (gr_saved)
3898 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3899
3900 if (fr_saved)
3901 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3902
3903 fputs ("\n\t.ENTRY\n", file);
3904
3905 remove_useless_addtr_insns (0);
3906 }
3907
3908 void
3909 pa_expand_prologue (void)
3910 {
3911 int merge_sp_adjust_with_store = 0;
3912 HOST_WIDE_INT size = get_frame_size ();
3913 HOST_WIDE_INT offset;
3914 int i;
3915 rtx tmpreg;
3916 rtx_insn *insn;
3917
3918 gr_saved = 0;
3919 fr_saved = 0;
3920 save_fregs = 0;
3921
3922 /* Compute total size for frame pointer, filler, locals and rounding to
3923 the next word boundary. Similar code appears in pa_compute_frame_size
3924 and must be changed in tandem with this code. */
3925 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3926 if (local_fsize || frame_pointer_needed)
3927 local_fsize += STARTING_FRAME_OFFSET;
3928
3929 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3930 if (flag_stack_usage_info)
3931 current_function_static_stack_size = actual_fsize;
3932
3933 /* Compute a few things we will use often. */
3934 tmpreg = gen_rtx_REG (word_mode, 1);
3935
3936 /* Save RP first. The calling conventions manual states RP will
3937 always be stored into the caller's frame at sp - 20 or sp - 16
3938 depending on which ABI is in use. */
3939 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3940 {
3941 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3942 rp_saved = true;
3943 }
3944 else
3945 rp_saved = false;
3946
3947 /* Allocate the local frame and set up the frame pointer if needed. */
3948 if (actual_fsize != 0)
3949 {
3950 if (frame_pointer_needed)
3951 {
3952 /* Copy the old frame pointer temporarily into %r1. Set up the
3953 new stack pointer, then store away the saved old frame pointer
3954 into the stack at sp and at the same time update the stack
3955 pointer by actual_fsize bytes. Two versions: the first
3956 handles small (<8k) frames, the second large (>=8k)
3957 frames. */
3958 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3959 if (DO_FRAME_NOTES)
3960 RTX_FRAME_RELATED_P (insn) = 1;
3961
3962 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3963 if (DO_FRAME_NOTES)
3964 RTX_FRAME_RELATED_P (insn) = 1;
3965
3966 if (VAL_14_BITS_P (actual_fsize))
3967 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3968 else
3969 {
3970 /* It is incorrect to store the saved frame pointer at *sp,
3971 then increment sp (writes beyond the current stack boundary).
3972
3973 So instead use stwm to store at *sp and post-increment the
3974 stack pointer as an atomic operation. Then increment sp to
3975 finish allocating the new frame. */
3976 HOST_WIDE_INT adjust1 = 8192 - 64;
3977 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3978
3979 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3980 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3981 adjust2, 1);
3982 }
3983
3984 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3985 we need to store the previous stack pointer (frame pointer)
3986 into the frame marker on targets that use the HP unwind
3987 library. This allows the HP unwind library to be used to
3988 unwind GCC frames. However, we are not fully compatible
3989 with the HP library because our frame layout differs from
3990 that specified in the HP runtime specification.
3991
3992 We don't want a frame note on this instruction as the frame
3993 marker moves during dynamic stack allocation.
3994
3995 This instruction also serves as a blockage to prevent
3996 register spills from being scheduled before the stack
3997 pointer is raised. This is necessary as we store
3998 registers using the frame pointer as a base register,
3999 and the frame pointer is set before sp is raised. */
4000 if (TARGET_HPUX_UNWIND_LIBRARY)
4001 {
4002 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
4003 GEN_INT (TARGET_64BIT ? -8 : -4));
4004
4005 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4006 hard_frame_pointer_rtx);
4007 }
4008 else
4009 emit_insn (gen_blockage ());
4010 }
4011 /* No frame pointer needed. */
4012 else
4013 {
4014 /* In some cases we can perform the first callee register save
4015 and allocate the stack frame at the same time. If so, just
4016 make a note of it and defer allocating the frame until saving
4017 the callee registers. */
4018 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4019 merge_sp_adjust_with_store = 1;
4020 /* Cannot optimize. Adjust the stack frame by actual_fsize
4021 bytes. */
4022 else
4023 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4024 actual_fsize, 1);
4025 }
4026 }
4027
4028 /* Normal register save.
4029
4030 Do not save the frame pointer in the frame_pointer_needed case. It
4031 was done earlier. */
4032 if (frame_pointer_needed)
4033 {
4034 offset = local_fsize;
4035
4036 /* Saving the EH return data registers in the frame is the simplest
4037 way to get the frame unwind information emitted. We put them
4038 just before the general registers. */
4039 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4040 {
4041 unsigned int i, regno;
4042
4043 for (i = 0; ; ++i)
4044 {
4045 regno = EH_RETURN_DATA_REGNO (i);
4046 if (regno == INVALID_REGNUM)
4047 break;
4048
4049 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4050 offset += UNITS_PER_WORD;
4051 }
4052 }
4053
4054 for (i = 18; i >= 4; i--)
4055 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4056 {
4057 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4058 offset += UNITS_PER_WORD;
4059 gr_saved++;
4060 }
4061 /* Account for %r3 which is saved in a special place. */
4062 gr_saved++;
4063 }
4064 /* No frame pointer needed. */
4065 else
4066 {
4067 offset = local_fsize - actual_fsize;
4068
4069 /* Saving the EH return data registers in the frame is the simplest
4070 way to get the frame unwind information emitted. */
4071 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4072 {
4073 unsigned int i, regno;
4074
4075 for (i = 0; ; ++i)
4076 {
4077 regno = EH_RETURN_DATA_REGNO (i);
4078 if (regno == INVALID_REGNUM)
4079 break;
4080
4081 /* If merge_sp_adjust_with_store is nonzero, then we can
4082 optimize the first save. */
4083 if (merge_sp_adjust_with_store)
4084 {
4085 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4086 merge_sp_adjust_with_store = 0;
4087 }
4088 else
4089 store_reg (regno, offset, STACK_POINTER_REGNUM);
4090 offset += UNITS_PER_WORD;
4091 }
4092 }
4093
4094 for (i = 18; i >= 3; i--)
4095 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4096 {
4097 /* If merge_sp_adjust_with_store is nonzero, then we can
4098 optimize the first GR save. */
4099 if (merge_sp_adjust_with_store)
4100 {
4101 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4102 merge_sp_adjust_with_store = 0;
4103 }
4104 else
4105 store_reg (i, offset, STACK_POINTER_REGNUM);
4106 offset += UNITS_PER_WORD;
4107 gr_saved++;
4108 }
4109
4110 /* If we wanted to merge the SP adjustment with a GR save, but we never
4111 did any GR saves, then just emit the adjustment here. */
4112 if (merge_sp_adjust_with_store)
4113 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4114 actual_fsize, 1);
4115 }
4116
4117 /* The hppa calling conventions say that %r19, the pic offset
4118 register, is saved at sp - 32 (in this function's frame)
4119 when generating PIC code. FIXME: What is the correct thing
4120 to do for functions which make no calls and allocate no
4121 frame? Do we need to allocate a frame, or can we just omit
4122 the save? For now we'll just omit the save.
4123
4124 We don't want a note on this insn as the frame marker can
4125 move if there is a dynamic stack allocation. */
4126 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4127 {
4128 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4129
4130 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4131
4132 }
4133
4134 /* Align pointer properly (doubleword boundary). */
4135 offset = (offset + 7) & ~7;
4136
4137 /* Floating point register store. */
4138 if (save_fregs)
4139 {
4140 rtx base;
4141
4142 /* First get the frame or stack pointer to the start of the FP register
4143 save area. */
4144 if (frame_pointer_needed)
4145 {
4146 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4147 base = hard_frame_pointer_rtx;
4148 }
4149 else
4150 {
4151 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4152 base = stack_pointer_rtx;
4153 }
4154
4155 /* Now actually save the FP registers. */
4156 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4157 {
4158 if (df_regs_ever_live_p (i)
4159 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4160 {
4161 rtx addr, reg;
4162 rtx_insn *insn;
4163 addr = gen_rtx_MEM (DFmode,
4164 gen_rtx_POST_INC (word_mode, tmpreg));
4165 reg = gen_rtx_REG (DFmode, i);
4166 insn = emit_move_insn (addr, reg);
4167 if (DO_FRAME_NOTES)
4168 {
4169 RTX_FRAME_RELATED_P (insn) = 1;
4170 if (TARGET_64BIT)
4171 {
4172 rtx mem = gen_rtx_MEM (DFmode,
4173 plus_constant (Pmode, base,
4174 offset));
4175 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4176 gen_rtx_SET (mem, reg));
4177 }
4178 else
4179 {
4180 rtx meml = gen_rtx_MEM (SFmode,
4181 plus_constant (Pmode, base,
4182 offset));
4183 rtx memr = gen_rtx_MEM (SFmode,
4184 plus_constant (Pmode, base,
4185 offset + 4));
4186 rtx regl = gen_rtx_REG (SFmode, i);
4187 rtx regr = gen_rtx_REG (SFmode, i + 1);
4188 rtx setl = gen_rtx_SET (meml, regl);
4189 rtx setr = gen_rtx_SET (memr, regr);
4190 rtvec vec;
4191
4192 RTX_FRAME_RELATED_P (setl) = 1;
4193 RTX_FRAME_RELATED_P (setr) = 1;
4194 vec = gen_rtvec (2, setl, setr);
4195 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4196 gen_rtx_SEQUENCE (VOIDmode, vec));
4197 }
4198 }
4199 offset += GET_MODE_SIZE (DFmode);
4200 fr_saved++;
4201 }
4202 }
4203 }
4204 }
4205
4206 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4207 Handle case where DISP > 8k by using the add_high_const patterns. */
4208
4209 static void
4210 load_reg (int reg, HOST_WIDE_INT disp, int base)
4211 {
4212 rtx dest = gen_rtx_REG (word_mode, reg);
4213 rtx basereg = gen_rtx_REG (Pmode, base);
4214 rtx src;
4215
4216 if (VAL_14_BITS_P (disp))
4217 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4218 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4219 {
4220 rtx delta = GEN_INT (disp);
4221 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4222
4223 emit_move_insn (tmpreg, delta);
4224 if (TARGET_DISABLE_INDEXING)
4225 {
4226 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4227 src = gen_rtx_MEM (word_mode, tmpreg);
4228 }
4229 else
4230 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4231 }
4232 else
4233 {
4234 rtx delta = GEN_INT (disp);
4235 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4236 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4237
4238 emit_move_insn (tmpreg, high);
4239 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4240 }
4241
4242 emit_move_insn (dest, src);
4243 }
4244
4245 /* Update the total code bytes output to the text section. */
4246
4247 static void
4248 update_total_code_bytes (unsigned int nbytes)
4249 {
4250 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4251 && !IN_NAMED_SECTION_P (cfun->decl))
4252 {
4253 unsigned int old_total = total_code_bytes;
4254
4255 total_code_bytes += nbytes;
4256
4257 /* Be prepared to handle overflows. */
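/* Unsigned wraparound makes the new total smaller than the old
 one; saturate at UINT_MAX when that happens. */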
4258 if (old_total > total_code_bytes)
4259 total_code_bytes = UINT_MAX;
4260 }
4261 }
4262
4263 /* This function generates the assembly code for function exit.
4264 Args are as for output_function_prologue ().
4265
4266 The function epilogue should not depend on the current stack
4267 pointer! It should use the frame pointer only. This is mandatory
4268 because of alloca; we also take advantage of it to omit stack
4269 adjustments before returning. */
4270
4271 static void
4272 pa_output_function_epilogue (FILE *file)
4273 {
4274 rtx_insn *insn = get_last_insn ();
4275 bool extra_nop;
4276
4277 /* pa_expand_epilogue does the dirty work now. We just need
4278 to output the assembler directives which denote the end
4279 of a function.
4280
4281 To make debuggers happy, emit a nop if the epilogue was completely
4282 eliminated due to a volatile call as the last insn in the
4283 current function. That way the return address (in %r2) will
4284 always point to a valid instruction in the current function. */
4285
4286 /* Get the last real insn. */
4287 if (NOTE_P (insn))
4288 insn = prev_real_insn (insn);
4289
4290 /* If it is a sequence, then look inside. */
4291 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4292 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4293
4294 /* If insn is a CALL_INSN, then it must be a call to a volatile
4295 function (otherwise there would be epilogue insns). */
4296 if (insn && CALL_P (insn))
4297 {
4298 fputs ("\tnop\n", file);
4299 extra_nop = true;
4300 }
4301 else
4302 extra_nop = false;
4303
4304 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4305
4306 if (TARGET_SOM && TARGET_GAS)
4307 {
4308 /* We are done with this subspace except possibly for some additional
4309 debug information. Forget that we are in this subspace to ensure
4310 that the next function is output in its own subspace. */
4311 in_section = NULL;
4312 cfun->machine->in_nsubspa = 2;
4313 }
4314
4315 /* Thunks do their own insn accounting. */
4316 if (cfun->is_thunk)
4317 return;
4318
4319 if (INSN_ADDRESSES_SET_P ())
4320 {
4321 last_address = extra_nop ? 4 : 0;
4322 insn = get_last_nonnote_insn ();
4323 if (insn)
4324 {
4325 last_address += INSN_ADDRESSES (INSN_UID (insn));
4326 if (INSN_P (insn))
4327 last_address += insn_default_length (insn);
4328 }
4329 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4330 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4331 }
4332 else
4333 last_address = UINT_MAX;
4334
4335 /* Finally, update the total number of code bytes output so far. */
4336 update_total_code_bytes (last_address);
4337 }
4338
4339 void
4340 pa_expand_epilogue (void)
4341 {
4342 rtx tmpreg;
4343 HOST_WIDE_INT offset;
4344 HOST_WIDE_INT ret_off = 0;
4345 int i;
4346 int merge_sp_adjust_with_load = 0;
4347
4348 /* We will use this often. */
4349 tmpreg = gen_rtx_REG (word_mode, 1);
4350
4351 /* Try to restore RP early to avoid load/use interlocks when
4352 RP gets used in the return (bv) instruction. This appears to still
4353 be necessary even when we schedule the prologue and epilogue. */
4354 if (rp_saved)
4355 {
4356 ret_off = TARGET_64BIT ? -16 : -20;
4357 if (frame_pointer_needed)
4358 {
4359 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4360 ret_off = 0;
4361 }
4362 else
4363 {
4364 /* No frame pointer, and stack is smaller than 8k. */
4365 if (VAL_14_BITS_P (ret_off - actual_fsize))
4366 {
4367 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4368 ret_off = 0;
4369 }
4370 }
4371 }
4372
4373 /* General register restores. */
4374 if (frame_pointer_needed)
4375 {
4376 offset = local_fsize;
4377
4378 /* If the current function calls __builtin_eh_return, then we need
4379 to restore the saved EH data registers. */
4380 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4381 {
4382 unsigned int i, regno;
4383
4384 for (i = 0; ; ++i)
4385 {
4386 regno = EH_RETURN_DATA_REGNO (i);
4387 if (regno == INVALID_REGNUM)
4388 break;
4389
4390 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4391 offset += UNITS_PER_WORD;
4392 }
4393 }
4394
4395 for (i = 18; i >= 4; i--)
4396 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4397 {
4398 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4399 offset += UNITS_PER_WORD;
4400 }
4401 }
4402 else
4403 {
4404 offset = local_fsize - actual_fsize;
4405
4406 /* If the current function calls __builtin_eh_return, then we need
4407 to restore the saved EH data registers. */
4408 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4409 {
4410 unsigned int i, regno;
4411
4412 for (i = 0; ; ++i)
4413 {
4414 regno = EH_RETURN_DATA_REGNO (i);
4415 if (regno == INVALID_REGNUM)
4416 break;
4417
4418 /* Only for the first load.
4419 merge_sp_adjust_with_load holds the register load
4420 with which we will merge the sp adjustment. */
4421 if (merge_sp_adjust_with_load == 0
4422 && local_fsize == 0
4423 && VAL_14_BITS_P (-actual_fsize))
4424 merge_sp_adjust_with_load = regno;
4425 else
4426 load_reg (regno, offset, STACK_POINTER_REGNUM);
4427 offset += UNITS_PER_WORD;
4428 }
4429 }
4430
4431 for (i = 18; i >= 3; i--)
4432 {
4433 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4434 {
4435 /* Only for the first load.
4436 merge_sp_adjust_with_load holds the register load
4437 with which we will merge the sp adjustment. */
4438 if (merge_sp_adjust_with_load == 0
4439 && local_fsize == 0
4440 && VAL_14_BITS_P (-actual_fsize))
4441 merge_sp_adjust_with_load = i;
4442 else
4443 load_reg (i, offset, STACK_POINTER_REGNUM);
4444 offset += UNITS_PER_WORD;
4445 }
4446 }
4447 }
4448
4449 /* Align pointer properly (doubleword boundary). */
4450 offset = (offset + 7) & ~7;
4451
4452 /* FP register restores. */
4453 if (save_fregs)
4454 {
4455 /* Adjust the register to index off of. */
4456 if (frame_pointer_needed)
4457 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4458 else
4459 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4460
4461 /* Actually do the restores now. */
4462 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4463 if (df_regs_ever_live_p (i)
4464 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4465 {
4466 rtx src = gen_rtx_MEM (DFmode,
4467 gen_rtx_POST_INC (word_mode, tmpreg));
4468 rtx dest = gen_rtx_REG (DFmode, i);
4469 emit_move_insn (dest, src);
4470 }
4471 }
4472
4473 /* Emit a blockage insn here to keep these insns from being moved to
4474 an earlier spot in the epilogue, or into the main instruction stream.
4475
4476 This is necessary as we must not cut the stack back before all the
4477 restores are finished. */
4478 emit_insn (gen_blockage ());
4479
4480 /* Reset stack pointer (and possibly frame pointer). The stack
4481 pointer is initially set to fp + 64 to avoid a race condition. */
4482 if (frame_pointer_needed)
4483 {
4484 rtx delta = GEN_INT (-64);
4485
4486 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4487 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4488 stack_pointer_rtx, delta));
4489 }
4490 /* If we were deferring a callee register restore, do it now. */
4491 else if (merge_sp_adjust_with_load)
4492 {
4493 rtx delta = GEN_INT (-actual_fsize);
4494 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4495
4496 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4497 }
4498 else if (actual_fsize != 0)
4499 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4500 - actual_fsize, 0);
4501
4502 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4503 frame greater than 8k), do so now. */
4504 if (ret_off != 0)
4505 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4506
4507 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4508 {
4509 rtx sa = EH_RETURN_STACKADJ_RTX;
4510
4511 emit_insn (gen_blockage ());
4512 emit_insn (TARGET_64BIT
4513 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4514 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4515 }
4516 }
4517
4518 bool
4519 pa_can_use_return_insn (void)
4520 {
4521 if (!reload_completed)
4522 return false;
4523
4524 if (frame_pointer_needed)
4525 return false;
4526
4527 if (df_regs_ever_live_p (2))
4528 return false;
4529
4530 if (crtl->profile)
4531 return false;
4532
4533 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4534 }
4535
4536 rtx
4537 hppa_pic_save_rtx (void)
4538 {
4539 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4540 }
4541
4542 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4543 #define NO_DEFERRED_PROFILE_COUNTERS 0
4544 #endif
4545
4546
4547 /* Vector of funcdef numbers. */
4548 static vec<int> funcdef_nos;
4549
4550 /* Output deferred profile counters. */
4551 static void
4552 output_deferred_profile_counters (void)
4553 {
4554 unsigned int i;
4555 int align, n;
4556
4557 if (funcdef_nos.is_empty ())
4558 return;
4559
4560 switch_to_section (data_section);
4561 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4562 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4563
4564 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4565 {
4566 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4567 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4568 }
4569
4570 funcdef_nos.release ();
4571 }
4572
4573 void
4574 hppa_profile_hook (int label_no)
4575 {
4576 /* We use SImode for the address of the function in both 32 and
4577 64-bit code to avoid having to provide DImode versions of the
4578 lcla2 and load_offset_label_address insn patterns. */
4579 rtx reg = gen_reg_rtx (SImode);
4580 rtx_code_label *label_rtx = gen_label_rtx ();
4581 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4582 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4583 rtx arg_bytes, begin_label_rtx;
4584 rtx_insn *call_insn;
4585 char begin_label_name[16];
4586 bool use_mcount_pcrel_call;
4587
4588 /* If we can reach _mcount with a pc-relative call, we can optimize
4589 loading the address of the current function. This requires linker
4590 long branch stub support. */
4591 if (!TARGET_PORTABLE_RUNTIME
4592 && !TARGET_LONG_CALLS
4593 && (TARGET_SOM || flag_function_sections))
4594 use_mcount_pcrel_call = true;
4595 else
4596 use_mcount_pcrel_call = false;
4597
4598 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4599 label_no);
4600 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4601
4602 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4603
4604 if (!use_mcount_pcrel_call)
4605 {
4606 /* The address of the function is loaded into %r25 with an instruction-
4607 relative sequence that avoids the use of relocations. The sequence
4608 is split so that the load_offset_label_address instruction can
4609 occupy the delay slot of the call to _mcount. */
4610 if (TARGET_PA_20)
4611 emit_insn (gen_lcla2 (reg, label_rtx));
4612 else
4613 emit_insn (gen_lcla1 (reg, label_rtx));
4614
4615 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4616 reg,
4617 begin_label_rtx,
4618 label_rtx));
4619 }
4620
4621 if (!NO_DEFERRED_PROFILE_COUNTERS)
4622 {
4623 rtx count_label_rtx, addr, r24;
4624 char count_label_name[16];
4625
4626 funcdef_nos.safe_push (label_no);
4627 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4628 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4629 ggc_strdup (count_label_name));
4630
4631 addr = force_reg (Pmode, count_label_rtx);
4632 r24 = gen_rtx_REG (Pmode, 24);
4633 emit_move_insn (r24, addr);
4634
4635 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4636 if (use_mcount_pcrel_call)
4637 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4638 begin_label_rtx));
4639 else
4640 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4641
4642 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4643 }
4644 else
4645 {
4646 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4647 if (use_mcount_pcrel_call)
4648 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4649 begin_label_rtx));
4650 else
4651 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4652 }
4653
4654 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4655 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4656
4657 /* Indicate the _mcount call cannot throw, nor will it execute a
4658 non-local goto. */
4659 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4660
4661 /* Allocate space for fixed arguments. */
4662 if (reg_parm_stack_space > crtl->outgoing_args_size)
4663 crtl->outgoing_args_size = reg_parm_stack_space;
4664 }
4665
4666 /* Fetch the return address for the frame COUNT steps up from
4667 the current frame, after the prologue. FRAMEADDR is the
4668 frame pointer of the COUNT frame.
4669
4670 We want to ignore any export stub remnants here. To handle this,
4671 we examine the code at the return address, and if it is an export
4672 stub, we return a memory rtx for the stub return address stored
4673 at frame-24.
4674
4675 The value returned is used in two different ways:
4676
4677 1. To find a function's caller.
4678
4679 2. To change the return address for a function.
4680
4681 This function handles most instances of case 1; however, it will
4682 fail if there are two levels of stubs to execute on the return
4683 path. The only way I believe that can happen is if the return value
4684 needs a parameter relocation, which never happens for C code.
4685
4686 This function handles most instances of case 2; however, it will
4687 fail if we did not originally have stub code on the return path
4688 but will need stub code on the new return path. This can happen if
4689 the caller & callee are both in the main program, but the new
4690 return location is in a shared library. */
4691
4692 rtx
4693 pa_return_addr_rtx (int count, rtx frameaddr)
4694 {
4695 rtx label;
4696 rtx rp;
4697 rtx saved_rp;
4698 rtx ins;
4699
4700 /* The instruction stream at the return address of a PA1.X export stub is:
4701
4702 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4703 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4704 0x00011820 | stub+16: mtsp r1,sr0
4705 0xe0400002 | stub+20: be,n 0(sr0,rp)
4706
4707 0xe0400002 must be specified as -532676606 so that it won't be
4708 rejected as an invalid immediate operand on 64-bit hosts.
4709
4710 The instruction stream at the return address of a PA2.0 export stub is:
4711
4712 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4713 0xe840d002 | stub+12: bve,n (rp)
4714 */
4715
4716 HOST_WIDE_INT insns[4];
4717 int i, len;
4718
4719 if (count != 0)
4720 return NULL_RTX;
4721
4722 rp = get_hard_reg_initial_val (Pmode, 2);
4723
4724 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4725 return rp;
4726
4727 /* If there is no export stub then just use the value saved from
4728 the return pointer register. */
4729
4730 saved_rp = gen_reg_rtx (Pmode);
4731 emit_move_insn (saved_rp, rp);
4732
4733 /* Get pointer to the instruction stream. We have to mask out the
4734 privilege level from the two low order bits of the return address
4735 pointer here so that ins will point to the start of the first
4736 instruction that would have been executed if we returned. */
4737 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4738 label = gen_label_rtx ();
4739
4740 if (TARGET_PA_20)
4741 {
4742 insns[0] = 0x4bc23fd1;
4743 insns[1] = -398405630;
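/* -398405630 is 0xe840d002 (the bve,n (rp) above) written as a
 signed value acceptable on 64-bit hosts. */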
4744 len = 2;
4745 }
4746 else
4747 {
4748 insns[0] = 0x4bc23fd1;
4749 insns[1] = 0x004010a1;
4750 insns[2] = 0x00011820;
4751 insns[3] = -532676606;
4752 len = 4;
4753 }
4754
4755 /* Check the instruction stream at the normal return address for the
4756 export stub. If it is an export stub, then our return address is
4757 really in -24[frameaddr]. */
4758
4759 for (i = 0; i < len; i++)
4760 {
4761 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4762 rtx op1 = GEN_INT (insns[i]);
4763 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4764 }
4765
4766 /* Here we know that our return address points to an export
4767 stub. We don't want to return the address of the export stub,
4768 but rather the return address of the export stub. That return
4769 address is stored at -24[frameaddr]. */
4770
4771 emit_move_insn (saved_rp,
4772 gen_rtx_MEM (Pmode,
4773 memory_address (Pmode,
4774 plus_constant (Pmode, frameaddr,
4775 -24))));
4776
4777 emit_label (label);
4778
4779 return saved_rp;
4780 }
4781
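/* Emit an FP compare of OPERANDS[1] with OPERANDS[2] using the
 comparison code in OPERANDS[0], setting CCFP (register zero),
 followed by a branch to label OPERANDS[3] taken when the result
 of the compare is nonzero. */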
4782 void
4783 pa_emit_bcond_fp (rtx operands[])
4784 {
4785 enum rtx_code code = GET_CODE (operands[0]);
4786 rtx operand0 = operands[1];
4787 rtx operand1 = operands[2];
4788 rtx label = operands[3];
4789
4790 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4791 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4792
4793 emit_jump_insn (gen_rtx_SET (pc_rtx,
4794 gen_rtx_IF_THEN_ELSE (VOIDmode,
4795 gen_rtx_fmt_ee (NE,
4796 VOIDmode,
4797 gen_rtx_REG (CCFPmode, 0),
4798 const0_rtx),
4799 gen_rtx_LABEL_REF (VOIDmode, label),
4800 pc_rtx)));
4801
4802 }
4803
4804 /* Adjust the cost of a scheduling dependency. Return the new cost of
4805 the dependency of type DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
4806
4807 static int
4808 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4809 unsigned int)
4810 {
4811 enum attr_type attr_type;
4812
4813 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4814 true dependencies as they are described with bypasses now. */
4815 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4816 return cost;
4817
4818 if (! recog_memoized (insn))
4819 return 0;
4820
4821 attr_type = get_attr_type (insn);
4822
4823 switch (dep_type)
4824 {
4825 case REG_DEP_ANTI:
4826 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4827 cycles later. */
4828
4829 if (attr_type == TYPE_FPLOAD)
4830 {
4831 rtx pat = PATTERN (insn);
4832 rtx dep_pat = PATTERN (dep_insn);
4833 if (GET_CODE (pat) == PARALLEL)
4834 {
4835 /* This happens for the fldXs,mb patterns. */
4836 pat = XVECEXP (pat, 0, 0);
4837 }
4838 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4839 /* If this happens, we have to extend this to schedule
4840 optimally. Return 0 for now. */
4841 return 0;
4842
4843 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4844 {
4845 if (! recog_memoized (dep_insn))
4846 return 0;
4847 switch (get_attr_type (dep_insn))
4848 {
4849 case TYPE_FPALU:
4850 case TYPE_FPMULSGL:
4851 case TYPE_FPMULDBL:
4852 case TYPE_FPDIVSGL:
4853 case TYPE_FPDIVDBL:
4854 case TYPE_FPSQRTSGL:
4855 case TYPE_FPSQRTDBL:
4856 /* An fpload can't be issued until one cycle before a
4857 preceding arithmetic operation has finished if
4858 the target of the fpload is any of the sources
4859 (or destination) of the arithmetic operation. */
4860 return insn_default_latency (dep_insn) - 1;
4861
4862 default:
4863 return 0;
4864 }
4865 }
4866 }
4867 else if (attr_type == TYPE_FPALU)
4868 {
4869 rtx pat = PATTERN (insn);
4870 rtx dep_pat = PATTERN (dep_insn);
4871 if (GET_CODE (pat) == PARALLEL)
4872 {
4873 /* This happens for the fldXs,mb patterns. */
4874 pat = XVECEXP (pat, 0, 0);
4875 }
4876 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4877 /* If this happens, we have to extend this to schedule
4878 optimally. Return 0 for now. */
4879 return 0;
4880
4881 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4882 {
4883 if (! recog_memoized (dep_insn))
4884 return 0;
4885 switch (get_attr_type (dep_insn))
4886 {
4887 case TYPE_FPDIVSGL:
4888 case TYPE_FPDIVDBL:
4889 case TYPE_FPSQRTSGL:
4890 case TYPE_FPSQRTDBL:
4891 /* An ALU flop can't be issued until two cycles before a
4892 preceding divide or sqrt operation has finished if
4893 the target of the ALU flop is any of the sources
4894 (or destination) of the divide or sqrt operation. */
4895 return insn_default_latency (dep_insn) - 2;
4896
4897 default:
4898 return 0;
4899 }
4900 }
4901 }
4902
4903 /* For other anti dependencies, the cost is 0. */
4904 return 0;
4905
4906 case REG_DEP_OUTPUT:
4907 /* Output dependency; DEP_INSN writes a register that INSN writes some
4908 cycles later. */
4909 if (attr_type == TYPE_FPLOAD)
4910 {
4911 rtx pat = PATTERN (insn);
4912 rtx dep_pat = PATTERN (dep_insn);
4913 if (GET_CODE (pat) == PARALLEL)
4914 {
4915 /* This happens for the fldXs,mb patterns. */
4916 pat = XVECEXP (pat, 0, 0);
4917 }
4918 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4919 /* If this happens, we have to extend this to schedule
4920 optimally. Return 0 for now. */
4921 return 0;
4922
4923 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4924 {
4925 if (! recog_memoized (dep_insn))
4926 return 0;
4927 switch (get_attr_type (dep_insn))
4928 {
4929 case TYPE_FPALU:
4930 case TYPE_FPMULSGL:
4931 case TYPE_FPMULDBL:
4932 case TYPE_FPDIVSGL:
4933 case TYPE_FPDIVDBL:
4934 case TYPE_FPSQRTSGL:
4935 case TYPE_FPSQRTDBL:
4936 /* An fpload can't be issued until one cycle before a
4937 preceding arithmetic operation has finished if
4938 the target of the fpload is the destination of the
4939 arithmetic operation.
4940
4941 Exception: For PA7100LC, PA7200 and PA7300, the cost
4942 is 3 cycles, unless they bundle together. We also
4943 pay the penalty if the second insn is a fpload. */
4944 return insn_default_latency (dep_insn) - 1;
4945
4946 default:
4947 return 0;
4948 }
4949 }
4950 }
4951 else if (attr_type == TYPE_FPALU)
4952 {
4953 rtx pat = PATTERN (insn);
4954 rtx dep_pat = PATTERN (dep_insn);
4955 if (GET_CODE (pat) == PARALLEL)
4956 {
4957 /* This happens for the fldXs,mb patterns. */
4958 pat = XVECEXP (pat, 0, 0);
4959 }
4960 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4961 /* If this happens, we have to extend this to schedule
4962 optimally. Return 0 for now. */
4963 return 0;
4964
4965 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4966 {
4967 if (! recog_memoized (dep_insn))
4968 return 0;
4969 switch (get_attr_type (dep_insn))
4970 {
4971 case TYPE_FPDIVSGL:
4972 case TYPE_FPDIVDBL:
4973 case TYPE_FPSQRTSGL:
4974 case TYPE_FPSQRTDBL:
4975 /* An ALU flop can't be issued until two cycles before a
4976 preceding divide or sqrt operation has finished if
4977 the target of the ALU flop is also the target of
4978 the divide or sqrt operation. */
4979 return insn_default_latency (dep_insn) - 2;
4980
4981 default:
4982 return 0;
4983 }
4984 }
4985 }
4986
4987 /* For other output dependencies, the cost is 0. */
4988 return 0;
4989
4990 default:
4991 gcc_unreachable ();
4992 }
4993 }
4994
4995 /* Adjust scheduling priorities. We use this to try and keep addil
4996 and the next use of %r1 close together. */
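/* For example, a store through a LO_SUM symbolic address with
 priority 32 drops to priority 4 (32 >> 3). */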
4997 static int
4998 pa_adjust_priority (rtx_insn *insn, int priority)
4999 {
5000 rtx set = single_set (insn);
5001 rtx src, dest;
5002 if (set)
5003 {
5004 src = SET_SRC (set);
5005 dest = SET_DEST (set);
5006 if (GET_CODE (src) == LO_SUM
5007 && symbolic_operand (XEXP (src, 1), VOIDmode)
5008 && ! read_only_operand (XEXP (src, 1), VOIDmode))
5009 priority >>= 3;
5010
5011 else if (GET_CODE (src) == MEM
5012 && GET_CODE (XEXP (src, 0)) == LO_SUM
5013 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
5014 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
5015 priority >>= 1;
5016
5017 else if (GET_CODE (dest) == MEM
5018 && GET_CODE (XEXP (dest, 0)) == LO_SUM
5019 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
5020 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
5021 priority >>= 3;
5022 }
5023 return priority;
5024 }
5025
5026 /* The 700 can only issue a single insn at a time.
5027 The 7XXX processors can issue two insns at a time.
5028 The 8000 can issue four insns at a time. */
5029 static int
5030 pa_issue_rate (void)
5031 {
5032 switch (pa_cpu)
5033 {
5034 case PROCESSOR_700: return 1;
5035 case PROCESSOR_7100: return 2;
5036 case PROCESSOR_7100LC: return 2;
5037 case PROCESSOR_7200: return 2;
5038 case PROCESSOR_7300: return 2;
5039 case PROCESSOR_8000: return 4;
5040
5041 default:
5042 gcc_unreachable ();
5043 }
5044 }
5045
5046
5047
5048 /* Return the length of INSN, whose length has already been computed
5049 as LENGTH, plus any adjustment needed. Return LENGTH if no adjustment
5050 is necessary.
5051
5052 Also compute the length of an inline block move here as it is too
5053 complicated to express as a length attribute in pa.md. */
5054 int
5055 pa_adjust_insn_length (rtx_insn *insn, int length)
5056 {
5057 rtx pat = PATTERN (insn);
5058
5059 /* If length is negative or undefined, provide initial length. */
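/* A negative LENGTH wraps around to a huge unsigned value in the
 comparison below, so both cases are caught by one test. */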
5060 if ((unsigned int) length >= INT_MAX)
5061 {
5062 if (GET_CODE (pat) == SEQUENCE)
5063 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5064
5065 switch (get_attr_type (insn))
5066 {
5067 case TYPE_MILLI:
5068 length = pa_attr_length_millicode_call (insn);
5069 break;
5070 case TYPE_CALL:
5071 length = pa_attr_length_call (insn, 0);
5072 break;
5073 case TYPE_SIBCALL:
5074 length = pa_attr_length_call (insn, 1);
5075 break;
5076 case TYPE_DYNCALL:
5077 length = pa_attr_length_indirect_call (insn);
5078 break;
5079 case TYPE_SH_FUNC_ADRS:
5080 length = pa_attr_length_millicode_call (insn) + 20;
5081 break;
5082 default:
5083 gcc_unreachable ();
5084 }
5085 }
5086
5087 /* Block move pattern. */
5088 if (NONJUMP_INSN_P (insn)
5089 && GET_CODE (pat) == PARALLEL
5090 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5091 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5092 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5093 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5094 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5095 length += compute_movmem_length (insn) - 4;
5096 /* Block clear pattern. */
5097 else if (NONJUMP_INSN_P (insn)
5098 && GET_CODE (pat) == PARALLEL
5099 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5100 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5101 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5102 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5103 length += compute_clrmem_length (insn) - 4;
5104 /* Conditional branch with an unfilled delay slot. */
5105 else if (JUMP_P (insn) && ! simplejump_p (insn))
5106 {
5107 /* Adjust a short backwards conditional with an unfilled delay slot. */
5108 if (GET_CODE (pat) == SET
5109 && length == 4
5110 && JUMP_LABEL (insn) != NULL_RTX
5111 && ! forward_branch_p (insn))
5112 length += 4;
5113 else if (GET_CODE (pat) == PARALLEL
5114 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5115 && length == 4)
5116 length += 4;
5117 /* Adjust dbra insn with short backwards conditional branch with
5118 unfilled delay slot -- only for the case where the counter is in a
5119 general register. */
5120 else if (GET_CODE (pat) == PARALLEL
5121 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5122 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5123 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5124 && length == 4
5125 && ! forward_branch_p (insn))
5126 length += 4;
5127 }
5128 return length;
5129 }
5130
5131 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5132
5133 static bool
5134 pa_print_operand_punct_valid_p (unsigned char code)
5135 {
5136 if (code == '@'
5137 || code == '#'
5138 || code == '*'
5139 || code == '^')
5140 return true;
5141
5142 return false;
5143 }
5144
5145 /* Print operand X (an rtx) in assembler syntax to file FILE.
5146 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5147 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5148
5149 void
5150 pa_print_operand (FILE *file, rtx x, int code)
5151 {
5152 switch (code)
5153 {
5154 case '#':
5155 /* Output a 'nop' if there's nothing for the delay slot. */
5156 if (dbr_sequence_length () == 0)
5157 fputs ("\n\tnop", file);
5158 return;
5159 case '*':
5160 /* Output a nullification completer if there's nothing for the
5161 delay slot or nullification is requested. */
5162 if (dbr_sequence_length () == 0
5163 || (final_sequence
5164 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5165 fputs (",n", file);
5166 return;
5167 case 'R':
5168 /* Print out the second register name of a register pair.
5169 I.e., R (6) => 7. */
5170 fputs (reg_names[REGNO (x) + 1], file);
5171 return;
5172 case 'r':
5173 /* A register or zero. */
5174 if (x == const0_rtx
5175 || (x == CONST0_RTX (DFmode))
5176 || (x == CONST0_RTX (SFmode)))
5177 {
5178 fputs ("%r0", file);
5179 return;
5180 }
5181 else
5182 break;
5183 case 'f':
5184 /* A register or zero (floating point). */
5185 if (x == const0_rtx
5186 || (x == CONST0_RTX (DFmode))
5187 || (x == CONST0_RTX (SFmode)))
5188 {
5189 fputs ("%fr0", file);
5190 return;
5191 }
5192 else
5193 break;
5194 case 'A':
5195 {
5196 rtx xoperands[2];
5197
5198 xoperands[0] = XEXP (XEXP (x, 0), 0);
5199 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5200 pa_output_global_address (file, xoperands[1], 0);
5201 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5202 return;
5203 }
5204
5205 case 'C': /* Plain (C)ondition */
5206 case 'X':
5207 switch (GET_CODE (x))
5208 {
5209 case EQ:
5210 fputs ("=", file); break;
5211 case NE:
5212 fputs ("<>", file); break;
5213 case GT:
5214 fputs (">", file); break;
5215 case GE:
5216 fputs (">=", file); break;
5217 case GEU:
5218 fputs (">>=", file); break;
5219 case GTU:
5220 fputs (">>", file); break;
5221 case LT:
5222 fputs ("<", file); break;
5223 case LE:
5224 fputs ("<=", file); break;
5225 case LEU:
5226 fputs ("<<=", file); break;
5227 case LTU:
5228 fputs ("<<", file); break;
5229 default:
5230 gcc_unreachable ();
5231 }
5232 return;
5233 case 'N': /* Condition, (N)egated */
5234 switch (GET_CODE (x))
5235 {
5236 case EQ:
5237 fputs ("<>", file); break;
5238 case NE:
5239 fputs ("=", file); break;
5240 case GT:
5241 fputs ("<=", file); break;
5242 case GE:
5243 fputs ("<", file); break;
5244 case GEU:
5245 fputs ("<<", file); break;
5246 case GTU:
5247 fputs ("<<=", file); break;
5248 case LT:
5249 fputs (">=", file); break;
5250 case LE:
5251 fputs (">", file); break;
5252 case LEU:
5253 fputs (">>", file); break;
5254 case LTU:
5255 fputs (">>=", file); break;
5256 default:
5257 gcc_unreachable ();
5258 }
5259 return;
5260 /* For floating point comparisons. Note that the output
5261 predicates are the complement of the desired mode. The
5262 conditions for GT, GE, LT, LE and LTGT cause an invalid
5263 operation exception if the result is unordered and this
5264 exception is enabled in the floating-point status register. */
5265 case 'Y':
5266 switch (GET_CODE (x))
5267 {
5268 case EQ:
5269 fputs ("!=", file); break;
5270 case NE:
5271 fputs ("=", file); break;
5272 case GT:
5273 fputs ("!>", file); break;
5274 case GE:
5275 fputs ("!>=", file); break;
5276 case LT:
5277 fputs ("!<", file); break;
5278 case LE:
5279 fputs ("!<=", file); break;
5280 case LTGT:
5281 fputs ("!<>", file); break;
5282 case UNLE:
5283 fputs ("!?<=", file); break;
5284 case UNLT:
5285 fputs ("!?<", file); break;
5286 case UNGE:
5287 fputs ("!?>=", file); break;
5288 case UNGT:
5289 fputs ("!?>", file); break;
5290 case UNEQ:
5291 fputs ("!?=", file); break;
5292 case UNORDERED:
5293 fputs ("!?", file); break;
5294 case ORDERED:
5295 fputs ("?", file); break;
5296 default:
5297 gcc_unreachable ();
5298 }
5299 return;
5300 case 'S': /* Condition, operands are (S)wapped. */
5301 switch (GET_CODE (x))
5302 {
5303 case EQ:
5304 fputs ("=", file); break;
5305 case NE:
5306 fputs ("<>", file); break;
5307 case GT:
5308 fputs ("<", file); break;
5309 case GE:
5310 fputs ("<=", file); break;
5311 case GEU:
5312 fputs ("<<=", file); break;
5313 case GTU:
5314 fputs ("<<", file); break;
5315 case LT:
5316 fputs (">", file); break;
5317 case LE:
5318 fputs (">=", file); break;
5319 case LEU:
5320 fputs (">>=", file); break;
5321 case LTU:
5322 fputs (">>", file); break;
5323 default:
5324 gcc_unreachable ();
5325 }
5326 return;
5327 case 'B': /* Condition, (B)oth swapped and negated. */
5328 switch (GET_CODE (x))
5329 {
5330 case EQ:
5331 fputs ("<>", file); break;
5332 case NE:
5333 fputs ("=", file); break;
5334 case GT:
5335 fputs (">=", file); break;
5336 case GE:
5337 fputs (">", file); break;
5338 case GEU:
5339 fputs (">>", file); break;
5340 case GTU:
5341 fputs (">>=", file); break;
5342 case LT:
5343 fputs ("<=", file); break;
5344 case LE:
5345 fputs ("<", file); break;
5346 case LEU:
5347 fputs ("<<", file); break;
5348 case LTU:
5349 fputs ("<<=", file); break;
5350 default:
5351 gcc_unreachable ();
5352 }
5353 return;
5354 case 'k':
5355 gcc_assert (GET_CODE (x) == CONST_INT);
5356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5357 return;
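/* The 'Q', 'L', 'p' and 'P' cases below print complemented shift
 and bit-position operands; e.g., '%L' with an operand of 5
 prints 27 (32 - 5). */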
5358 case 'Q':
5359 gcc_assert (GET_CODE (x) == CONST_INT);
5360 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5361 return;
5362 case 'L':
5363 gcc_assert (GET_CODE (x) == CONST_INT);
5364 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5365 return;
5366 case 'o':
5367 gcc_assert (GET_CODE (x) == CONST_INT
5368 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5369 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5370 return;
5371 case 'O':
5372 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5373 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5374 return;
5375 case 'p':
5376 gcc_assert (GET_CODE (x) == CONST_INT);
5377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5378 return;
5379 case 'P':
5380 gcc_assert (GET_CODE (x) == CONST_INT);
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5382 return;
5383 case 'I':
5384 if (GET_CODE (x) == CONST_INT)
5385 fputs ("i", file);
5386 return;
5387 case 'M':
5388 case 'F':
5389 switch (GET_CODE (XEXP (x, 0)))
5390 {
5391 case PRE_DEC:
5392 case PRE_INC:
5393 if (ASSEMBLER_DIALECT == 0)
5394 fputs ("s,mb", file);
5395 else
5396 fputs (",mb", file);
5397 break;
5398 case POST_DEC:
5399 case POST_INC:
5400 if (ASSEMBLER_DIALECT == 0)
5401 fputs ("s,ma", file);
5402 else
5403 fputs (",ma", file);
5404 break;
5405 case PLUS:
5406 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5407 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5408 {
5409 if (ASSEMBLER_DIALECT == 0)
5410 fputs ("x", file);
5411 }
5412 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5413 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5414 {
5415 if (ASSEMBLER_DIALECT == 0)
5416 fputs ("x,s", file);
5417 else
5418 fputs (",s", file);
5419 }
5420 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5421 fputs ("s", file);
5422 break;
5423 default:
5424 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5425 fputs ("s", file);
5426 break;
5427 }
5428 return;
5429 case 'G':
5430 pa_output_global_address (file, x, 0);
5431 return;
5432 case 'H':
5433 pa_output_global_address (file, x, 1);
5434 return;
5435 case 0: /* Don't do anything special. */
5436 break;
5437 case 'Z':
5438 {
5439 unsigned op[3];
5440 compute_zdepwi_operands (INTVAL (x), op);
5441 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5442 return;
5443 }
5444 case 'z':
5445 {
5446 unsigned op[3];
5447 compute_zdepdi_operands (INTVAL (x), op);
5448 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5449 return;
5450 }
5451 case 'c':
5452 /* We can get here from a .vtable_inherit due to our
5453 CONSTANT_ADDRESS_P rejecting perfectly good constant
5454 addresses. */
5455 break;
5456 default:
5457 gcc_unreachable ();
5458 }
5459 if (GET_CODE (x) == REG)
5460 {
5461 fputs (reg_names [REGNO (x)], file);
5462 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5463 {
5464 fputs ("R", file);
5465 return;
5466 }
5467 if (FP_REG_P (x)
5468 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5469 && (REGNO (x) & 1) == 0)
5470 fputs ("L", file);
5471 }
5472 else if (GET_CODE (x) == MEM)
5473 {
5474 int size = GET_MODE_SIZE (GET_MODE (x));
5475 rtx base = NULL_RTX;
5476 switch (GET_CODE (XEXP (x, 0)))
5477 {
5478 case PRE_DEC:
5479 case POST_DEC:
5480 base = XEXP (XEXP (x, 0), 0);
5481 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5482 break;
5483 case PRE_INC:
5484 case POST_INC:
5485 base = XEXP (XEXP (x, 0), 0);
5486 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5487 break;
5488 case PLUS:
5489 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5490 fprintf (file, "%s(%s)",
5491 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5492 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5493 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5494 fprintf (file, "%s(%s)",
5495 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5496 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5497 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5498 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5499 {
5500 /* Because the REG_POINTER flag can get lost during reload,
5501 pa_legitimate_address_p canonicalizes the order of the
5502 index and base registers in the combined move patterns. */
5503 rtx base = XEXP (XEXP (x, 0), 1);
5504 rtx index = XEXP (XEXP (x, 0), 0);
5505
5506 fprintf (file, "%s(%s)",
5507 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5508 }
5509 else
5510 output_address (GET_MODE (x), XEXP (x, 0));
5511 break;
5512 default:
5513 output_address (GET_MODE (x), XEXP (x, 0));
5514 break;
5515 }
5516 }
5517 else
5518 output_addr_const (file, x);
5519 }
5520
5521 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5522
5523 void
5524 pa_output_global_address (FILE *file, rtx x, int round_constant)
5525 {
5526
5527 /* Imagine (high (const (plus ...))). */
5528 if (GET_CODE (x) == HIGH)
5529 x = XEXP (x, 0);
5530
5531 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5532 output_addr_const (file, x);
5533 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5534 {
5535 output_addr_const (file, x);
5536 fputs ("-$global$", file);
5537 }
5538 else if (GET_CODE (x) == CONST)
5539 {
5540 const char *sep = "";
5541 int offset = 0; /* assembler wants -$global$ at end */
5542 rtx base = NULL_RTX;
5543
5544 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5545 {
5546 case LABEL_REF:
5547 case SYMBOL_REF:
5548 base = XEXP (XEXP (x, 0), 0);
5549 output_addr_const (file, base);
5550 break;
5551 case CONST_INT:
5552 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5553 break;
5554 default:
5555 gcc_unreachable ();
5556 }
5557
5558 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5559 {
5560 case LABEL_REF:
5561 case SYMBOL_REF:
5562 base = XEXP (XEXP (x, 0), 1);
5563 output_addr_const (file, base);
5564 break;
5565 case CONST_INT:
5566 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5567 break;
5568 default:
5569 gcc_unreachable ();
5570 }
5571
5572 /* How bogus. The compiler is apparently responsible for
5573 rounding the constant if it uses an LR field selector.
5574
5575 The linker and/or assembler seem a better place since
5576 they have to do this kind of thing already.
5577
5578 If we fail to do this, HP's optimizing linker may eliminate
5579 an addil, but not update the ldw/stw/ldo instruction that
5580 uses the result of the addil. */
5581 if (round_constant)
5582 offset = ((offset + 0x1000) & ~0x1fff);
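	   /* Illustrative arithmetic: with OFFSET == 0x2345, this computes
	      (0x2345 + 0x1000) & ~0x1fff == 0x2000, i.e. OFFSET is rounded
	      to the nearest multiple of 0x2000, matching the rounding the
	      LR field selector applies.  */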
5583
5584 switch (GET_CODE (XEXP (x, 0)))
5585 {
5586 case PLUS:
5587 if (offset < 0)
5588 {
5589 offset = -offset;
5590 sep = "-";
5591 }
5592 else
5593 sep = "+";
5594 break;
5595
5596 case MINUS:
5597 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5598 sep = "-";
5599 break;
5600
5601 default:
5602 gcc_unreachable ();
5603 }
5604
5605 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5606 fputs ("-$global$", file);
5607 if (offset)
5608 fprintf (file, "%s%d", sep, offset);
5609 }
5610 else
5611 output_addr_const (file, x);
5612 }
5613
5614 /* Output boilerplate text to appear at the beginning of the file.
5615 There are several possible versions. */
5616 #define aputs(x) fputs(x, asm_out_file)
5617 static inline void
5618 pa_file_start_level (void)
5619 {
5620 if (TARGET_64BIT)
5621 aputs ("\t.LEVEL 2.0w\n");
5622 else if (TARGET_PA_20)
5623 aputs ("\t.LEVEL 2.0\n");
5624 else if (TARGET_PA_11)
5625 aputs ("\t.LEVEL 1.1\n");
5626 else
5627 aputs ("\t.LEVEL 1.0\n");
5628 }
5629
5630 static inline void
5631 pa_file_start_space (int sortspace)
5632 {
5633 aputs ("\t.SPACE $PRIVATE$");
5634 if (sortspace)
5635 aputs (",SORT=16");
5636 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5637 if (flag_tm)
5638 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5639 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5640 "\n\t.SPACE $TEXT$");
5641 if (sortspace)
5642 aputs (",SORT=8");
5643 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5644 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5645 }
5646
5647 static inline void
5648 pa_file_start_file (int want_version)
5649 {
5650 if (write_symbols != NO_DEBUG)
5651 {
5652 output_file_directive (asm_out_file, main_input_filename);
5653 if (want_version)
5654 aputs ("\t.version\t\"01.01\"\n");
5655 }
5656 }
5657
5658 static inline void
5659 pa_file_start_mcount (const char *aswhat)
5660 {
5661 if (profile_flag)
5662 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5663 }
5664
5665 static void
5666 pa_elf_file_start (void)
5667 {
5668 pa_file_start_level ();
5669 pa_file_start_mcount ("ENTRY");
5670 pa_file_start_file (0);
5671 }
5672
5673 static void
5674 pa_som_file_start (void)
5675 {
5676 pa_file_start_level ();
5677 pa_file_start_space (0);
5678 aputs ("\t.IMPORT $global$,DATA\n"
5679 "\t.IMPORT $$dyncall,MILLICODE\n");
5680 pa_file_start_mcount ("CODE");
5681 pa_file_start_file (0);
5682 }
5683
5684 static void
5685 pa_linux_file_start (void)
5686 {
5687 pa_file_start_file (1);
5688 pa_file_start_level ();
5689 pa_file_start_mcount ("CODE");
5690 }
5691
5692 static void
5693 pa_hpux64_gas_file_start (void)
5694 {
5695 pa_file_start_level ();
5696 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5697 if (profile_flag)
5698 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5699 #endif
5700 pa_file_start_file (1);
5701 }
5702
5703 static void
5704 pa_hpux64_hpas_file_start (void)
5705 {
5706 pa_file_start_level ();
5707 pa_file_start_space (1);
5708 pa_file_start_mcount ("CODE");
5709 pa_file_start_file (0);
5710 }
5711 #undef aputs
5712
5713 /* Search the deferred plabel list for SYMBOL and return its internal
5714 label. If an entry for SYMBOL is not found, a new entry is created. */
5715
5716 rtx
5717 pa_get_deferred_plabel (rtx symbol)
5718 {
5719 const char *fname = XSTR (symbol, 0);
5720 size_t i;
5721
5722 /* See if we have already put this function on the list of deferred
5723 plabels. This list is generally small, so a linear search is not
5724 too ugly. If it proves too slow, replace it with something faster. */
5725 for (i = 0; i < n_deferred_plabels; i++)
5726 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5727 break;
5728
5729 /* If the deferred plabel list is empty, or this entry was not found
5730 on the list, create a new entry on the list. */
5731 if (deferred_plabels == NULL || i == n_deferred_plabels)
5732 {
5733 tree id;
5734
5735 if (deferred_plabels == 0)
5736 deferred_plabels = ggc_alloc<deferred_plabel> ();
5737 else
5738 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5739 deferred_plabels,
5740 n_deferred_plabels + 1);
5741
5742 i = n_deferred_plabels++;
5743 deferred_plabels[i].internal_label = gen_label_rtx ();
5744 deferred_plabels[i].symbol = symbol;
5745
5746 /* Gross. We have just implicitly taken the address of this
5747 function. Mark it in the same manner as assemble_name. */
5748 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5749 if (id)
5750 mark_referenced (id);
5751 }
5752
5753 return deferred_plabels[i].internal_label;
5754 }
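/* A usage sketch (hypothetical caller): code emitting an indirect call
   through a plabel for a function "foo" could do

     rtx lab = pa_get_deferred_plabel (gen_rtx_SYMBOL_REF (Pmode, "foo"));

   and then reference LAB; output_deferred_plabels below emits one
   pointer-sized word per recorded symbol at the end of the file.  */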
5755
5756 static void
5757 output_deferred_plabels (void)
5758 {
5759 size_t i;
5760
5761 /* If we have some deferred plabels, then we need to switch into the
5762 data or readonly data section, and align it to a 4 byte boundary
5763 before outputting the deferred plabels. */
5764 if (n_deferred_plabels)
5765 {
5766 switch_to_section (flag_pic ? data_section : readonly_data_section);
5767 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5768 }
5769
5770 /* Now output the deferred plabels. */
5771 for (i = 0; i < n_deferred_plabels; i++)
5772 {
5773 targetm.asm_out.internal_label (asm_out_file, "L",
5774 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5775 assemble_integer (deferred_plabels[i].symbol,
5776 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5777 }
5778 }
5779
5780 /* Initialize optabs to point to emulation routines. */
5781
5782 static void
5783 pa_init_libfuncs (void)
5784 {
5785 if (HPUX_LONG_DOUBLE_LIBRARY)
5786 {
5787 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5788 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5789 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5790 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5791 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5792 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5793 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5794 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5795 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5796
5797 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5798 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5799 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5800 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5801 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5802 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5803 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5804
5805 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5806 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5807 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5808 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5809
5810 set_conv_libfunc (sfix_optab, SImode, TFmode,
5811 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5812 : "_U_Qfcnvfxt_quad_to_sgl");
5813 set_conv_libfunc (sfix_optab, DImode, TFmode,
5814 "_U_Qfcnvfxt_quad_to_dbl");
5815 set_conv_libfunc (ufix_optab, SImode, TFmode,
5816 "_U_Qfcnvfxt_quad_to_usgl");
5817 set_conv_libfunc (ufix_optab, DImode, TFmode,
5818 "_U_Qfcnvfxt_quad_to_udbl");
5819
5820 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5821 "_U_Qfcnvxf_sgl_to_quad");
5822 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5823 "_U_Qfcnvxf_dbl_to_quad");
5824 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5825 "_U_Qfcnvxf_usgl_to_quad");
5826 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5827 "_U_Qfcnvxf_udbl_to_quad");
5828 }
5829
5830 if (TARGET_SYNC_LIBCALL)
5831 init_sync_libfuncs (8);
5832 }
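/* The net effect of the table above: when HPUX_LONG_DOUBLE_LIBRARY is
   true, a TFmode operation such as

     long double f (long double a, long double b) { return a + b; }

   is compiled as a call to _U_Qfadd rather than as inline code.  */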
5833
5834 /* HP's millicode routines mean something special to the assembler.
5835 Keep track of which ones we have used. */
5836
5837 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5838 static void import_milli (enum millicodes);
5839 static char imported[(int) end1000];
5840 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5841 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5842 #define MILLI_START 10
5843
5844 static void
5845 import_milli (enum millicodes code)
5846 {
5847 char str[sizeof (import_string)];
5848
5849 if (!imported[(int) code])
5850 {
5851 imported[(int) code] = 1;
5852 strcpy (str, import_string);
5853 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5854 output_asm_insn (str, 0);
5855 }
5856 }
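/* For example, import_milli (mulI) copies "mulI" over the four dots in
   IMPORT_STRING, emitting

     .IMPORT $$mulI,MILLICODE

   and the IMPORTED array guarantees this happens at most once per
   millicode routine per output file.  */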
5857
5858 /* The register constraints have put the operands and return value in
5859 the proper registers. */
5860
5861 const char *
5862 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5863 {
5864 import_milli (mulI);
5865 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5866 }
5867
5868 /* Emit the rtl for doing a division by a constant. */
5869
5870 /* Do magic division millicodes exist for this value? */
5871 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5872
5873 /* We'll use an array to keep track of the magic millicodes and
5874 whether or not we've used them already. [n][0] is signed, [n][1] is
5875 unsigned. */
5876
5877 static int div_milli[16][2];
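/* For example, pa_magic_milli[7] is nonzero, so a division by 7 is
   handled below by moving the dividend into %r26 and emitting a call
   to the $$divI_7 (or $$divU_7) millicode routine, which returns the
   quotient in %r29.  */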
5878
5879 int
5880 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5881 {
5882 if (GET_CODE (operands[2]) == CONST_INT
5883 && INTVAL (operands[2]) > 0
5884 && INTVAL (operands[2]) < 16
5885 && pa_magic_milli[INTVAL (operands[2])])
5886 {
5887 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5888
5889 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5890 emit
5891 (gen_rtx_PARALLEL
5892 (VOIDmode,
5893 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5894 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5895 SImode,
5896 gen_rtx_REG (SImode, 26),
5897 operands[2])),
5898 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5899 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5900 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5901 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5902 gen_rtx_CLOBBER (VOIDmode, ret))));
5903 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5904 return 1;
5905 }
5906 return 0;
5907 }
5908
5909 const char *
5910 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5911 {
5912 int divisor;
5913
5914 /* If the divisor is a constant, try to use one of the special
5915 opcodes. */
5916 if (GET_CODE (operands[0]) == CONST_INT)
5917 {
5918 static char buf[100];
5919 divisor = INTVAL (operands[0]);
5920 if (!div_milli[divisor][unsignedp])
5921 {
5922 div_milli[divisor][unsignedp] = 1;
5923 if (unsignedp)
5924 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5925 else
5926 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5927 }
5928 if (unsignedp)
5929 {
5930 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5931 INTVAL (operands[0]));
5932 return pa_output_millicode_call (insn,
5933 gen_rtx_SYMBOL_REF (SImode, buf));
5934 }
5935 else
5936 {
5937 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5938 INTVAL (operands[0]));
5939 return pa_output_millicode_call (insn,
5940 gen_rtx_SYMBOL_REF (SImode, buf));
5941 }
5942 }
5943 /* Divisor isn't a special constant. */
5944 else
5945 {
5946 if (unsignedp)
5947 {
5948 import_milli (divU);
5949 return pa_output_millicode_call (insn,
5950 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5951 }
5952 else
5953 {
5954 import_milli (divI);
5955 return pa_output_millicode_call (insn,
5956 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5957 }
5958 }
5959 }
5960
5961 /* Output a $$rem millicode to do mod. */
5962
5963 const char *
5964 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5965 {
5966 if (unsignedp)
5967 {
5968 import_milli (remU);
5969 return pa_output_millicode_call (insn,
5970 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5971 }
5972 else
5973 {
5974 import_milli (remI);
5975 return pa_output_millicode_call (insn,
5976 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5977 }
5978 }
5979
5980 void
5981 pa_output_arg_descriptor (rtx_insn *call_insn)
5982 {
5983 const char *arg_regs[4];
5984 machine_mode arg_mode;
5985 rtx link;
5986 int i, output_flag = 0;
5987 int regno;
5988
5989 /* We neither need nor want argument location descriptors for the
5990 64-bit runtime environment or the ELF32 environment. */
5991 if (TARGET_64BIT || TARGET_ELF32)
5992 return;
5993
5994 for (i = 0; i < 4; i++)
5995 arg_regs[i] = 0;
5996
5997 /* Specify explicitly that no argument relocations should take place
5998 if using the portable runtime calling conventions. */
5999 if (TARGET_PORTABLE_RUNTIME)
6000 {
6001 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6002 asm_out_file);
6003 return;
6004 }
6005
6006 gcc_assert (CALL_P (call_insn));
6007 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
6008 link; link = XEXP (link, 1))
6009 {
6010 rtx use = XEXP (link, 0);
6011
6012 if (! (GET_CODE (use) == USE
6013 && GET_CODE (XEXP (use, 0)) == REG
6014 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6015 continue;
6016
6017 arg_mode = GET_MODE (XEXP (use, 0));
6018 regno = REGNO (XEXP (use, 0));
6019 if (regno >= 23 && regno <= 26)
6020 {
6021 arg_regs[26 - regno] = "GR";
6022 if (arg_mode == DImode)
6023 arg_regs[25 - regno] = "GR";
6024 }
6025 else if (regno >= 32 && regno <= 39)
6026 {
6027 if (arg_mode == SFmode)
6028 arg_regs[(regno - 32) / 2] = "FR";
6029 else
6030 {
6031 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6032 arg_regs[(regno - 34) / 2] = "FR";
6033 arg_regs[(regno - 34) / 2 + 1] = "FU";
6034 #else
6035 arg_regs[(regno - 34) / 2] = "FU";
6036 arg_regs[(regno - 34) / 2 + 1] = "FR";
6037 #endif
6038 }
6039 }
6040 }
6041 fputs ("\t.CALL ", asm_out_file);
6042 for (i = 0; i < 4; i++)
6043 {
6044 if (arg_regs[i])
6045 {
6046 if (output_flag++)
6047 fputc (',', asm_out_file);
6048 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6049 }
6050 }
6051 fputc ('\n', asm_out_file);
6052 }
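/* For example (operands illustrative), a call f (i) passing a single
   int in %r26 sets arg_regs[0] to "GR" and emits

     .CALL ARGW0=GR

   which tells HP's linker how the first argument word may be relocated
   between general and floating-point registers.  */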
6053 \f
6054 /* Inform reload about cases where moving X with a mode MODE to or from
6055 a register in RCLASS requires an extra scratch or immediate register.
6056 Return the class needed for the immediate register. */
6057
6058 static reg_class_t
6059 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6060 machine_mode mode, secondary_reload_info *sri)
6061 {
6062 int regno;
6063 enum reg_class rclass = (enum reg_class) rclass_i;
6064
6065 /* Handle the easy stuff first. */
6066 if (rclass == R1_REGS)
6067 return NO_REGS;
6068
6069 if (REG_P (x))
6070 {
6071 regno = REGNO (x);
6072 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6073 return NO_REGS;
6074 }
6075 else
6076 regno = -1;
6077
6078 /* If we have something like (mem (mem (...)), we can safely assume the
6079 inner MEM will end up in a general register after reloading, so there's
6080 no need for a secondary reload. */
6081 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6082 return NO_REGS;
6083
6084 /* Trying to load a constant into a FP register during PIC code
6085 generation requires %r1 as a scratch register. For float modes,
6086 the only legitimate constant is CONST0_RTX. However, there are
6087 a few patterns that accept constant double operands. */
6088 if (flag_pic
6089 && FP_REG_CLASS_P (rclass)
6090 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6091 {
6092 switch (mode)
6093 {
6094 case E_SImode:
6095 sri->icode = CODE_FOR_reload_insi_r1;
6096 break;
6097
6098 case E_DImode:
6099 sri->icode = CODE_FOR_reload_indi_r1;
6100 break;
6101
6102 case E_SFmode:
6103 sri->icode = CODE_FOR_reload_insf_r1;
6104 break;
6105
6106 case E_DFmode:
6107 sri->icode = CODE_FOR_reload_indf_r1;
6108 break;
6109
6110 default:
6111 gcc_unreachable ();
6112 }
6113 return NO_REGS;
6114 }
6115
6116 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6117 register when we're generating PIC code or when the operand isn't
6118 readonly. */
6119 if (pa_symbolic_expression_p (x))
6120 {
6121 if (GET_CODE (x) == HIGH)
6122 x = XEXP (x, 0);
6123
6124 if (flag_pic || !read_only_operand (x, VOIDmode))
6125 {
6126 switch (mode)
6127 {
6128 case E_SImode:
6129 sri->icode = CODE_FOR_reload_insi_r1;
6130 break;
6131
6132 case E_DImode:
6133 sri->icode = CODE_FOR_reload_indi_r1;
6134 break;
6135
6136 default:
6137 gcc_unreachable ();
6138 }
6139 return NO_REGS;
6140 }
6141 }
6142
6143 /* Profiling showed the PA port spends about 1.3% of its compilation
6144 time in true_regnum from calls inside pa_secondary_reload_class. */
6145 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6146 regno = true_regnum (x);
6147
6148 /* Handle reloads for floating point loads and stores. */
6149 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6150 && FP_REG_CLASS_P (rclass))
6151 {
6152 if (MEM_P (x))
6153 {
6154 x = XEXP (x, 0);
6155
6156 /* We don't need a secondary reload for indexed memory addresses.
6157
6158 When INT14_OK_STRICT is true, it might appear that we could
6159 directly allow register indirect memory addresses. However,
6160 this doesn't work because we don't support SUBREGs in
6161 floating-point register copies and reload doesn't tell us
6162 when it's going to use a SUBREG. */
6163 if (IS_INDEX_ADDR_P (x))
6164 return NO_REGS;
6165 }
6166
6167 /* Request a secondary reload with a general scratch register
6168 for everything else. ??? Could symbolic operands be handled
6169 directly when generating non-pic PA 2.0 code? */
6170 sri->icode = (in_p
6171 ? direct_optab_handler (reload_in_optab, mode)
6172 : direct_optab_handler (reload_out_optab, mode));
6173 return NO_REGS;
6174 }
6175
6176 /* A SAR<->FP register copy requires an intermediate general register
6177 and secondary memory. We need a secondary reload with a general
6178 scratch register for spills. */
6179 if (rclass == SHIFT_REGS)
6180 {
6181 /* Handle spill. */
6182 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6183 {
6184 sri->icode = (in_p
6185 ? direct_optab_handler (reload_in_optab, mode)
6186 : direct_optab_handler (reload_out_optab, mode));
6187 return NO_REGS;
6188 }
6189
6190 /* Handle FP copy. */
6191 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6192 return GENERAL_REGS;
6193 }
6194
6195 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6196 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6197 && FP_REG_CLASS_P (rclass))
6198 return GENERAL_REGS;
6199
6200 return NO_REGS;
6201 }
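/* For example, a copy from %sar (SHIFT_REGS) to a floating-point
   register has no direct path; the code above returns GENERAL_REGS so
   that reload routes the value through a general register, with
   secondary memory used for the GR<->FP leg when needed.  */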
6202
6203 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6204
6205 static bool
6206 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6207 reg_class_t class1 ATTRIBUTE_UNUSED,
6208 reg_class_t class2 ATTRIBUTE_UNUSED)
6209 {
6210 #ifdef PA_SECONDARY_MEMORY_NEEDED
6211 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6212 #else
6213 return false;
6214 #endif
6215 }
6216
6217 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6218 is only marked as live on entry by df-scan when it is a fixed
6219 register. It isn't a fixed register in the 64-bit runtime,
6220 so we need to mark it here. */
6221
6222 static void
6223 pa_extra_live_on_entry (bitmap regs)
6224 {
6225 if (TARGET_64BIT)
6226 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6227 }
6228
6229 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6230 to prevent it from being deleted. */
6231
6232 rtx
6233 pa_eh_return_handler_rtx (void)
6234 {
6235 rtx tmp;
6236
6237 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6238 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6239 tmp = gen_rtx_MEM (word_mode, tmp);
6240 tmp->volatil = 1;
6241 return tmp;
6242 }
6243
6244 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6245 by invisible reference. As a GCC extension, we also pass anything
6246 with a zero or variable size by reference.
6247
6248 The 64-bit runtime does not describe passing any types by invisible
6249 reference. The internals of GCC can't currently handle passing
6250 empty structures, or zero or variable length arrays, when they are
6251 not passed entirely on the stack or by reference. Thus, as a GCC
6252 extension, we pass these types by reference. The HP compiler doesn't
6253 support these types, so hopefully there shouldn't be any compatibility
6254 issues. This may have to be revisited when HP releases a C99 compiler
6255 or updates the ABI. */
6256
6257 static bool
6258 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6259 machine_mode mode, const_tree type,
6260 bool named ATTRIBUTE_UNUSED)
6261 {
6262 HOST_WIDE_INT size;
6263
6264 if (type)
6265 size = int_size_in_bytes (type);
6266 else
6267 size = GET_MODE_SIZE (mode);
6268
6269 if (TARGET_64BIT)
6270 return size <= 0;
6271 else
6272 return size <= 0 || size > 8;
6273 }
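/* For example, under the 32-bit runtime a 12-byte struct (size > 8) is
   passed by invisible reference while an 8-byte struct is passed by
   value; under the 64-bit runtime only zero and variable sized types
   are passed by reference.  */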
6274
6275 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6276
6277 static pad_direction
6278 pa_function_arg_padding (machine_mode mode, const_tree type)
6279 {
6280 if (mode == BLKmode
6281 || (TARGET_64BIT
6282 && type
6283 && (AGGREGATE_TYPE_P (type)
6284 || TREE_CODE (type) == COMPLEX_TYPE
6285 || TREE_CODE (type) == VECTOR_TYPE)))
6286 {
6287 /* Return PAD_NONE if justification is not required. */
6288 if (type
6289 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6290 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6291 return PAD_NONE;
6292
6293 /* The directions set here are ignored when a BLKmode argument larger
6294 than a word is placed in a register. Different code is used for
6295 the stack and registers. This makes it difficult to have a
6296 consistent data representation for both the stack and registers.
6297 For both runtimes, the justification and padding for arguments on
6298 the stack and in registers should be identical. */
6299 if (TARGET_64BIT)
6300 /* The 64-bit runtime specifies left justification for aggregates. */
6301 return PAD_UPWARD;
6302 else
6303 /* The 32-bit runtime architecture specifies right justification.
6304 When the argument is passed on the stack, the argument is padded
6305 with garbage on the left. The HP compiler pads with zeros. */
6306 return PAD_DOWNWARD;
6307 }
6308
6309 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6310 return PAD_DOWNWARD;
6311 else
6312 return PAD_NONE;
6313 }
6314
6315 \f
6316 /* Do what is necessary for `va_start'. We look at the current function
6317 to determine if stdargs or varargs is used and fill in an initial
6318 va_list. A pointer to this constructor is returned. */
6319
6320 static rtx
6321 hppa_builtin_saveregs (void)
6322 {
6323 rtx offset, dest;
6324 tree fntype = TREE_TYPE (current_function_decl);
6325 int argadj = ((!stdarg_p (fntype))
6326 ? UNITS_PER_WORD : 0);
6327
6328 if (argadj)
6329 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6330 else
6331 offset = crtl->args.arg_offset_rtx;
6332
6333 if (TARGET_64BIT)
6334 {
6335 int i, off;
6336
6337 /* Adjust for varargs/stdarg differences. */
6338 if (argadj)
6339 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6340 else
6341 offset = crtl->args.arg_offset_rtx;
6342
6343 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6344 from the incoming arg pointer and growing to larger addresses. */
6345 for (i = 26, off = -64; i >= 19; i--, off += 8)
6346 emit_move_insn (gen_rtx_MEM (word_mode,
6347 plus_constant (Pmode,
6348 arg_pointer_rtx, off)),
6349 gen_rtx_REG (word_mode, i));
6350
6351 /* The incoming args pointer points just beyond the flushback area;
6352 normally this is not a serious concern. However, when we are doing
6353 varargs/stdargs we want to make the arg pointer point to the start
6354 of the incoming argument area. */
6355 emit_move_insn (virtual_incoming_args_rtx,
6356 plus_constant (Pmode, arg_pointer_rtx, -64));
6357
6358 /* Now return a pointer to the first anonymous argument. */
6359 return copy_to_reg (expand_binop (Pmode, add_optab,
6360 virtual_incoming_args_rtx,
6361 offset, 0, 0, OPTAB_LIB_WIDEN));
6362 }
6363
6364 /* Store general registers on the stack. */
6365 dest = gen_rtx_MEM (BLKmode,
6366 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6367 -16));
6368 set_mem_alias_set (dest, get_varargs_alias_set ());
6369 set_mem_align (dest, BITS_PER_WORD);
6370 move_block_from_reg (23, dest, 4);
6371
6372 /* move_block_from_reg will emit code to store the argument registers
6373 individually as scalar stores.
6374
6375 However, other insns may later load from the same addresses for
6376 a structure load (passing a struct to a varargs routine).
6377
6378 The alias code assumes that such aliasing can never happen, so we
6379 have to keep memory referencing insns from moving up beyond the
6380 last argument register store. So we emit a blockage insn here. */
6381 emit_insn (gen_blockage ());
6382
6383 return copy_to_reg (expand_binop (Pmode, add_optab,
6384 crtl->args.internal_arg_pointer,
6385 offset, 0, 0, OPTAB_LIB_WIDEN));
6386 }
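/* In the 32-bit path above, %r23..%r26 land in the four words at
   offsets -16, -12, -8 and -4 from the internal argument pointer, so
   the register arguments and any stack arguments form one contiguous
   block that the va_list can walk.  */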
6387
6388 static void
6389 hppa_va_start (tree valist, rtx nextarg)
6390 {
6391 nextarg = expand_builtin_saveregs ();
6392 std_expand_builtin_va_start (valist, nextarg);
6393 }
6394
6395 static tree
6396 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6397 gimple_seq *post_p)
6398 {
6399 if (TARGET_64BIT)
6400 {
6401 /* Args grow upward. We can use the generic routines. */
6402 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6403 }
6404 else /* !TARGET_64BIT */
6405 {
6406 tree ptr = build_pointer_type (type);
6407 tree valist_type;
6408 tree t, u;
6409 unsigned int size, ofs;
6410 bool indirect;
6411
6412 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6413 if (indirect)
6414 {
6415 type = ptr;
6416 ptr = build_pointer_type (type);
6417 }
6418 size = int_size_in_bytes (type);
6419 valist_type = TREE_TYPE (valist);
6420
6421 /* Args grow down. Not handled by generic routines. */
6422
6423 u = fold_convert (sizetype, size_in_bytes (type));
6424 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6425 t = fold_build_pointer_plus (valist, u);
6426
6427 /* Align to 4 or 8 byte boundary depending on argument size. */
6428
6429 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6430 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6431 t = fold_convert (valist_type, t);
6432
6433 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6434
6435 ofs = (8 - size) % 4;
6436 if (ofs != 0)
6437 t = fold_build_pointer_plus_hwi (t, ofs);
6438
6439 t = fold_convert (ptr, t);
6440 t = build_va_arg_indirect_ref (t);
6441
6442 if (indirect)
6443 t = build_va_arg_indirect_ref (t);
6444
6445 return t;
6446 }
6447 }
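/* A worked example for the 32-bit path: for a 1-byte char argument,
   VALIST is decremented by 1 and rounded down to a 4-byte boundary,
   then ofs = (8 - 1) % 4 = 3 is added back so the byte is fetched
   right-justified from its argument word; for an 8-byte argument the
   pointer is rounded down to an 8-byte boundary and ofs is 0.  */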
6448
6449 /* True if MODE is valid for the target. By "valid", we mean able to
6450 be manipulated in non-trivial ways. In particular, this means all
6451 the arithmetic is supported.
6452
6453 Currently, TImode is not valid as the HP 64-bit runtime documentation
6454 doesn't document the alignment and calling conventions for this type.
6455 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6456 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6457
6458 static bool
6459 pa_scalar_mode_supported_p (scalar_mode mode)
6460 {
6461 int precision = GET_MODE_PRECISION (mode);
6462
6463 switch (GET_MODE_CLASS (mode))
6464 {
6465 case MODE_PARTIAL_INT:
6466 case MODE_INT:
6467 if (precision == CHAR_TYPE_SIZE)
6468 return true;
6469 if (precision == SHORT_TYPE_SIZE)
6470 return true;
6471 if (precision == INT_TYPE_SIZE)
6472 return true;
6473 if (precision == LONG_TYPE_SIZE)
6474 return true;
6475 if (precision == LONG_LONG_TYPE_SIZE)
6476 return true;
6477 return false;
6478
6479 case MODE_FLOAT:
6480 if (precision == FLOAT_TYPE_SIZE)
6481 return true;
6482 if (precision == DOUBLE_TYPE_SIZE)
6483 return true;
6484 if (precision == LONG_DOUBLE_TYPE_SIZE)
6485 return true;
6486 return false;
6487
6488 case MODE_DECIMAL_FLOAT:
6489 return false;
6490
6491 default:
6492 gcc_unreachable ();
6493 }
6494 }
6495
6496 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6497 it branches into the delay slot. Otherwise, return FALSE. */
6498
6499 static bool
6500 branch_to_delay_slot_p (rtx_insn *insn)
6501 {
6502 rtx_insn *jump_insn;
6503
6504 if (dbr_sequence_length ())
6505 return FALSE;
6506
6507 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6508 while (insn)
6509 {
6510 insn = next_active_insn (insn);
6511 if (jump_insn == insn)
6512 return TRUE;
6513
6514 /* We can't rely on the length of asms. So, we return FALSE when
6515 the branch is followed by an asm. */
6516 if (!insn
6517 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6518 || asm_noperands (PATTERN (insn)) >= 0
6519 || get_attr_length (insn) > 0)
6520 break;
6521 }
6522
6523 return FALSE;
6524 }
6525
6526 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6527
6528 This occurs when INSN has an unfilled delay slot and is followed
6529 by an asm. Disaster can occur if the asm is empty and the jump
6530 branches into the delay slot. So, we add a nop in the delay slot
6531 when this occurs. */
6532
6533 static bool
6534 branch_needs_nop_p (rtx_insn *insn)
6535 {
6536 rtx_insn *jump_insn;
6537
6538 if (dbr_sequence_length ())
6539 return FALSE;
6540
6541 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6542 while (insn)
6543 {
6544 insn = next_active_insn (insn);
6545 if (!insn || jump_insn == insn)
6546 return TRUE;
6547
6548 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6549 || asm_noperands (PATTERN (insn)) >= 0)
6550 && get_attr_length (insn) > 0)
6551 break;
6552 }
6553
6554 return FALSE;
6555 }
6556
6557 /* Return TRUE if INSN, a forward jump insn, can use nullification
6558 to skip the following instruction. This avoids an extra cycle due
6559 to a mis-predicted branch when we fall through. */
6560
6561 static bool
6562 use_skip_p (rtx_insn *insn)
6563 {
6564 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6565
6566 while (insn)
6567 {
6568 insn = next_active_insn (insn);
6569
6570 /* We can't rely on the length of asms, so we can't skip asms. */
6571 if (!insn
6572 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6573 || asm_noperands (PATTERN (insn)) >= 0)
6574 break;
6575 if (get_attr_length (insn) == 4
6576 && jump_insn == next_active_insn (insn))
6577 return TRUE;
6578 if (get_attr_length (insn) > 0)
6579 break;
6580 }
6581
6582 return FALSE;
6583 }
6584
6585 /* This routine handles all the normal conditional branch sequences we
6586 might need to generate. It handles compare immediate vs compare
6587 register, nullification of delay slots, varying length branches,
6588 negated branches, and all combinations of the above. It returns the
6589 output template appropriate for the branch described by the given
6590 parameters. */
6591
6592 const char *
6593 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6594 {
6595 static char buf[100];
6596 bool useskip;
6597 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6598 int length = get_attr_length (insn);
6599 int xdelay;
6600
6601 /* A conditional branch to the following instruction (e.g. the delay slot)
6602 is asking for a disaster. This can happen when not optimizing and
6603 when jump optimization fails.
6604
6605 While it is usually safe to emit nothing, this can fail if the
6606 preceding instruction is a nullified branch with an empty delay
6607 slot and the same branch target as this branch. We could check
6608 for this but jump optimization should eliminate nop jumps. It
6609 is always safe to emit a nop. */
6610 if (branch_to_delay_slot_p (insn))
6611 return "nop";
6612
6613 /* The doubleword form of the cmpib instruction doesn't have the LEU
6614 and GTU conditions while the cmpb instruction does. Since we accept
6615 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6616 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6617 operands[2] = gen_rtx_REG (DImode, 0);
6618 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6619 operands[1] = gen_rtx_REG (DImode, 0);
6620
6621 /* If this is a long branch with its delay slot unfilled, set `nullify'
6622 as it can nullify the delay slot and save a nop. */
6623 if (length == 8 && dbr_sequence_length () == 0)
6624 nullify = 1;
6625
6626 /* If this is a short forward conditional branch which did not get
6627 its delay slot filled, the delay slot can still be nullified. */
6628 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6629 nullify = forward_branch_p (insn);
6630
6631 /* A forward branch over a single nullified insn can be done with a
6632 comclr instruction. This avoids a single cycle penalty due to a
6633 mis-predicted branch if we fall through (branch not taken). */
6634 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6635
6636 switch (length)
6637 {
6638 /* All short conditional branches except backwards with an unfilled
6639 delay slot. */
6640 case 4:
6641 if (useskip)
6642 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6643 else
6644 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6645 if (GET_MODE (operands[1]) == DImode)
6646 strcat (buf, "*");
6647 if (negated)
6648 strcat (buf, "%B3");
6649 else
6650 strcat (buf, "%S3");
6651 if (useskip)
6652 strcat (buf, " %2,%r1,%%r0");
6653 else if (nullify)
6654 {
6655 if (branch_needs_nop_p (insn))
6656 strcat (buf, ",n %2,%r1,%0%#");
6657 else
6658 strcat (buf, ",n %2,%r1,%0");
6659 }
6660 else
6661 strcat (buf, " %2,%r1,%0");
6662 break;
6663
6664 /* All long conditionals. Note a short backward branch with an
6665 unfilled delay slot is treated just like a long backward branch
6666 with an unfilled delay slot. */
6667 case 8:
6668 /* Handle weird backwards branch with a filled delay slot
6669 which is nullified. */
6670 if (dbr_sequence_length () != 0
6671 && ! forward_branch_p (insn)
6672 && nullify)
6673 {
6674 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6675 if (GET_MODE (operands[1]) == DImode)
6676 strcat (buf, "*");
6677 if (negated)
6678 strcat (buf, "%S3");
6679 else
6680 strcat (buf, "%B3");
6681 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6682 }
6683 /* Handle short backwards branch with an unfilled delay slot.
6684 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6685 taken and untaken branches. */
6686 else if (dbr_sequence_length () == 0
6687 && ! forward_branch_p (insn)
6688 && INSN_ADDRESSES_SET_P ()
6689 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6690 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6691 {
6692 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6693 if (GET_MODE (operands[1]) == DImode)
6694 strcat (buf, "*");
6695 if (negated)
6696 strcat (buf, "%B3 %2,%r1,%0%#");
6697 else
6698 strcat (buf, "%S3 %2,%r1,%0%#");
6699 }
6700 else
6701 {
6702 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6703 if (GET_MODE (operands[1]) == DImode)
6704 strcat (buf, "*");
6705 if (negated)
6706 strcat (buf, "%S3");
6707 else
6708 strcat (buf, "%B3");
6709 if (nullify)
6710 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6711 else
6712 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6713 }
6714 break;
6715
6716 default:
6717 /* The reversed conditional branch must branch over one additional
6718 instruction if the delay slot is filled and needs to be extracted
6719 by pa_output_lbranch. If the delay slot is empty or this is a
6720 nullified forward branch, the instruction after the reversed
6721 condition branch must be nullified. */
6722 if (dbr_sequence_length () == 0
6723 || (nullify && forward_branch_p (insn)))
6724 {
6725 nullify = 1;
6726 xdelay = 0;
6727 operands[4] = GEN_INT (length);
6728 }
6729 else
6730 {
6731 xdelay = 1;
6732 operands[4] = GEN_INT (length + 4);
6733 }
6734
6735 /* Create a reversed conditional branch which branches around
6736 the following insns. */
6737 if (GET_MODE (operands[1]) != DImode)
6738 {
6739 if (nullify)
6740 {
6741 if (negated)
6742 strcpy (buf,
6743 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6744 else
6745 strcpy (buf,
6746 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6747 }
6748 else
6749 {
6750 if (negated)
6751 strcpy (buf,
6752 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6753 else
6754 strcpy (buf,
6755 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6756 }
6757 }
6758 else
6759 {
6760 if (nullify)
6761 {
6762 if (negated)
6763 strcpy (buf,
6764 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6765 else
6766 strcpy (buf,
6767 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6768 }
6769 else
6770 {
6771 if (negated)
6772 strcpy (buf,
6773 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6774 else
6775 strcpy (buf,
6776 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6777 }
6778 }
6779
6780 output_asm_insn (buf, operands);
6781 return pa_output_lbranch (operands[0], insn, xdelay);
6782 }
6783 return buf;
6784 }
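/* For example (operands illustrative), a short SImode branch-on-equal
   with a nullified, unfilled delay slot comes out of the template
   machinery above as

     cmpb,=,n %r25,%r26,L$0002

   while a branch that is out of range is handled by the default case:
   a reversed conditional branch around the long-branch sequence from
   pa_output_lbranch.  */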
6785
6786 /* Output a PIC pc-relative instruction sequence to load the address of
6787 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6788 or a code label. OPERANDS[1] specifies the register to use to load
6789 the program counter. OPERANDS[3] may be used for label generation.
6790 The sequence is always three instructions in length. The program
6791 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6792 Register %r1 is clobbered. */
6793
6794 static void
6795 pa_output_pic_pcrel_sequence (rtx *operands)
6796 {
6797 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6798 if (TARGET_PA_20)
6799 {
6800 /* We can use mfia to determine the current program counter. */
6801 if (TARGET_SOM || !TARGET_GAS)
6802 {
6803 operands[3] = gen_label_rtx ();
6804 targetm.asm_out.internal_label (asm_out_file, "L",
6805 CODE_LABEL_NUMBER (operands[3]));
6806 output_asm_insn ("mfia %1", operands);
6807 output_asm_insn ("addil L'%0-%l3,%1", operands);
6808 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6809 }
6810 else
6811 {
6812 output_asm_insn ("mfia %1", operands);
6813 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6814 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6815 }
6816 }
6817 else
6818 {
6819 /* We need to use a branch to determine the current program counter. */
6820 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6821 if (TARGET_SOM || !TARGET_GAS)
6822 {
6823 operands[3] = gen_label_rtx ();
6824 output_asm_insn ("addil L'%0-%l3,%1", operands);
6825 targetm.asm_out.internal_label (asm_out_file, "L",
6826 CODE_LABEL_NUMBER (operands[3]));
6827 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6828 }
6829 else
6830 {
6831 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6832 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6833 }
6834 }
6835 }
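/* For example, for PA 2.0 with GAS the sequence above comes out as

     mfia %r1
     addil L'SYM-$PIC_pcrel$0+12,%r1
     ldo R'SYM-$PIC_pcrel$0+16(%r1),%r2

   (with SYM the target and %r1/%r2 standing in for operands 1 and 2);
   the +12 and +16 account for the distance from the mfia to each use.  */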
6836
6837 /* This routine handles output of long unconditional branches that
6838 exceed the maximum range of a simple branch instruction. Since
6839 we don't have a register available for the branch, we save register
6840 %r1 in the frame marker, load the branch destination DEST into %r1,
6841 execute the branch, and restore %r1 in the delay slot of the branch.
6842
6843 Since long branches may have an insn in the delay slot and the
6844 delay slot is used to restore %r1, we in general need to extract
6845 this insn and execute it before the branch. However, to facilitate
6846 use of this function by conditional branches, we also provide an
6847 option to not extract the delay insn so that it will be emitted
6848 after the long branch. So, if there is an insn in the delay slot,
6849 it is extracted if XDELAY is nonzero.
6850
6851 The lengths of the various long-branch sequences are 20, 16 and 24
6852 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6853
6854 const char *
6855 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6856 {
6857 rtx xoperands[4];
6858
6859 xoperands[0] = dest;
6860
6861 /* First, free up the delay slot. */
6862 if (xdelay && dbr_sequence_length () != 0)
6863 {
6864 /* We can't handle a jump in the delay slot. */
6865 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6866
6867 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6868 optimize, 0, NULL);
6869
6870 /* Now delete the delay insn. */
6871 SET_INSN_DELETED (NEXT_INSN (insn));
6872 }
6873
6874 /* Output an insn to save %r1. The runtime documentation doesn't
6875 specify whether the "Clean Up" slot in the callers frame can
6876 be clobbered by the callee. It isn't copied by HP's builtin
6877 alloca, so this suggests that it can be clobbered if necessary.
6878 The "Static Link" location is copied by HP builtin alloca, so
6879 we avoid using it. Using the cleanup slot might be a problem
6880 if we have to interoperate with languages that pass cleanup
6881 information. However, it should be possible to handle these
6882 situations with GCC's asm feature.
6883
6884 The "Current RP" slot is reserved for the called procedure, so
6885 we try to use it when we don't have a frame of our own. It's
6886 rather unlikely that we won't have a frame when we need to emit
6887 a very long branch.
6888
6889 Really the way to go long term is a register scavenger; go to
6890 the target of the jump and find a register which we can use
6891 as a scratch to hold the value in %r1. Then, we wouldn't have
6892 to free up the delay slot or clobber a slot that may be needed
6893 for other purposes. */
6894 if (TARGET_64BIT)
6895 {
6896 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6897 /* Use the return pointer slot in the frame marker. */
6898 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6899 else
6900 /* Use the slot at -40 in the frame marker since HP builtin
6901 alloca doesn't copy it. */
6902 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6903 }
6904 else
6905 {
6906 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6907 /* Use the return pointer slot in the frame marker. */
6908 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6909 else
6910 /* Use the "Clean Up" slot in the frame marker. In GCC,
6911 the only other use of this location is for copying a
6912 floating point double argument from a floating-point
6913 register to two general registers. The copy is done
6914 as an "atomic" operation when outputting a call, so it
6915 won't interfere with our using the location here. */
6916 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6917 }
6918
6919 if (TARGET_PORTABLE_RUNTIME)
6920 {
6921 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6922 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6923 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6924 }
6925 else if (flag_pic)
6926 {
6927 xoperands[1] = gen_rtx_REG (Pmode, 1);
6928 xoperands[2] = xoperands[1];
6929 pa_output_pic_pcrel_sequence (xoperands);
6930 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6931 }
6932 else
6933 /* Now output a very long branch to the original target. */
6934 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6935
6936 /* Now restore the value of %r1 in the delay slot. */
6937 if (TARGET_64BIT)
6938 {
6939 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6940 return "ldd -16(%%r30),%%r1";
6941 else
6942 return "ldd -40(%%r30),%%r1";
6943 }
6944 else
6945 {
6946 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6947 return "ldw -20(%%r30),%%r1";
6948 else
6949 return "ldw -12(%%r30),%%r1";
6950 }
6951 }
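/* For the common 32-bit, non-PIC case with a frame, the sequence above
   is

     stw %r1,-12(%r30)
     ldil L'target,%r1
     be R'target(%sr4,%r1)
     ldw -12(%r30),%r1

   with the restore of %r1 sitting in the delay slot of the be.  */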
6952
6953 /* This routine handles all the branch-on-bit conditional branch sequences we
6954 might need to generate. It handles nullification of delay slots,
6955 varying length branches, negated branches and all combinations of the
6956 above. It returns the appropriate output template to emit the branch. */
6957
6958 const char *
6959 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6960 {
6961 static char buf[100];
6962 bool useskip;
6963 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6964 int length = get_attr_length (insn);
6965 int xdelay;
6966
6967 /* A conditional branch to the following instruction (e.g. the delay slot) is
6968 asking for a disaster. I do not think this can happen as this pattern
6969 is only used when optimizing; jump optimization should eliminate the
6970 jump. But be prepared just in case. */
6971
6972 if (branch_to_delay_slot_p (insn))
6973 return "nop";
6974
6975 /* If this is a long branch with its delay slot unfilled, set `nullify'
6976 as it can nullify the delay slot and save a nop. */
6977 if (length == 8 && dbr_sequence_length () == 0)
6978 nullify = 1;
6979
6980 /* If this is a short forward conditional branch which did not get
6981 its delay slot filled, the delay slot can still be nullified. */
6982 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6983 nullify = forward_branch_p (insn);
6984
6985 /* A forward branch over a single nullified insn can be done with an
6986 extrs instruction. This avoids a single cycle penalty due to a
6987 mis-predicted branch if we fall through (branch not taken). */
6988 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6989
6990 switch (length)
6991 {
6992
6993 /* All short conditional branches except backwards with an unfilled
6994 delay slot. */
6995 case 4:
6996 if (useskip)
6997 strcpy (buf, "{extrs,|extrw,s,}");
6998 else
6999 strcpy (buf, "bb,");
7000 if (useskip && GET_MODE (operands[0]) == DImode)
7001 strcpy (buf, "extrd,s,*");
7002 else if (GET_MODE (operands[0]) == DImode)
7003 strcpy (buf, "bb,*");
7004 if ((which == 0 && negated)
7005 || (which == 1 && ! negated))
7006 strcat (buf, ">=");
7007 else
7008 strcat (buf, "<");
7009 if (useskip)
7010 strcat (buf, " %0,%1,1,%%r0");
7011 else if (nullify && negated)
7012 {
7013 if (branch_needs_nop_p (insn))
7014 strcat (buf, ",n %0,%1,%3%#");
7015 else
7016 strcat (buf, ",n %0,%1,%3");
7017 }
7018 else if (nullify && ! negated)
7019 {
7020 if (branch_needs_nop_p (insn))
7021 strcat (buf, ",n %0,%1,%2%#");
7022 else
7023 strcat (buf, ",n %0,%1,%2");
7024 }
7025 else if (! nullify && negated)
7026 strcat (buf, " %0,%1,%3");
7027 else if (! nullify && ! negated)
7028 strcat (buf, " %0,%1,%2");
7029 break;
7030
7031 /* All long conditionals. Note a short backward branch with an
7032 unfilled delay slot is treated just like a long backward branch
7033 with an unfilled delay slot. */
7034 case 8:
7035 /* Handle weird backwards branch with a filled delay slot
7036 which is nullified. */
7037 if (dbr_sequence_length () != 0
7038 && ! forward_branch_p (insn)
7039 && nullify)
7040 {
7041 strcpy (buf, "bb,");
7042 if (GET_MODE (operands[0]) == DImode)
7043 strcat (buf, "*");
7044 if ((which == 0 && negated)
7045 || (which == 1 && ! negated))
7046 strcat (buf, "<");
7047 else
7048 strcat (buf, ">=");
7049 if (negated)
7050 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7051 else
7052 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7053 }
7054 /* Handle short backwards branch with an unfilled delay slot.
7055 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7056 taken and untaken branches. */
7057 else if (dbr_sequence_length () == 0
7058 && ! forward_branch_p (insn)
7059 && INSN_ADDRESSES_SET_P ()
7060 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7061 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7062 {
7063 strcpy (buf, "bb,");
7064 if (GET_MODE (operands[0]) == DImode)
7065 strcat (buf, "*");
7066 if ((which == 0 && negated)
7067 || (which == 1 && ! negated))
7068 strcat (buf, ">=");
7069 else
7070 strcat (buf, "<");
7071 if (negated)
7072 strcat (buf, " %0,%1,%3%#");
7073 else
7074 strcat (buf, " %0,%1,%2%#");
7075 }
7076 else
7077 {
7078 if (GET_MODE (operands[0]) == DImode)
7079 strcpy (buf, "extrd,s,*");
7080 else
7081 strcpy (buf, "{extrs,|extrw,s,}");
7082 if ((which == 0 && negated)
7083 || (which == 1 && ! negated))
7084 strcat (buf, "<");
7085 else
7086 strcat (buf, ">=");
7087 if (nullify && negated)
7088 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7089 else if (nullify && ! negated)
7090 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7091 else if (negated)
7092 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7093 else
7094 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7095 }
7096 break;
7097
7098 default:
7099 /* The reversed conditional branch must branch over one additional
7100 instruction if the delay slot is filled and needs to be extracted
7101 by pa_output_lbranch. If the delay slot is empty or this is a
7102 nullified forward branch, the instruction after the reversed
7103 condition branch must be nullified. */
7104 if (dbr_sequence_length () == 0
7105 || (nullify && forward_branch_p (insn)))
7106 {
7107 nullify = 1;
7108 xdelay = 0;
7109 operands[4] = GEN_INT (length);
7110 }
7111 else
7112 {
7113 xdelay = 1;
7114 operands[4] = GEN_INT (length + 4);
7115 }
7116
7117 if (GET_MODE (operands[0]) == DImode)
7118 strcpy (buf, "bb,*");
7119 else
7120 strcpy (buf, "bb,");
7121 if ((which == 0 && negated)
7122 || (which == 1 && !negated))
7123 strcat (buf, "<");
7124 else
7125 strcat (buf, ">=");
7126 if (nullify)
7127 strcat (buf, ",n %0,%1,.+%4");
7128 else
7129 strcat (buf, " %0,%1,.+%4");
7130 output_asm_insn (buf, operands);
7131 return pa_output_lbranch (negated ? operands[3] : operands[2],
7132 insn, xdelay);
7133 }
7134 return buf;
7135 }
7136
7137 /* This routine handles all the branch-on-variable-bit conditional branch
7138 sequences we might need to generate. It handles nullification of delay
7139 slots, varying length branches, negated branches and all combinations
7140 of the above. It returns the appropriate output template to emit the
7141 branch. */
7142
7143 const char *
7144 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7145 int which)
7146 {
7147 static char buf[100];
7148 bool useskip;
7149 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7150 int length = get_attr_length (insn);
7151 int xdelay;
7152
7153 /* A conditional branch to the following instruction (e.g. the delay slot) is
7154 asking for a disaster. I do not think this can happen as this pattern
7155 is only used when optimizing; jump optimization should eliminate the
7156 jump. But be prepared just in case. */
7157
7158 if (branch_to_delay_slot_p (insn))
7159 return "nop";
7160
7161 /* If this is a long branch with its delay slot unfilled, set `nullify'
7162 as it can nullify the delay slot and save a nop. */
7163 if (length == 8 && dbr_sequence_length () == 0)
7164 nullify = 1;
7165
7166 /* If this is a short forward conditional branch which did not get
7167 its delay slot filled, the delay slot can still be nullified. */
7168 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7169 nullify = forward_branch_p (insn);
7170
7171 /* A forward branch over a single nullified insn can be done with an
7172 extrs instruction. This avoids a single cycle penalty due to a
7173 mis-predicted branch if we fall through (branch not taken). */
7174 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7175
7176 switch (length)
7177 {
7178
7179 /* All short conditional branches except backwards with an unfilled
7180 delay slot. */
7181 case 4:
7182 if (useskip)
7183 strcpy (buf, "{vextrs,|extrw,s,}");
7184 else
7185 strcpy (buf, "{bvb,|bb,}");
7186 if (useskip && GET_MODE (operands[0]) == DImode)
7187 strcpy (buf, "extrd,s,*");
7188 else if (GET_MODE (operands[0]) == DImode)
7189 strcpy (buf, "bb,*");
7190 if ((which == 0 && negated)
7191 || (which == 1 && ! negated))
7192 strcat (buf, ">=");
7193 else
7194 strcat (buf, "<");
7195 if (useskip)
7196 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7197 else if (nullify && negated)
7198 {
7199 if (branch_needs_nop_p (insn))
7200 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7201 else
7202 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7203 }
7204 else if (nullify && ! negated)
7205 {
7206 if (branch_needs_nop_p (insn))
7207 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7208 else
7209 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7210 }
7211 else if (! nullify && negated)
7212 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7213 else if (! nullify && ! negated)
7214 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7215 break;
7216
7217 /* All long conditionals. Note a short backward branch with an
7218 unfilled delay slot is treated just like a long backward branch
7219 with an unfilled delay slot. */
7220 case 8:
7221 /* Handle weird backwards branch with a filled delay slot
7222 which is nullified. */
7223 if (dbr_sequence_length () != 0
7224 && ! forward_branch_p (insn)
7225 && nullify)
7226 {
7227 strcpy (buf, "{bvb,|bb,}");
7228 if (GET_MODE (operands[0]) == DImode)
7229 strcat (buf, "*");
7230 if ((which == 0 && negated)
7231 || (which == 1 && ! negated))
7232 strcat (buf, "<");
7233 else
7234 strcat (buf, ">=");
7235 if (negated)
7236 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7237 else
7238 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7239 }
7240 /* Handle short backwards branch with an unfilled delay slot.
7241 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7242 taken and untaken branches. */
7243 else if (dbr_sequence_length () == 0
7244 && ! forward_branch_p (insn)
7245 && INSN_ADDRESSES_SET_P ()
7246 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7247 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7248 {
7249 strcpy (buf, "{bvb,|bb,}");
7250 if (GET_MODE (operands[0]) == DImode)
7251 strcat (buf, "*");
7252 if ((which == 0 && negated)
7253 || (which == 1 && ! negated))
7254 strcat (buf, ">=");
7255 else
7256 strcat (buf, "<");
7257 if (negated)
7258 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7259 else
7260 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7261 }
7262 else
7263 {
7264 strcpy (buf, "{vextrs,|extrw,s,}");
7265 if (GET_MODE (operands[0]) == DImode)
7266 strcpy (buf, "extrd,s,*");
7267 if ((which == 0 && negated)
7268 || (which == 1 && ! negated))
7269 strcat (buf, "<");
7270 else
7271 strcat (buf, ">=");
7272 if (nullify && negated)
7273 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7274 else if (nullify && ! negated)
7275 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7276 else if (negated)
7277 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7278 else
7279 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7280 }
7281 break;
7282
7283 default:
7284 /* The reversed conditional branch must branch over one additional
7285 instruction if the delay slot is filled and needs to be extracted
7286 by pa_output_lbranch. If the delay slot is empty or this is a
7287 nullified forward branch, the instruction after the reversed
7288 conditional branch must be nullified. */
7289 if (dbr_sequence_length () == 0
7290 || (nullify && forward_branch_p (insn)))
7291 {
7292 nullify = 1;
7293 xdelay = 0;
7294 operands[4] = GEN_INT (length);
7295 }
7296 else
7297 {
7298 xdelay = 1;
7299 operands[4] = GEN_INT (length + 4);
7300 }
7301
7302 if (GET_MODE (operands[0]) == DImode)
7303 strcpy (buf, "bb,*");
7304 else
7305 strcpy (buf, "{bvb,|bb,}");
7306 if ((which == 0 && negated)
7307 || (which == 1 && !negated))
7308 strcat (buf, "<");
7309 else
7310 strcat (buf, ">=");
7311 if (nullify)
7312 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7313 else
7314 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7315 output_asm_insn (buf, operands);
7316 return pa_output_lbranch (negated ? operands[3] : operands[2],
7317 insn, xdelay);
7318 }
7319 return buf;
7320 }
7321
7322 /* Return the output template for emitting a dbra type insn.
7323
7324 Note it may perform some output operations on its own before
7325 returning the final output string. */
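/* Illustration only (hypothetical operands): a short dbra-type insn
   for a loop counter in a general register might come out as
   "addib,<> -1,%r3,L$0010", decrementing %r3 and branching while the
   result is nonzero. The alternatives below handle the counter
   living in a general register, an FP register, or memory. */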
7326 const char *
7327 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7328 {
7329 int length = get_attr_length (insn);
7330
7331 /* A conditional branch to the following instruction (e.g. the delay slot) is
7332 asking for a disaster. Be prepared! */
7333
7334 if (branch_to_delay_slot_p (insn))
7335 {
7336 if (which_alternative == 0)
7337 return "ldo %1(%0),%0";
7338 else if (which_alternative == 1)
7339 {
7340 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7341 output_asm_insn ("ldw -16(%%r30),%4", operands);
7342 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7343 return "{fldws|fldw} -16(%%r30),%0";
7344 }
7345 else
7346 {
7347 output_asm_insn ("ldw %0,%4", operands);
7348 return "ldo %1(%4),%4\n\tstw %4,%0";
7349 }
7350 }
7351
7352 if (which_alternative == 0)
7353 {
7354 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7355 int xdelay;
7356
7357 /* If this is a long branch with its delay slot unfilled, set `nullify'
7358 as it can nullify the delay slot and save a nop. */
7359 if (length == 8 && dbr_sequence_length () == 0)
7360 nullify = 1;
7361
7362 /* If this is a short forward conditional branch which did not get
7363 its delay slot filled, the delay slot can still be nullified. */
7364 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7365 nullify = forward_branch_p (insn);
7366
7367 switch (length)
7368 {
7369 case 4:
7370 if (nullify)
7371 {
7372 if (branch_needs_nop_p (insn))
7373 return "addib,%C2,n %1,%0,%3%#";
7374 else
7375 return "addib,%C2,n %1,%0,%3";
7376 }
7377 else
7378 return "addib,%C2 %1,%0,%3";
7379
7380 case 8:
7381 /* Handle weird backwards branch with a filled delay slot
7382 which is nullified. */
7383 if (dbr_sequence_length () != 0
7384 && ! forward_branch_p (insn)
7385 && nullify)
7386 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7387 /* Handle short backwards branch with an unfilled delay slot.
7388 Using an addb;nop rather than addi;bl saves 1 cycle for both
7389 taken and untaken branches. */
7390 else if (dbr_sequence_length () == 0
7391 && ! forward_branch_p (insn)
7392 && INSN_ADDRESSES_SET_P ()
7393 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7394 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7395 return "addib,%C2 %1,%0,%3%#";
7396
7397 /* Handle normal cases. */
7398 if (nullify)
7399 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7400 else
7401 return "addi,%N2 %1,%0,%0\n\tb %3";
7402
7403 default:
7404 /* The reversed conditional branch must branch over one additional
7405 instruction if the delay slot is filled and needs to be extracted
7406 by pa_output_lbranch. If the delay slot is empty or this is a
7407 nullified forward branch, the instruction after the reversed
7408 conditional branch must be nullified. */
7409 if (dbr_sequence_length () == 0
7410 || (nullify && forward_branch_p (insn)))
7411 {
7412 nullify = 1;
7413 xdelay = 0;
7414 operands[4] = GEN_INT (length);
7415 }
7416 else
7417 {
7418 xdelay = 1;
7419 operands[4] = GEN_INT (length + 4);
7420 }
7421
7422 if (nullify)
7423 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7424 else
7425 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7426
7427 return pa_output_lbranch (operands[3], insn, xdelay);
7428 }
7429
7430 }
7431 /* Deal with gross reload from FP register case. */
7432 else if (which_alternative == 1)
7433 {
7434 /* Move loop counter from FP register to MEM then into a GR,
7435 increment the GR, store the GR into MEM, and finally reload
7436 the FP register from MEM from within the branch's delay slot. */
7437 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7438 operands);
7439 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7440 if (length == 24)
7441 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7442 else if (length == 28)
7443 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7444 else
7445 {
7446 operands[5] = GEN_INT (length - 16);
7447 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7448 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7449 return pa_output_lbranch (operands[3], insn, 0);
7450 }
7451 }
7452 /* Deal with gross reload from memory case. */
7453 else
7454 {
7455 /* Reload loop counter from memory, the store back to memory
7456 happens in the branch's delay slot. */
7457 output_asm_insn ("ldw %0,%4", operands);
7458 if (length == 12)
7459 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7460 else if (length == 16)
7461 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7462 else
7463 {
7464 operands[5] = GEN_INT (length - 4);
7465 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7466 return pa_output_lbranch (operands[3], insn, 0);
7467 }
7468 }
7469 }
7470
7471 /* Return the output template for emitting a movb type insn.
7472
7473 Note it may perform some output operations on its own before
7474 returning the final output string. */
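/* Illustration only (hypothetical operands): a short movb might come
   out as "movb,= %r19,%r26,L$0020", copying %r19 into %r26 and
   branching if the value moved is zero. The four alternatives below
   handle a general register, an FP register, memory, or SAR as the
   destination. */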
7475 const char *
7476 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7477 int reverse_comparison)
7478 {
7479 int length = get_attr_length (insn);
7480
7481 /* A conditional branch to the following instruction (e.g. the delay slot) is
7482 asking for a disaster. Be prepared! */
7483
7484 if (branch_to_delay_slot_p (insn))
7485 {
7486 if (which_alternative == 0)
7487 return "copy %1,%0";
7488 else if (which_alternative == 1)
7489 {
7490 output_asm_insn ("stw %1,-16(%%r30)", operands);
7491 return "{fldws|fldw} -16(%%r30),%0";
7492 }
7493 else if (which_alternative == 2)
7494 return "stw %1,%0";
7495 else
7496 return "mtsar %r1";
7497 }
7498
7499 /* Support the second variant, in which the comparison is reversed. */
7500 if (reverse_comparison)
7501 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7502
7503 if (which_alternative == 0)
7504 {
7505 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7506 int xdelay;
7507
7508 /* If this is a long branch with its delay slot unfilled, set `nullify'
7509 as it can nullify the delay slot and save a nop. */
7510 if (length == 8 && dbr_sequence_length () == 0)
7511 nullify = 1;
7512
7513 /* If this is a short forward conditional branch which did not get
7514 its delay slot filled, the delay slot can still be nullified. */
7515 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7516 nullify = forward_branch_p (insn);
7517
7518 switch (length)
7519 {
7520 case 4:
7521 if (nullify)
7522 {
7523 if (branch_needs_nop_p (insn))
7524 return "movb,%C2,n %1,%0,%3%#";
7525 else
7526 return "movb,%C2,n %1,%0,%3";
7527 }
7528 else
7529 return "movb,%C2 %1,%0,%3";
7530
7531 case 8:
7532 /* Handle weird backwards branch with a filled delay slot
7533 which is nullified. */
7534 if (dbr_sequence_length () != 0
7535 && ! forward_branch_p (insn)
7536 && nullify)
7537 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7538
7539 /* Handle short backwards branch with an unfilled delay slot.
7540 Using a movb;nop rather than or;bl saves 1 cycle for both
7541 taken and untaken branches. */
7542 else if (dbr_sequence_length () == 0
7543 && ! forward_branch_p (insn)
7544 && INSN_ADDRESSES_SET_P ()
7545 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7546 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7547 return "movb,%C2 %1,%0,%3%#";
7548 /* Handle normal cases. */
7549 if (nullify)
7550 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7551 else
7552 return "or,%N2 %1,%%r0,%0\n\tb %3";
7553
7554 default:
7555 /* The reversed conditional branch must branch over one additional
7556 instruction if the delay slot is filled and needs to be extracted
7557 by pa_output_lbranch. If the delay slot is empty or this is a
7558 nullified forward branch, the instruction after the reversed
7559 conditional branch must be nullified. */
7560 if (dbr_sequence_length () == 0
7561 || (nullify && forward_branch_p (insn)))
7562 {
7563 nullify = 1;
7564 xdelay = 0;
7565 operands[4] = GEN_INT (length);
7566 }
7567 else
7568 {
7569 xdelay = 1;
7570 operands[4] = GEN_INT (length + 4);
7571 }
7572
7573 if (nullify)
7574 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7575 else
7576 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7577
7578 return pa_output_lbranch (operands[3], insn, xdelay);
7579 }
7580 }
7581 /* Deal with gross reload for FP destination register case. */
7582 else if (which_alternative == 1)
7583 {
7584 /* Move source register to MEM, perform the branch test, then
7585 finally load the FP register from MEM from within the branch's
7586 delay slot. */
7587 output_asm_insn ("stw %1,-16(%%r30)", operands);
7588 if (length == 12)
7589 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7590 else if (length == 16)
7591 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7592 else
7593 {
7594 operands[4] = GEN_INT (length - 4);
7595 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7596 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7597 return pa_output_lbranch (operands[3], insn, 0);
7598 }
7599 }
7600 /* Deal with gross reload from memory case. */
7601 else if (which_alternative == 2)
7602 {
7603 /* Reload loop counter from memory, the store back to memory
7604 happens in the branch's delay slot. */
7605 if (length == 8)
7606 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7607 else if (length == 12)
7608 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7609 else
7610 {
7611 operands[4] = GEN_INT (length);
7612 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7613 operands);
7614 return pa_output_lbranch (operands[3], insn, 0);
7615 }
7616 }
7617 /* Handle SAR as a destination. */
7618 else
7619 {
7620 if (length == 8)
7621 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7622 else if (length == 12)
7623 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7624 else
7625 {
7626 operands[4] = GEN_INT (length);
7627 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7628 operands);
7629 return pa_output_lbranch (operands[3], insn, 0);
7630 }
7631 }
7632 }
7633
7634 /* Copy any FP arguments in INSN into integer registers. */
7635 static void
7636 copy_fp_args (rtx_insn *insn)
7637 {
7638 rtx link;
7639 rtx xoperands[2];
7640
7641 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7642 {
7643 int arg_mode, regno;
7644 rtx use = XEXP (link, 0);
7645
7646 if (! (GET_CODE (use) == USE
7647 && GET_CODE (XEXP (use, 0)) == REG
7648 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7649 continue;
7650
7651 arg_mode = GET_MODE (XEXP (use, 0));
7652 regno = REGNO (XEXP (use, 0));
7653
7654 /* Is it a floating point register? */
7655 if (regno >= 32 && regno <= 39)
7656 {
7657 /* Copy the FP register into an integer register via memory. */
7658 if (arg_mode == SFmode)
7659 {
7660 xoperands[0] = XEXP (use, 0);
7661 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7662 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7663 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7664 }
7665 else
7666 {
7667 xoperands[0] = XEXP (use, 0);
7668 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7669 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7670 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7671 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7672 }
7673 }
7674 }
7675 }
7676
7677 /* Compute length of the FP argument copy sequence for INSN. */
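/* The per-argument constants below mirror copy_fp_args: an SFmode
   copy takes two instructions (8 bytes) and a DFmode copy three
   (12 bytes). */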
7678 static int
7679 length_fp_args (rtx_insn *insn)
7680 {
7681 int length = 0;
7682 rtx link;
7683
7684 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7685 {
7686 int arg_mode, regno;
7687 rtx use = XEXP (link, 0);
7688
7689 if (! (GET_CODE (use) == USE
7690 && GET_CODE (XEXP (use, 0)) == REG
7691 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7692 continue;
7693
7694 arg_mode = GET_MODE (XEXP (use, 0));
7695 regno = REGNO (XEXP (use, 0));
7696
7697 /* Is it a floating point register? */
7698 if (regno >= 32 && regno <= 39)
7699 {
7700 if (arg_mode == SFmode)
7701 length += 8;
7702 else
7703 length += 12;
7704 }
7705 }
7706
7707 return length;
7708 }
7709
7710 /* Return the attribute length for the millicode call instruction INSN.
7711 The length must match the code generated by pa_output_millicode_call.
7712 We include the delay slot in the returned length as it is better to
7713 overestimate the length than to underestimate it. */
7714
7715 int
7716 pa_attr_length_millicode_call (rtx_insn *insn)
7717 {
7718 unsigned long distance = -1;
7719 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7720
7721 if (INSN_ADDRESSES_SET_P ())
7722 {
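/* Worst-case distance from INSN back to the start of the $CODE$
   subspace; on unsigned wraparound, fall back to -1 so the target
   is treated as out of range. */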
7723 distance = (total + insn_current_reference_address (insn));
7724 if (distance < total)
7725 distance = -1;
7726 }
7727
7728 if (TARGET_64BIT)
7729 {
7730 if (!TARGET_LONG_CALLS && distance < 7600000)
7731 return 8;
7732
7733 return 20;
7734 }
7735 else if (TARGET_PORTABLE_RUNTIME)
7736 return 24;
7737 else
7738 {
7739 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7740 return 8;
7741
7742 if (!flag_pic)
7743 return 12;
7744
7745 return 24;
7746 }
7747 }
7748
7749 /* INSN is a function call.
7750
7751 CALL_DEST is the routine we are calling. */
7752
7753 const char *
7754 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7755 {
7756 int attr_length = get_attr_length (insn);
7757 int seq_length = dbr_sequence_length ();
7758 rtx xoperands[4];
7759
7760 xoperands[0] = call_dest;
7761
7762 /* Handle the common case where we are sure that the branch will
7763 reach the beginning of the $CODE$ subspace. The within-reach
7764 form of the $$sh_func_adrs call has a length of 28. Because it
7765 has an attribute type of sh_func_adrs, it never has a nonzero
7766 sequence length (i.e., the delay slot is never filled). */
7767 if (!TARGET_LONG_CALLS
7768 && (attr_length == 8
7769 || (attr_length == 28
7770 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7771 {
7772 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7773 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7774 }
7775 else
7776 {
7777 if (TARGET_64BIT)
7778 {
7779 /* It might seem that one insn could be saved by accessing
7780 the millicode function using the linkage table. However,
7781 this doesn't work in shared libraries and other dynamically
7782 loaded objects. Using a pc-relative sequence also avoids
7783 problems related to the implicit use of the gp register. */
7784 xoperands[1] = gen_rtx_REG (Pmode, 1);
7785 xoperands[2] = xoperands[1];
7786 pa_output_pic_pcrel_sequence (xoperands);
7787 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7788 }
7789 else if (TARGET_PORTABLE_RUNTIME)
7790 {
7791 /* Pure portable runtime doesn't allow be/ble; we also don't
7792 have PIC support in the assembler/linker, so this sequence
7793 is needed. */
7794
7795 /* Get the address of our target into %r1. */
7796 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7797 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7798
7799 /* Get our return address into %r31. */
7800 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7801 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7802
7803 /* Jump to our target address in %r1. */
7804 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7805 }
7806 else if (!flag_pic)
7807 {
7808 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7809 if (TARGET_PA_20)
7810 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7811 else
7812 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7813 }
7814 else
7815 {
7816 xoperands[1] = gen_rtx_REG (Pmode, 31);
7817 xoperands[2] = gen_rtx_REG (Pmode, 1);
7818 pa_output_pic_pcrel_sequence (xoperands);
7819
7820 /* Adjust return address. */
7821 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7822
7823 /* Jump to our target address in %r1. */
7824 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7825 }
7826 }
7827
7828 if (seq_length == 0)
7829 output_asm_insn ("nop", xoperands);
7830
7831 return "";
7832 }
7833
7834 /* Return the attribute length of the call instruction INSN. The SIBCALL
7835 flag indicates whether INSN is a regular call or a sibling call. The
7836 length returned must be longer than the code actually generated by
7837 pa_output_call. Since branch shortening is done before delay branch
7838 sequencing, there is no way to determine whether or not the delay
7839 slot will be filled during branch shortening. Even when the delay
7840 slot is filled, we may have to add a nop if the delay slot contains
7841 a branch that can't reach its target. Thus, we always have to include
7842 the delay slot in the length estimate. This used to be done in
7843 pa_adjust_insn_length but we do it here now as some sequences always
7844 fill the delay slot and we can save four bytes in the estimate for
7845 these sequences. */
7846
7847 int
7848 pa_attr_length_call (rtx_insn *insn, int sibcall)
7849 {
7850 int local_call;
7851 rtx call, call_dest;
7852 tree call_decl;
7853 int length = 0;
7854 rtx pat = PATTERN (insn);
7855 unsigned long distance = -1;
7856
7857 gcc_assert (CALL_P (insn));
7858
7859 if (INSN_ADDRESSES_SET_P ())
7860 {
7861 unsigned long total;
7862
7863 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7864 distance = (total + insn_current_reference_address (insn));
7865 if (distance < total)
7866 distance = -1;
7867 }
7868
7869 gcc_assert (GET_CODE (pat) == PARALLEL);
7870
7871 /* Get the call rtx. */
7872 call = XVECEXP (pat, 0, 0);
7873 if (GET_CODE (call) == SET)
7874 call = SET_SRC (call);
7875
7876 gcc_assert (GET_CODE (call) == CALL);
7877
7878 /* Determine if this is a local call. */
7879 call_dest = XEXP (XEXP (call, 0), 0);
7880 call_decl = SYMBOL_REF_DECL (call_dest);
7881 local_call = call_decl && targetm.binds_local_p (call_decl);
7882
7883 /* pc-relative branch. */
7884 if (!TARGET_LONG_CALLS
7885 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7886 || distance < MAX_PCREL17F_OFFSET))
7887 length += 8;
7888
7889 /* 64-bit plabel sequence. */
7890 else if (TARGET_64BIT && !local_call)
7891 length += sibcall ? 28 : 24;
7892
7893 /* non-pic long absolute branch sequence. */
7894 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7895 length += 12;
7896
7897 /* long pc-relative branch sequence. */
7898 else if (TARGET_LONG_PIC_SDIFF_CALL
7899 || (TARGET_GAS && !TARGET_SOM && local_call))
7900 {
7901 length += 20;
7902
7903 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7904 length += 8;
7905 }
7906
7907 /* 32-bit plabel sequence. */
7908 else
7909 {
7910 length += 32;
7911
7912 if (TARGET_SOM)
7913 length += length_fp_args (insn);
7914
7915 if (flag_pic)
7916 length += 4;
7917
7918 if (!TARGET_PA_20)
7919 {
7920 if (!sibcall)
7921 length += 8;
7922
7923 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7924 length += 8;
7925 }
7926 }
7927
7928 return length;
7929 }
7930
7931 /* INSN is a function call.
7932
7933 CALL_DEST is the routine we are calling. */
7934
7935 const char *
7936 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7937 {
7938 int seq_length = dbr_sequence_length ();
7939 tree call_decl = SYMBOL_REF_DECL (call_dest);
7940 int local_call = call_decl && targetm.binds_local_p (call_decl);
7941 rtx xoperands[4];
7942
7943 xoperands[0] = call_dest;
7944
7945 /* Handle the common case where we're sure that the branch will reach
7946 the beginning of the "$CODE$" subspace. This is the beginning of
7947 the current function if we are in a named section. */
7948 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7949 {
7950 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7951 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7952 }
7953 else
7954 {
7955 if (TARGET_64BIT && !local_call)
7956 {
7957 /* ??? As far as I can tell, the HP linker doesn't support the
7958 long pc-relative sequence described in the 64-bit runtime
7959 architecture. So, we use a slightly longer indirect call. */
7960 xoperands[0] = pa_get_deferred_plabel (call_dest);
7961 xoperands[1] = gen_label_rtx ();
7962
7963 /* If this isn't a sibcall, we put the load of %r27 into the
7964 delay slot. We can't do this in a sibcall as we don't
7965 have a second call-clobbered scratch register available.
7966 We don't need to do anything when generating fast indirect
7967 calls. */
7968 if (seq_length != 0 && !sibcall)
7969 {
7970 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7971 optimize, 0, NULL);
7972
7973 /* Now delete the delay insn. */
7974 SET_INSN_DELETED (NEXT_INSN (insn));
7975 seq_length = 0;
7976 }
7977
7978 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7979 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7980 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7981
7982 if (sibcall)
7983 {
7984 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7985 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7986 output_asm_insn ("bve (%%r1)", xoperands);
7987 }
7988 else
7989 {
7990 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7991 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7992 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7993 seq_length = 1;
7994 }
7995 }
7996 else
7997 {
7998 int indirect_call = 0;
7999
8000 /* Emit a long call. There are several different sequences
8001 of increasing length and complexity. In most cases,
8002 they don't allow an instruction in the delay slot. */
8003 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8004 && !TARGET_LONG_PIC_SDIFF_CALL
8005 && !(TARGET_GAS && !TARGET_SOM && local_call)
8006 && !TARGET_64BIT)
8007 indirect_call = 1;
8008
8009 if (seq_length != 0
8010 && !sibcall
8011 && (!TARGET_PA_20
8012 || indirect_call
8013 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
8014 {
8015 /* A non-jump insn in the delay slot. By definition we can
8016 emit this insn before the call (and in fact before argument
8017 relocation). */
8018 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
8019 NULL);
8020
8021 /* Now delete the delay insn. */
8022 SET_INSN_DELETED (NEXT_INSN (insn));
8023 seq_length = 0;
8024 }
8025
8026 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8027 {
8028 /* This is the best sequence for making long calls in
8029 non-pic code. Unfortunately, GNU ld doesn't provide
8030 the stub needed for external calls, and GAS's support
8031 for this with the SOM linker is buggy. It is safe
8032 to use this for local calls. */
8033 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8034 if (sibcall)
8035 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
8036 else
8037 {
8038 if (TARGET_PA_20)
8039 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8040 xoperands);
8041 else
8042 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8043
8044 output_asm_insn ("copy %%r31,%%r2", xoperands);
8045 seq_length = 1;
8046 }
8047 }
8048 else
8049 {
8050 /* The HP assembler and linker can handle relocations for
8051 the difference of two symbols. The HP assembler
8052 recognizes the sequence as a pc-relative call and
8053 the linker provides stubs when needed. */
8054
8055 /* GAS currently can't generate the relocations that
8056 are needed for the SOM linker under HP-UX using this
8057 sequence. The GNU linker doesn't generate the stubs
8058 that are needed for external calls on TARGET_ELF32
8059 with this sequence. For now, we have to use a longer
8060 plabel sequence when using GAS for non-local calls. */
8061 if (TARGET_LONG_PIC_SDIFF_CALL
8062 || (TARGET_GAS && !TARGET_SOM && local_call))
8063 {
8064 xoperands[1] = gen_rtx_REG (Pmode, 1);
8065 xoperands[2] = xoperands[1];
8066 pa_output_pic_pcrel_sequence (xoperands);
8067 }
8068 else
8069 {
8070 /* Emit a long plabel-based call sequence. This is
8071 essentially an inline implementation of $$dyncall.
8072 We don't actually try to call $$dyncall as this is
8073 as difficult as calling the function itself. */
8074 xoperands[0] = pa_get_deferred_plabel (call_dest);
8075 xoperands[1] = gen_label_rtx ();
8076
8077 /* Since the call is indirect, FP arguments in registers
8078 need to be copied to the general registers. Then, the
8079 argument relocation stub will copy them back. */
8080 if (TARGET_SOM)
8081 copy_fp_args (insn);
8082
8083 if (flag_pic)
8084 {
8085 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8086 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8087 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8088 }
8089 else
8090 {
8091 output_asm_insn ("addil LR'%0-$global$,%%r27",
8092 xoperands);
8093 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8094 xoperands);
8095 }
8096
8097 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8098 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8099 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8100 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8101
8102 if (!sibcall && !TARGET_PA_20)
8103 {
8104 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8105 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8106 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8107 else
8108 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8109 }
8110 }
8111
8112 if (TARGET_PA_20)
8113 {
8114 if (sibcall)
8115 output_asm_insn ("bve (%%r1)", xoperands);
8116 else
8117 {
8118 if (indirect_call)
8119 {
8120 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8121 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8122 seq_length = 1;
8123 }
8124 else
8125 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8126 }
8127 }
8128 else
8129 {
8130 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8131 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8132 xoperands);
8133
8134 if (sibcall)
8135 {
8136 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8137 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8138 else
8139 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8140 }
8141 else
8142 {
8143 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8144 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8145 else
8146 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8147
8148 if (indirect_call)
8149 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8150 else
8151 output_asm_insn ("copy %%r31,%%r2", xoperands);
8152 seq_length = 1;
8153 }
8154 }
8155 }
8156 }
8157 }
8158
8159 if (seq_length == 0)
8160 output_asm_insn ("nop", xoperands);
8161
8162 return "";
8163 }
8164
8165 /* Return the attribute length of the indirect call instruction INSN.
8166 The length must match the code generated by pa_output_indirect_call.
8167 The returned length includes the delay slot. Currently, the delay
8168 slot of an indirect call sequence is not exposed and it is used by
8169 the sequence itself. */
8170
8171 int
8172 pa_attr_length_indirect_call (rtx_insn *insn)
8173 {
8174 unsigned long distance = -1;
8175 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8176
8177 if (INSN_ADDRESSES_SET_P ())
8178 {
8179 distance = (total + insn_current_reference_address (insn));
8180 if (distance < total)
8181 distance = -1;
8182 }
8183
8184 if (TARGET_64BIT)
8185 return 12;
8186
8187 if (TARGET_FAST_INDIRECT_CALLS)
8188 return 8;
8189
8190 if (TARGET_PORTABLE_RUNTIME)
8191 return 16;
8192
8193 /* Inline version of $$dyncall. */
8194 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8195 return 20;
8196
8197 if (!TARGET_LONG_CALLS
8198 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8199 || distance < MAX_PCREL17F_OFFSET))
8200 return 8;
8201
8202 /* Out of reach; we can use ble. */
8203 if (!flag_pic)
8204 return 12;
8205
8206 /* Inline version of $$dyncall. */
8207 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8208 return 20;
8209
8210 if (!optimize_size)
8211 return 36;
8212
8213 /* Long PIC pc-relative call. */
8214 return 20;
8215 }
8216
8217 const char *
8218 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8219 {
8220 rtx xoperands[4];
8221 int length;
8222
8223 if (TARGET_64BIT)
8224 {
8225 xoperands[0] = call_dest;
8226 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8227 "bve,l (%%r2),%%r2\n\t"
8228 "ldd 24(%0),%%r27", xoperands);
8229 return "";
8230 }
8231
8232 /* First the special case for kernels, level 0 systems, etc. */
8233 if (TARGET_FAST_INDIRECT_CALLS)
8234 {
8235 pa_output_arg_descriptor (insn);
8236 if (TARGET_PA_20)
8237 return "bve,l,n (%%r22),%%r2\n\tnop";
8238 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8239 }
8240
8241 if (TARGET_PORTABLE_RUNTIME)
8242 {
8243 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8244 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8245 pa_output_arg_descriptor (insn);
8246 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8247 }
8248
8249 /* Maybe emit a fast inline version of $$dyncall. */
8250 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8251 {
8252 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8253 "ldw 2(%%r22),%%r19\n\t"
8254 "ldw -2(%%r22),%%r22", xoperands);
8255 pa_output_arg_descriptor (insn);
8256 if (TARGET_NO_SPACE_REGS)
8257 {
8258 if (TARGET_PA_20)
8259 return "bve,l,n (%%r22),%%r2\n\tnop";
8260 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8261 }
8262 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8263 }
8264
8265 /* Now the normal case -- we can reach $$dyncall directly or
8266 we're sure that we can get there via a long-branch stub.
8267
8268 No need to check target flags as the length uniquely identifies
8269 the remaining cases. */
8270 length = pa_attr_length_indirect_call (insn);
8271 if (length == 8)
8272 {
8273 pa_output_arg_descriptor (insn);
8274
8275 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8276 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8277 variant of the B,L instruction can't be used on the SOM target. */
8278 if (TARGET_PA_20 && !TARGET_SOM)
8279 return "b,l,n $$dyncall,%%r2\n\tnop";
8280 else
8281 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8282 }
8283
8284 /* Long millicode call, but we are not generating PIC or portable runtime
8285 code. */
8286 if (length == 12)
8287 {
8288 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8289 pa_output_arg_descriptor (insn);
8290 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8291 }
8292
8293 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8294 pc-relative call sequence is five instructions. The inline PA 2.0
8295 version of $$dyncall is also five instructions. The PA 1.X versions
8296 are longer but still an overall win. */
8297 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8298 {
8299 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8300 "ldw 2(%%r22),%%r19\n\t"
8301 "ldw -2(%%r22),%%r22", xoperands);
8302 if (TARGET_NO_SPACE_REGS)
8303 {
8304 pa_output_arg_descriptor (insn);
8305 if (TARGET_PA_20)
8306 return "bve,l,n (%%r22),%%r2\n\tnop";
8307 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8308 }
8309 if (TARGET_PA_20)
8310 {
8311 pa_output_arg_descriptor (insn);
8312 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8313 }
8314 output_asm_insn ("bl .+8,%%r2\n\t"
8315 "ldo 16(%%r2),%%r2\n\t"
8316 "ldsid (%%r22),%%r1\n\t"
8317 "mtsp %%r1,%%sr0", xoperands);
8318 pa_output_arg_descriptor (insn);
8319 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8320 }
8321
8322 /* We need a long PIC call to $$dyncall. */
8323 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8324 xoperands[1] = gen_rtx_REG (Pmode, 2);
8325 xoperands[2] = gen_rtx_REG (Pmode, 1);
8326 pa_output_pic_pcrel_sequence (xoperands);
8327 pa_output_arg_descriptor (insn);
8328 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8329 }
8330
8331 /* In HPUX 8.0's shared library scheme, special relocations are needed
8332 for function labels if they might be passed to a function
8333 in a shared library (because shared libraries don't live in code
8334 space), and special magic is needed to construct their address. */
8335
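/* For example, pa_encode_label below turns the symbol "foo" into
   "@foo"; pa_strip_name_encoding removes the prefix again. */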
8336 void
8337 pa_encode_label (rtx sym)
8338 {
8339 const char *str = XSTR (sym, 0);
8340 int len = strlen (str) + 1;
8341 char *newstr, *p;
8342
8343 p = newstr = XALLOCAVEC (char, len + 1);
8344 *p++ = '@';
8345 strcpy (p, str);
8346
8347 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8348 }
8349
8350 static void
8351 pa_encode_section_info (tree decl, rtx rtl, int first)
8352 {
8353 int old_referenced = 0;
8354
8355 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8356 old_referenced
8357 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8358
8359 default_encode_section_info (decl, rtl, first);
8360
8361 if (first && TEXT_SPACE_P (decl))
8362 {
8363 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8364 if (TREE_CODE (decl) == FUNCTION_DECL)
8365 pa_encode_label (XEXP (rtl, 0));
8366 }
8367 else if (old_referenced)
8368 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8369 }
8370
8371 /* This is roughly the inverse of pa_encode_section_info. */
8372
8373 static const char *
8374 pa_strip_name_encoding (const char *str)
8375 {
8376 str += (*str == '@');
8377 str += (*str == '*');
8378 return str;
8379 }
8380
8381 /* Returns 1 if OP is a function label involved in a simple addition
8382 with a constant. Used to keep certain patterns from matching
8383 during instruction combination. */
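/* For instance, this matches an address of the (hypothetical) form
   (const (plus (symbol_ref "@foo") (const_int 4))). */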
8384 int
8385 pa_is_function_label_plus_const (rtx op)
8386 {
8387 /* Strip off any CONST. */
8388 if (GET_CODE (op) == CONST)
8389 op = XEXP (op, 0);
8390
8391 return (GET_CODE (op) == PLUS
8392 && function_label_operand (XEXP (op, 0), VOIDmode)
8393 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8394 }
8395
8396 /* Output assembly code for a thunk to FUNCTION. */
8397
8398 static void
8399 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8400 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8401 tree function)
8402 {
8403 static unsigned int current_thunk_number;
8404 int val_14 = VAL_14_BITS_P (delta);
8405 unsigned int old_last_address = last_address, nbytes = 0;
8406 char label[17];
8407 rtx xoperands[4];
8408
8409 xoperands[0] = XEXP (DECL_RTL (function), 0);
8410 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8411 xoperands[2] = GEN_INT (delta);
8412
8413 final_start_function (emit_barrier (), file, 1);
8414
8415 /* Output the thunk. We know that the function is in the same
8416 translation unit (i.e., the same space) as the thunk, and that
8417 thunks are output after their method. Thus, we don't need an
8418 external branch to reach the function. With SOM and GAS,
8419 functions and thunks are effectively in different sections.
8420 Thus, we can always use an IA-relative branch and the linker
8421 will add a long branch stub if necessary.
8422
8423 However, we have to be careful when generating PIC code on the
8424 SOM port to ensure that the sequence does not transfer to an
8425 import stub for the target function as this could clobber the
8426 return value saved at SP-24. This would also apply to the
8427 32-bit linux port if the multi-space model is implemented. */
8428 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8429 && !(flag_pic && TREE_PUBLIC (function))
8430 && (TARGET_GAS || last_address < 262132))
8431 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8432 && ((targetm_common.have_named_sections
8433 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8434 /* The GNU 64-bit linker has rather poor stub management.
8435 So, we use a long branch from thunks that aren't in
8436 the same section as the target function. */
8437 && ((!TARGET_64BIT
8438 && (DECL_SECTION_NAME (thunk_fndecl)
8439 != DECL_SECTION_NAME (function)))
8440 || ((DECL_SECTION_NAME (thunk_fndecl)
8441 == DECL_SECTION_NAME (function))
8442 && last_address < 262132)))
8443 /* In this case, we need to be able to reach the start of
8444 the stub table even though the function is likely closer
8445 and can be jumped to directly. */
8446 || (targetm_common.have_named_sections
8447 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8448 && DECL_SECTION_NAME (function) == NULL
8449 && total_code_bytes < MAX_PCREL17F_OFFSET)
8450 /* Likewise. */
8451 || (!targetm_common.have_named_sections
8452 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8453 {
8454 if (!val_14)
8455 output_asm_insn ("addil L'%2,%%r26", xoperands);
8456
8457 output_asm_insn ("b %0", xoperands);
8458
8459 if (val_14)
8460 {
8461 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8462 nbytes += 8;
8463 }
8464 else
8465 {
8466 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8467 nbytes += 12;
8468 }
8469 }
8470 else if (TARGET_64BIT)
8471 {
8472 rtx xop[4];
8473
8474 /* We only have one call-clobbered scratch register, so we can't
8475 make use of the delay slot if delta doesn't fit in 14 bits. */
8476 if (!val_14)
8477 {
8478 output_asm_insn ("addil L'%2,%%r26", xoperands);
8479 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8480 }
8481
8482 /* Load function address into %r1. */
8483 xop[0] = xoperands[0];
8484 xop[1] = gen_rtx_REG (Pmode, 1);
8485 xop[2] = xop[1];
8486 pa_output_pic_pcrel_sequence (xop);
8487
8488 if (val_14)
8489 {
8490 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8491 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8492 nbytes += 20;
8493 }
8494 else
8495 {
8496 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8497 nbytes += 24;
8498 }
8499 }
8500 else if (TARGET_PORTABLE_RUNTIME)
8501 {
8502 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8503 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8504
8505 if (!val_14)
8506 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8507
8508 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8509
8510 if (val_14)
8511 {
8512 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8513 nbytes += 16;
8514 }
8515 else
8516 {
8517 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8518 nbytes += 20;
8519 }
8520 }
8521 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8522 {
8523 /* The function is accessible from outside this module. The only
8524 way to avoid an import stub between the thunk and function is to
8525 call the function directly with an indirect sequence similar to
8526 that used by $$dyncall. This is possible because $$dyncall acts
8527 as the import stub in an indirect call. */
8528 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8529 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8530 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8531 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8532 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8533 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8534 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8535 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8536 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8537
8538 if (!val_14)
8539 {
8540 output_asm_insn ("addil L'%2,%%r26", xoperands);
8541 nbytes += 4;
8542 }
8543
8544 if (TARGET_PA_20)
8545 {
8546 output_asm_insn ("bve (%%r22)", xoperands);
8547 nbytes += 36;
8548 }
8549 else if (TARGET_NO_SPACE_REGS)
8550 {
8551 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8552 nbytes += 36;
8553 }
8554 else
8555 {
8556 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8557 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8558 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8559 nbytes += 44;
8560 }
8561
8562 if (val_14)
8563 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8564 else
8565 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8566 }
8567 else if (flag_pic)
8568 {
8569 rtx xop[4];
8570
8571 /* Load function address into %r22. */
8572 xop[0] = xoperands[0];
8573 xop[1] = gen_rtx_REG (Pmode, 1);
8574 xop[2] = gen_rtx_REG (Pmode, 22);
8575 pa_output_pic_pcrel_sequence (xop);
8576
8577 if (!val_14)
8578 output_asm_insn ("addil L'%2,%%r26", xoperands);
8579
8580 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8581
8582 if (val_14)
8583 {
8584 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8585 nbytes += 20;
8586 }
8587 else
8588 {
8589 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8590 nbytes += 24;
8591 }
8592 }
8593 else
8594 {
8595 if (!val_14)
8596 output_asm_insn ("addil L'%2,%%r26", xoperands);
8597
8598 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8599 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8600
8601 if (val_14)
8602 {
8603 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8604 nbytes += 12;
8605 }
8606 else
8607 {
8608 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8609 nbytes += 16;
8610 }
8611 }
8612
8613 final_end_function ();
8614
8615 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8616 {
8617 switch_to_section (data_section);
8618 output_asm_insn (".align 4", xoperands);
8619 ASM_OUTPUT_LABEL (file, label);
8620 output_asm_insn (".word P'%0", xoperands);
8621 }
8622
8623 current_thunk_number++;
8624 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8625 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8626 last_address += nbytes;
8627 if (old_last_address > last_address)
8628 last_address = UINT_MAX;
8629 update_total_code_bytes (nbytes);
8630 }
8631
8632 /* Only direct calls to static functions are allowed to be sibling (tail)
8633 call optimized.
8634
8635 This restriction is necessary because some linker generated stubs will
8636 store return pointers into rp' in some cases which might clobber a
8637 live value already in rp'.
8638
8639 In a sibcall the current function and the target function share stack
8640 space. Thus if the path to the current function and the path to the
8641 target function save a value in rp', they save the value into the
8642 same stack slot, which has undesirable consequences.
8643
8644 Because of the deferred binding nature of shared libraries any function
8645 with external scope could be in a different load module and thus require
8646 rp' to be saved when calling that function. So sibcall optimizations
8647 can only be safe for static functions.
8648
8649 Note that GCC never needs return value relocations, so we don't have to
8650 worry about static calls with return value relocations (which require
8651 saving rp').
8652
8653 It is safe to perform a sibcall optimization when the target function
8654 will never return. */
8655 static bool
8656 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8657 {
8658 if (TARGET_PORTABLE_RUNTIME)
8659 return false;
8660
8661 /* Sibcalls are not ok because the arg pointer register is not a fixed
8662 register. This prevents the sibcall optimization from occurring. In
8663 addition, there are problems with stub placement using GNU ld. This
8664 is because a normal sibcall branch uses a 17-bit relocation while
8665 a regular call branch uses a 22-bit relocation. As a result, more
8666 care needs to be taken in the placement of long-branch stubs. */
8667 if (TARGET_64BIT)
8668 return false;
8669
8670 /* Sibcalls are only ok within a translation unit. */
8671 return (decl && !TREE_PUBLIC (decl));
8672 }
8673
8674 /* ??? Addition is not commutative on the PA due to the weird implicit
8675 space register selection rules for memory addresses. Therefore, we
8676 don't consider a + b == b + a, as this might be inside a MEM. */
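/* For example, in (mem (plus (reg A) (reg B))) the space register is
   selected from the base operand, so exchanging A and B inside the
   MEM could change which space register the access uses. */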
8677 static bool
8678 pa_commutative_p (const_rtx x, int outer_code)
8679 {
8680 return (COMMUTATIVE_P (x)
8681 && (TARGET_NO_SPACE_REGS
8682 || (outer_code != UNKNOWN && outer_code != MEM)
8683 || GET_CODE (x) != PLUS));
8684 }
8685
8686 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8687 use in fmpyadd instructions. */
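/* As implied by the checks below, operands[0..2] are the destination
   and sources of the multiply, and operands[3..5] the destination and
   sources of the add, with operands[3] doubling as one of the add's
   inputs. */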
8688 int
8689 pa_fmpyaddoperands (rtx *operands)
8690 {
8691 machine_mode mode = GET_MODE (operands[0]);
8692
8693 /* Must be a floating point mode. */
8694 if (mode != SFmode && mode != DFmode)
8695 return 0;
8696
8697 /* All modes must be the same. */
8698 if (! (mode == GET_MODE (operands[1])
8699 && mode == GET_MODE (operands[2])
8700 && mode == GET_MODE (operands[3])
8701 && mode == GET_MODE (operands[4])
8702 && mode == GET_MODE (operands[5])))
8703 return 0;
8704
8705 /* All operands must be registers. */
8706 if (! (GET_CODE (operands[1]) == REG
8707 && GET_CODE (operands[2]) == REG
8708 && GET_CODE (operands[3]) == REG
8709 && GET_CODE (operands[4]) == REG
8710 && GET_CODE (operands[5]) == REG))
8711 return 0;
8712
8713 /* Only 2 real operands to the addition. One of the input operands must
8714 be the same as the output operand. */
8715 if (! rtx_equal_p (operands[3], operands[4])
8716 && ! rtx_equal_p (operands[3], operands[5]))
8717 return 0;
8718
8719 /* Inout operand of add cannot conflict with any operands from multiply. */
8720 if (rtx_equal_p (operands[3], operands[0])
8721 || rtx_equal_p (operands[3], operands[1])
8722 || rtx_equal_p (operands[3], operands[2]))
8723 return 0;
8724
8725 /* Multiply cannot feed into addition operands. */
8726 if (rtx_equal_p (operands[4], operands[0])
8727 || rtx_equal_p (operands[5], operands[0]))
8728 return 0;
8729
8730 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8731 if (mode == SFmode
8732 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8733 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8734 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8735 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8736 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8737 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8738 return 0;
8739
8740 /* Passed. Operands are suitable for fmpyadd. */
8741 return 1;
8742 }
8743
8744 #if !defined(USE_COLLECT2)
8745 static void
8746 pa_asm_out_constructor (rtx symbol, int priority)
8747 {
8748 if (!function_label_operand (symbol, VOIDmode))
8749 pa_encode_label (symbol);
8750
8751 #ifdef CTORS_SECTION_ASM_OP
8752 default_ctor_section_asm_out_constructor (symbol, priority);
8753 #else
8754 # ifdef TARGET_ASM_NAMED_SECTION
8755 default_named_section_asm_out_constructor (symbol, priority);
8756 # else
8757 default_stabs_asm_out_constructor (symbol, priority);
8758 # endif
8759 #endif
8760 }
8761
8762 static void
8763 pa_asm_out_destructor (rtx symbol, int priority)
8764 {
8765 if (!function_label_operand (symbol, VOIDmode))
8766 pa_encode_label (symbol);
8767
8768 #ifdef DTORS_SECTION_ASM_OP
8769 default_dtor_section_asm_out_destructor (symbol, priority);
8770 #else
8771 # ifdef TARGET_ASM_NAMED_SECTION
8772 default_named_section_asm_out_destructor (symbol, priority);
8773 # else
8774 default_stabs_asm_out_destructor (symbol, priority);
8775 # endif
8776 #endif
8777 }
8778 #endif
8779
8780 /* This function places uninitialized global data in the bss section.
8781 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8782 function on the SOM port to prevent uninitialized global data from
8783 being placed in the data section. */
8784
8785 void
8786 pa_asm_output_aligned_bss (FILE *stream,
8787 const char *name,
8788 unsigned HOST_WIDE_INT size,
8789 unsigned int align)
8790 {
8791 switch_to_section (bss_section);
8792 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8793
8794 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8795 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8796 #endif
8797
8798 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8799 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8800 #endif
8801
8802 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8803 ASM_OUTPUT_LABEL (stream, name);
8804 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8805 }
8806
8807 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8808 that doesn't allow the alignment of global common storage to be directly
8809 specified. The SOM linker aligns common storage based on the rounded
8810 value of the NUM_BYTES parameter in the .comm directive. It's not
8811 possible to use the .align directive as it doesn't affect the alignment
8812 of the label associated with a .comm directive. */
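/* For example, a 4-byte object that must be aligned to 16 bytes is
   emitted with ".comm 16"; the linker's rounding of NUM_BYTES then
   yields the desired alignment. */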
8813
8814 void
8815 pa_asm_output_aligned_common (FILE *stream,
8816 const char *name,
8817 unsigned HOST_WIDE_INT size,
8818 unsigned int align)
8819 {
8820 unsigned int max_common_align;
8821
8822 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8823 if (align > max_common_align)
8824 {
8825 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8826 "for global common data. Using %u",
8827 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8828 align = max_common_align;
8829 }
8830
8831 switch_to_section (bss_section);
8832
8833 assemble_name (stream, name);
8834 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8835 MAX (size, align / BITS_PER_UNIT));
8836 }
8837
8838 /* We can't use .comm for local common storage as the SOM linker effectively
8839 treats the symbol as universal and uses the same storage for local symbols
8840 with the same name in different object files. The .block directive
8841 reserves an uninitialized block of storage. However, it's not common
8842 storage. Fortunately, GCC never requests common storage with the same
8843 name in any given translation unit. */
8844
8845 void
8846 pa_asm_output_aligned_local (FILE *stream,
8847 const char *name,
8848 unsigned HOST_WIDE_INT size,
8849 unsigned int align)
8850 {
8851 switch_to_section (bss_section);
8852 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8853
8854 #ifdef LOCAL_ASM_OP
8855 fprintf (stream, "%s", LOCAL_ASM_OP);
8856 assemble_name (stream, name);
8857 fprintf (stream, "\n");
8858 #endif
8859
8860 ASM_OUTPUT_LABEL (stream, name);
8861 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8862 }
8863
8864 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8865 use in fmpysub instructions. */
8866 int
8867 pa_fmpysuboperands (rtx *operands)
8868 {
8869 machine_mode mode = GET_MODE (operands[0]);
8870
8871 /* Must be a floating point mode. */
8872 if (mode != SFmode && mode != DFmode)
8873 return 0;
8874
8875 /* All modes must be the same. */
8876 if (! (mode == GET_MODE (operands[1])
8877 && mode == GET_MODE (operands[2])
8878 && mode == GET_MODE (operands[3])
8879 && mode == GET_MODE (operands[4])
8880 && mode == GET_MODE (operands[5])))
8881 return 0;
8882
8883 /* All operands must be registers. */
8884 if (! (GET_CODE (operands[1]) == REG
8885 && GET_CODE (operands[2]) == REG
8886 && GET_CODE (operands[3]) == REG
8887 && GET_CODE (operands[4]) == REG
8888 && GET_CODE (operands[5]) == REG))
8889 return 0;
8890
8891 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8892 operation, so operands[4] must be the same as operands[3]. */
8893 if (! rtx_equal_p (operands[3], operands[4]))
8894 return 0;
8895
8896 /* Multiply cannot feed into subtraction. */
8897 if (rtx_equal_p (operands[5], operands[0]))
8898 return 0;
8899
8900 /* Inout operand of sub cannot conflict with any operands from multiply. */
8901 if (rtx_equal_p (operands[3], operands[0])
8902 || rtx_equal_p (operands[3], operands[1])
8903 || rtx_equal_p (operands[3], operands[2]))
8904 return 0;
8905
8906 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8907 if (mode == SFmode
8908 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8909 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8910 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8911 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8912 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8913 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8914 return 0;
8915
8916 /* Passed. Operands are suitable for fmpysub. */
8917 return 1;
8918 }
8919
8920 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8921 constants for a MULT embedded inside a memory address. */
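/* These are the scale factors 1 << 1, 1 << 2 and 1 << 3, matching
   the shift counts accepted by pa_shadd_constant_p below. */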
8922 int
8923 pa_mem_shadd_constant_p (int val)
8924 {
8925 if (val == 2 || val == 4 || val == 8)
8926 return 1;
8927 else
8928 return 0;
8929 }
8930
8931 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8932 constants for shadd instructions. */
8933 int
8934 pa_shadd_constant_p (int val)
8935 {
8936 if (val == 1 || val == 2 || val == 3)
8937 return 1;
8938 else
8939 return 0;
8940 }
8941
8942 /* Return TRUE if INSN branches forward. */
8943
8944 static bool
8945 forward_branch_p (rtx_insn *insn)
8946 {
8947 rtx lab = JUMP_LABEL (insn);
8948
8949 /* The INSN must have a jump label. */
8950 gcc_assert (lab != NULL_RTX);
8951
8952 if (INSN_ADDRESSES_SET_P ())
8953 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8954
8955 while (insn)
8956 {
8957 if (insn == lab)
8958 return true;
8959 else
8960 insn = NEXT_INSN (insn);
8961 }
8962
8963 return false;
8964 }
8965
8966 /* Output an unconditional move and branch insn. */
8967
8968 const char *
8969 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8970 {
8971 int length = get_attr_length (insn);
8972
8973 /* These are the cases in which we win. */
8974 if (length == 4)
8975 return "mov%I1b,tr %1,%0,%2";
8976
8977 /* None of the following cases win, but they don't lose either. */
8978 if (length == 8)
8979 {
8980 if (dbr_sequence_length () == 0)
8981 {
8982 /* Nothing in the delay slot, fake it by putting the combined
8983 insn (the copy or add) in the delay slot of a bl. */
8984 if (GET_CODE (operands[1]) == CONST_INT)
8985 return "b %2\n\tldi %1,%0";
8986 else
8987 return "b %2\n\tcopy %1,%0";
8988 }
8989 else
8990 {
8991 /* Something in the delay slot, but we've got a long branch. */
8992 if (GET_CODE (operands[1]) == CONST_INT)
8993 return "ldi %1,%0\n\tb %2";
8994 else
8995 return "copy %1,%0\n\tb %2";
8996 }
8997 }
8998
8999 if (GET_CODE (operands[1]) == CONST_INT)
9000 output_asm_insn ("ldi %1,%0", operands);
9001 else
9002 output_asm_insn ("copy %1,%0", operands);
9003 return pa_output_lbranch (operands[2], insn, 1);
9004 }
9005
9006 /* Output an unconditional add and branch insn. */
9007
9008 const char *
9009 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
9010 {
9011 int length = get_attr_length (insn);
9012
9013 /* To make life easy we want operand0 to be the shared input/output
9014 operand and operand1 to be the readonly operand. */
9015 if (operands[0] == operands[1])
9016 operands[1] = operands[2];
9017
9018 /* These are the cases in which we win. */
9019 if (length == 4)
9020 return "add%I1b,tr %1,%0,%3";
9021
9022 /* None of the following cases win, but they don't lose either. */
9023 if (length == 8)
9024 {
9025 if (dbr_sequence_length () == 0)
9026 /* Nothing in the delay slot, fake it by putting the combined
9027 insn (the copy or add) in the delay slot of a bl. */
9028 return "b %3\n\tadd%I1 %1,%0,%0";
9029 else
9030 /* Something in the delay slot, but we've got a long branch. */
9031 return "add%I1 %1,%0,%0\n\tb %3";
9032 }
9033
9034 output_asm_insn ("add%I1 %1,%0,%0", operands);
9035 return pa_output_lbranch (operands[3], insn, 1);
9036 }
9037
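/* Similarly, a sketch with hypothetical operands (%1 = 5, %0 = %r26,
   %3 = L$0042): length 4 emits "addib,tr 5,%r26,L$0042", length 8
   emits the add in the delay slot of an unconditional branch
   ("b L$0042" then "addi 5,%r26,%r26"), and anything longer falls
   back to pa_output_lbranch. */
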
9038 /* We use this hook to perform a PA specific optimization which is difficult
9039 to do in earlier passes. */
9040
9041 static void
9042 pa_reorg (void)
9043 {
9044 remove_useless_addtr_insns (1);
9045
9046 if (pa_cpu < PROCESSOR_8000)
9047 pa_combine_instructions ();
9048 }
9049
9050 /* The PA has a number of odd instructions which can perform multiple
9051 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9052 it may be profitable to combine two instructions into one instruction
9053 with two outputs. It's not profitable on PA2.0 machines because the
9054 two outputs would take two slots in the reorder buffers.
9055
9056 This routine finds instructions which can be combined and combines
9057 them. We only support some of the potential combinations, and we
9058 only try common ways to find suitable instructions.
9059
9060 * addb can add two registers or a register and a small integer
9061 and jump to a nearby (+-8k) location. Normally the jump to the
9062 nearby location is conditional on the result of the add, but by
9063 using the "true" condition we can make the jump unconditional.
9064 Thus addb can perform two independent operations in one insn.
9065
9066 * movb is similar to addb in that it can perform a reg->reg
9067 or small immediate->reg copy and jump to a nearby (+-8k) location.
9068
9069 * fmpyadd and fmpysub can perform a FP multiply and either an
9070 FP add or FP sub if the operands of the multiply and add/sub are
9071 independent (there are other minor restrictions). Note both
9072 the fmpy and fadd/fsub can in theory move to better spots according
9073 to data dependencies, but for now we require the fmpy stay at a
9074 fixed location.
9075
9076 * Many of the memory operations can perform pre & post updates
9077 of index registers. GCC's pre/post increment/decrement addressing
9078 is far too simple to take advantage of all the possibilities. This
9079 pass may not be suitable since those insns may not be independent.
9080
9081 * comclr can compare two ints or an int and a register, nullify
9082 the following instruction and zero some other register. This
9083 is more difficult to use as it's harder to find an insn which
9084 will generate a comclr than to find something like an unconditional
9085 branch. (conditional moves & long branches create comclr insns).
9086
9087 * Most arithmetic operations can conditionally skip the next
9088 instruction. They can be viewed as "perform this operation
9089 and conditionally jump to this nearby location" (where nearby
9090 is one insn away). These are difficult to use due to the
9091 branch length restrictions. */
9092
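/* A rough sketch of the fmpyadd case with hypothetical registers: an
   independent multiply and add such as

       fmpy,sgl %fr4,%fr5,%fr6
       fadd,sgl %fr7,%fr8,%fr8

   can be replaced by a single two-output instruction along the lines
   of

       fmpyadd,sgl %fr4,%fr5,%fr6,%fr7,%fr8

   subject to the operand restrictions checked by pa_fmpyaddoperands. */
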
9093 static void
9094 pa_combine_instructions (void)
9095 {
9096 rtx_insn *anchor;
9097
9098 /* This can get expensive since the basic algorithm is on the
9099 order of O(n^2) (or worse). Only do it for -O2 or higher
9100 levels of optimization. */
9101 if (optimize < 2)
9102 return;
9103
9104 /* Walk down the list of insns looking for "anchor" insns which
9105 may be combined with "floating" insns. As the name implies,
9106 "anchor" instructions don't move, while "floating" insns may
9107 move around. */
9108 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9109 rtx_insn *new_rtx = make_insn_raw (par);
9110
9111 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9112 {
9113 enum attr_pa_combine_type anchor_attr;
9114 enum attr_pa_combine_type floater_attr;
9115
9116 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9117 Also ignore any special USE insns. */
9118 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9119 || GET_CODE (PATTERN (anchor)) == USE
9120 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9121 continue;
9122
9123 anchor_attr = get_attr_pa_combine_type (anchor);
9124 /* See if anchor is an insn suitable for combination. */
9125 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9126 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9127 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9128 && ! forward_branch_p (anchor)))
9129 {
9130 rtx_insn *floater;
9131
9132 for (floater = PREV_INSN (anchor);
9133 floater;
9134 floater = PREV_INSN (floater))
9135 {
9136 if (NOTE_P (floater)
9137 || (NONJUMP_INSN_P (floater)
9138 && (GET_CODE (PATTERN (floater)) == USE
9139 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9140 continue;
9141
9142 /* Anything except a regular INSN will stop our search. */
9143 if (! NONJUMP_INSN_P (floater))
9144 {
9145 floater = NULL;
9146 break;
9147 }
9148
9149 /* See if FLOATER is suitable for combination with the
9150 anchor. */
9151 floater_attr = get_attr_pa_combine_type (floater);
9152 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9153 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9154 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9155 && floater_attr == PA_COMBINE_TYPE_FMPY))
9156 {
9157 /* If ANCHOR and FLOATER can be combined, then we're
9158 done with this pass. */
9159 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9160 SET_DEST (PATTERN (floater)),
9161 XEXP (SET_SRC (PATTERN (floater)), 0),
9162 XEXP (SET_SRC (PATTERN (floater)), 1)))
9163 break;
9164 }
9165
9166 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9167 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9168 {
9169 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9170 {
9171 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9172 SET_DEST (PATTERN (floater)),
9173 XEXP (SET_SRC (PATTERN (floater)), 0),
9174 XEXP (SET_SRC (PATTERN (floater)), 1)))
9175 break;
9176 }
9177 else
9178 {
9179 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9180 SET_DEST (PATTERN (floater)),
9181 SET_SRC (PATTERN (floater)),
9182 SET_SRC (PATTERN (floater))))
9183 break;
9184 }
9185 }
9186 }
9187
9188 /* If we didn't find anything on the backwards scan, try forwards. */
9189 if (!floater
9190 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9191 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9192 {
9193 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9194 {
9195 if (NOTE_P (floater)
9196 || (NONJUMP_INSN_P (floater)
9197 && (GET_CODE (PATTERN (floater)) == USE
9198 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9199
9200 continue;
9201
9202 /* Anything except a regular INSN will stop our search. */
9203 if (! NONJUMP_INSN_P (floater))
9204 {
9205 floater = NULL;
9206 break;
9207 }
9208
9209 /* See if FLOATER is suitable for combination with the
9210 anchor. */
9211 floater_attr = get_attr_pa_combine_type (floater);
9212 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9213 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9214 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9215 && floater_attr == PA_COMBINE_TYPE_FMPY))
9216 {
9217 /* If ANCHOR and FLOATER can be combined, then we're
9218 done with this pass. */
9219 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9220 SET_DEST (PATTERN (floater)),
9221 XEXP (SET_SRC (PATTERN (floater)),
9222 0),
9223 XEXP (SET_SRC (PATTERN (floater)),
9224 1)))
9225 break;
9226 }
9227 }
9228 }
9229
9230 /* FLOATER will be nonzero if we found a suitable floating
9231 insn for combination with ANCHOR. */
9232 if (floater
9233 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9234 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9235 {
9236 /* Emit the new instruction and delete the old anchor. */
9237 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9238 copy_rtx (PATTERN (floater)));
9239 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9240 emit_insn_before (temp, anchor);
9241
9242 SET_INSN_DELETED (anchor);
9243
9244 /* Emit a special USE insn for FLOATER, then delete
9245 the floating insn. */
9246 temp = copy_rtx (PATTERN (floater));
9247 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9248 delete_insn (floater);
9249
9250 continue;
9251 }
9252 else if (floater
9253 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9254 {
9255 /* Emit the new_jump instruction and delete the old anchor. */
9256 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9257 copy_rtx (PATTERN (floater)));
9258 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9259 temp = emit_jump_insn_before (temp, anchor);
9260
9261 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9262 SET_INSN_DELETED (anchor);
9263
9264 /* Emit a special USE insn for FLOATER, then delete
9265 the floating insn. */
9266 temp = copy_rtx (PATTERN (floater));
9267 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9268 delete_insn (floater);
9269 continue;
9270 }
9271 }
9272 }
9273 }
9274
9275 static int
9276 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9277 int reversed, rtx dest,
9278 rtx src1, rtx src2)
9279 {
9280 int insn_code_number;
9281 rtx_insn *start, *end;
9282
9283 /* Create a PARALLEL with the patterns of ANCHOR and
9284 FLOATER, try to recognize it, then test constraints
9285 for the resulting pattern.
9286
9287 If the pattern doesn't match or the constraints
9288 aren't met keep searching for a suitable floater
9289 insn. */
9290 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9291 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9292 INSN_CODE (new_rtx) = -1;
9293 insn_code_number = recog_memoized (new_rtx);
9294 basic_block bb = BLOCK_FOR_INSN (anchor);
9295 if (insn_code_number < 0
9296 || (extract_insn (new_rtx),
9297 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9298 return 0;
9299
9300 if (reversed)
9301 {
9302 start = anchor;
9303 end = floater;
9304 }
9305 else
9306 {
9307 start = floater;
9308 end = anchor;
9309 }
9310
9311 /* There are up to three operands to consider: one
9312 output and two inputs.
9313
9314 The output must not be used between FLOATER & ANCHOR
9315 exclusive. The inputs must not be set between
9316 FLOATER and ANCHOR exclusive. */
9317
9318 if (reg_used_between_p (dest, start, end))
9319 return 0;
9320
9321 if (reg_set_between_p (src1, start, end))
9322 return 0;
9323
9324 if (reg_set_between_p (src2, start, end))
9325 return 0;
9326
9327 /* If we get here, then everything is good. */
9328 return 1;
9329 }
9330
9331 /* Return nonzero if references for INSN are delayed.
9332
9333 Millicode insns are actually function calls with some special
9334 constraints on arguments and register usage.
9335
9336 Millicode calls always expect their arguments in the integer argument
9337 registers, and always return their result in %r29 (ret1). They
9338 are expected to clobber their arguments, %r1, %r29, and the return
9339 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9340
9341 This function tells reorg that the references to arguments by
9342 millicode calls do not appear to happen until after the millicode call.
9343 This allows reorg to put insns which set the argument registers into the
9344 delay slot of the millicode call -- thus they act more like traditional
9345 CALL_INSNs.
9346
9347 Note we cannot consider side effects of the insn to be delayed because
9348 the branch and link insn will clobber the return pointer. If we happened
9349 to use the return pointer in the delay slot of the call, then we lose.
9350
9351 get_attr_type will try to recognize the given insn, so make sure to
9352 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9353 in particular. */
9354 int
9355 pa_insn_refs_are_delayed (rtx_insn *insn)
9356 {
9357 return ((NONJUMP_INSN_P (insn)
9358 && GET_CODE (PATTERN (insn)) != SEQUENCE
9359 && GET_CODE (PATTERN (insn)) != USE
9360 && GET_CODE (PATTERN (insn)) != CLOBBER
9361 && get_attr_type (insn) == TYPE_MILLI));
9362 }
9363
9364 /* Promote the return value, but not the arguments. */
9365
9366 static machine_mode
9367 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9368 machine_mode mode,
9369 int *punsignedp ATTRIBUTE_UNUSED,
9370 const_tree fntype ATTRIBUTE_UNUSED,
9371 int for_return)
9372 {
9373 if (for_return == 0)
9374 return mode;
9375 return promote_mode (type, mode, punsignedp);
9376 }
9377
9378 /* On the HP-PA the value is found in register(s) 28(-29), unless
9379 the mode is SF or DF. Then the value is returned in fr4 (32).
9380
9381 This must perform the same promotions as PROMOTE_MODE, else promoting
9382 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9383
9384 Small structures must be returned in a PARALLEL on PA64 in order
9385 to match the HP Compiler ABI. */
9386
9387 static rtx
9388 pa_function_value (const_tree valtype,
9389 const_tree func ATTRIBUTE_UNUSED,
9390 bool outgoing ATTRIBUTE_UNUSED)
9391 {
9392 machine_mode valmode;
9393
9394 if (AGGREGATE_TYPE_P (valtype)
9395 || TREE_CODE (valtype) == COMPLEX_TYPE
9396 || TREE_CODE (valtype) == VECTOR_TYPE)
9397 {
9398 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9399
9400 /* Handle aggregates that fit exactly in a word or double word. */
9401 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9402 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9403
9404 if (TARGET_64BIT)
9405 {
9406 /* Aggregates with a size less than or equal to 128 bits are
9407 returned in GR 28(-29). They are left justified. The pad
9408 bits are undefined. Larger aggregates are returned in
9409 memory. */
9410 rtx loc[2];
9411 int i, offset = 0;
9412 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9413
9414 for (i = 0; i < ub; i++)
9415 {
9416 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9417 gen_rtx_REG (DImode, 28 + i),
9418 GEN_INT (offset));
9419 offset += 8;
9420 }
9421
9422 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9423 }
9424 else if (valsize > UNITS_PER_WORD)
9425 {
9426 /* Aggregates 5 to 8 bytes in size are returned in general
9427 registers r28-r29 in the same manner as other non
9428 floating-point objects. The data is right-justified and
9429 zero-extended to 64 bits. This is opposite to the normal
9430 justification used on big endian targets and requires
9431 special treatment. */
9432 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9433 gen_rtx_REG (DImode, 28), const0_rtx);
9434 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9435 }
9436 }
9437
9438 if ((INTEGRAL_TYPE_P (valtype)
9439 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9440 || POINTER_TYPE_P (valtype))
9441 valmode = word_mode;
9442 else
9443 valmode = TYPE_MODE (valtype);
9444
9445 if (TREE_CODE (valtype) == REAL_TYPE
9446 && !AGGREGATE_TYPE_P (valtype)
9447 && TYPE_MODE (valtype) != TFmode
9448 && !TARGET_SOFT_FLOAT)
9449 return gen_rtx_REG (valmode, 32);
9450
9451 return gen_rtx_REG (valmode, 28);
9452 }
9453
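/* Worked examples with hypothetical types on the 32-bit target: a
   6-byte struct is returned right-justified in a one-element DImode
   PARALLEL on %r28(-%r29); a short is promoted to word_mode and
   returned in %r28; a float is returned in %fr4 (register 32) unless
   TARGET_SOFT_FLOAT. */
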
9454 /* Implement the TARGET_LIBCALL_VALUE hook. */
9455
9456 static rtx
9457 pa_libcall_value (machine_mode mode,
9458 const_rtx fun ATTRIBUTE_UNUSED)
9459 {
9460 if (! TARGET_SOFT_FLOAT
9461 && (mode == SFmode || mode == DFmode))
9462 return gen_rtx_REG (mode, 32);
9463 else
9464 return gen_rtx_REG (mode, 28);
9465 }
9466
9467 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9468
9469 static bool
9470 pa_function_value_regno_p (const unsigned int regno)
9471 {
9472 if (regno == 28
9473 || (! TARGET_SOFT_FLOAT && regno == 32))
9474 return true;
9475
9476 return false;
9477 }
9478
9479 /* Update the data in CUM to advance over an argument
9480 of mode MODE and data type TYPE.
9481 (TYPE is null for libcalls where that information may not be available.) */
9482
9483 static void
9484 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9485 const_tree type, bool named ATTRIBUTE_UNUSED)
9486 {
9487 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9488 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9489
9490 cum->nargs_prototype--;
9491 cum->words += (arg_size
9492 + ((cum->words & 01)
9493 && type != NULL_TREE
9494 && arg_size > 1));
9495 }
9496
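/* A worked example of the advance above, assuming a prototyped call:
   with cum->words == 1 and a DFmode argument (arg_size == 2 on the
   32-bit target), the odd padding slot is skipped, so cum->words
   advances by 3, from 1 to 4. */
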
9497 /* Return the location of a parameter that is passed in a register or NULL
9498 if the parameter has any component that is passed in memory.
9499
9500 This is new code and will be pushed into the net sources after
9501 further testing.
9502
9503 ??? We might want to restructure this so that it looks more like other
9504 ports. */
9505 static rtx
9506 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9507 const_tree type, bool named ATTRIBUTE_UNUSED)
9508 {
9509 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9510 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9511 int alignment = 0;
9512 int arg_size;
9513 int fpr_reg_base;
9514 int gpr_reg_base;
9515 rtx retval;
9516
9517 if (mode == VOIDmode)
9518 return NULL_RTX;
9519
9520 arg_size = FUNCTION_ARG_SIZE (mode, type);
9521
9522 /* If this arg would be passed partially or totally on the stack, then
9523 this routine should return zero. pa_arg_partial_bytes will
9524 handle arguments which are split between regs and stack slots if
9525 the ABI mandates split arguments. */
9526 if (!TARGET_64BIT)
9527 {
9528 /* The 32-bit ABI does not split arguments. */
9529 if (cum->words + arg_size > max_arg_words)
9530 return NULL_RTX;
9531 }
9532 else
9533 {
9534 if (arg_size > 1)
9535 alignment = cum->words & 1;
9536 if (cum->words + alignment >= max_arg_words)
9537 return NULL_RTX;
9538 }
9539
9540 /* The 32bit ABIs and the 64bit ABIs are rather different,
9541 particularly in their handling of FP registers. We might
9542 be able to cleverly share code between them, but I'm not
9543 going to bother in the hope that splitting them up results
9544 in code that is more easily understood. */
9545
9546 if (TARGET_64BIT)
9547 {
9548 /* Advance the base registers to their current locations.
9549
9550 Remember, gprs grow towards smaller register numbers while
9551 fprs grow to higher register numbers. Also remember that
9552 although FP regs are 32-bit addressable, we pretend that
9553 the registers are 64-bits wide. */
9554 gpr_reg_base = 26 - cum->words;
9555 fpr_reg_base = 32 + cum->words;
9556
9557 /* Arguments wider than one word and small aggregates need special
9558 treatment. */
9559 if (arg_size > 1
9560 || mode == BLKmode
9561 || (type && (AGGREGATE_TYPE_P (type)
9562 || TREE_CODE (type) == COMPLEX_TYPE
9563 || TREE_CODE (type) == VECTOR_TYPE)))
9564 {
9565 /* Double-extended precision (80-bit), quad-precision (128-bit)
9566 and aggregates including complex numbers are aligned on
9567 128-bit boundaries. The first eight 64-bit argument slots
9568 are associated one-to-one, with general registers r26
9569 through r19, and also with floating-point registers fr4
9570 through fr11. Arguments larger than one word are always
9571 passed in general registers.
9572
9573 Using a PARALLEL with a word mode register results in left
9574 justified data on a big-endian target. */
9575
9576 rtx loc[8];
9577 int i, offset = 0, ub = arg_size;
9578
9579 /* Align the base register. */
9580 gpr_reg_base -= alignment;
9581
9582 ub = MIN (ub, max_arg_words - cum->words - alignment);
9583 for (i = 0; i < ub; i++)
9584 {
9585 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9586 gen_rtx_REG (DImode, gpr_reg_base),
9587 GEN_INT (offset));
9588 gpr_reg_base -= 1;
9589 offset += 8;
9590 }
9591
9592 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9593 }
9594 }
9595 else
9596 {
9597 /* If the argument is larger than a word, then we know precisely
9598 which registers we must use. */
9599 if (arg_size > 1)
9600 {
9601 if (cum->words)
9602 {
9603 gpr_reg_base = 23;
9604 fpr_reg_base = 38;
9605 }
9606 else
9607 {
9608 gpr_reg_base = 25;
9609 fpr_reg_base = 34;
9610 }
9611
9612 /* Structures 5 to 8 bytes in size are passed in the general
9613 registers in the same manner as other non floating-point
9614 objects. The data is right-justified and zero-extended
9615 to 64 bits. This is opposite to the normal justification
9616 used on big endian targets and requires special treatment.
9617 We now define BLOCK_REG_PADDING to pad these objects.
9618 Aggregates, complex and vector types are passed in the same
9619 manner as structures. */
9620 if (mode == BLKmode
9621 || (type && (AGGREGATE_TYPE_P (type)
9622 || TREE_CODE (type) == COMPLEX_TYPE
9623 || TREE_CODE (type) == VECTOR_TYPE)))
9624 {
9625 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9626 gen_rtx_REG (DImode, gpr_reg_base),
9627 const0_rtx);
9628 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9629 }
9630 }
9631 else
9632 {
9633 /* We have a single word (32 bits). A simple computation
9634 will get us the register #s we need. */
9635 gpr_reg_base = 26 - cum->words;
9636 fpr_reg_base = 32 + 2 * cum->words;
9637 }
9638 }
9639
9640 /* Determine if the argument needs to be passed in both general and
9641 floating point registers. */
9642 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9643 /* If we are doing soft-float with portable runtime, then there
9644 is no need to worry about FP regs. */
9645 && !TARGET_SOFT_FLOAT
9646 /* The parameter must be some kind of scalar float, else we just
9647 pass it in integer registers. */
9648 && GET_MODE_CLASS (mode) == MODE_FLOAT
9649 /* The target function must not have a prototype. */
9650 && cum->nargs_prototype <= 0
9651 /* libcalls do not need to pass items in both FP and general
9652 registers. */
9653 && type != NULL_TREE
9654 /* All this hair applies to "outgoing" args only. This includes
9655 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9656 && !cum->incoming)
9657 /* Also pass outgoing floating arguments in both registers in indirect
9658 calls with the 32 bit ABI and the HP assembler since there is no
9659 way to specify the argument locations in static functions. */
9660 || (!TARGET_64BIT
9661 && !TARGET_GAS
9662 && !cum->incoming
9663 && cum->indirect
9664 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9665 {
9666 retval
9667 = gen_rtx_PARALLEL
9668 (mode,
9669 gen_rtvec (2,
9670 gen_rtx_EXPR_LIST (VOIDmode,
9671 gen_rtx_REG (mode, fpr_reg_base),
9672 const0_rtx),
9673 gen_rtx_EXPR_LIST (VOIDmode,
9674 gen_rtx_REG (mode, gpr_reg_base),
9675 const0_rtx)));
9676 }
9677 else
9678 {
9679 /* See if we should pass this parameter in a general register. */
9680 if (TARGET_SOFT_FLOAT
9681 /* Indirect calls in the normal 32bit ABI require all arguments
9682 to be passed in general registers. */
9683 || (!TARGET_PORTABLE_RUNTIME
9684 && !TARGET_64BIT
9685 && !TARGET_ELF32
9686 && cum->indirect)
9687 /* If the parameter is not a scalar floating-point parameter,
9688 then it belongs in GPRs. */
9689 || GET_MODE_CLASS (mode) != MODE_FLOAT
9690 /* Structure with single SFmode field belongs in GPR. */
9691 || (type && AGGREGATE_TYPE_P (type)))
9692 retval = gen_rtx_REG (mode, gpr_reg_base);
9693 else
9694 retval = gen_rtx_REG (mode, fpr_reg_base);
9695 }
9696 return retval;
9697 }
9698
9699 /* Arguments larger than one word are double word aligned. */
9700
9701 static unsigned int
9702 pa_function_arg_boundary (machine_mode mode, const_tree type)
9703 {
9704 bool singleword = (type
9705 ? (integer_zerop (TYPE_SIZE (type))
9706 || !TREE_CONSTANT (TYPE_SIZE (type))
9707 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9708 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9709
9710 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9711 }
9712
9713 /* If this arg would be passed totally in registers or totally on the stack,
9714 then this routine should return zero. */
9715
9716 static int
9717 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9718 tree type, bool named ATTRIBUTE_UNUSED)
9719 {
9720 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9721 unsigned int max_arg_words = 8;
9722 unsigned int offset = 0;
9723
9724 if (!TARGET_64BIT)
9725 return 0;
9726
9727 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9728 offset = 1;
9729
9730 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9731 /* Arg fits fully into registers. */
9732 return 0;
9733 else if (cum->words + offset >= max_arg_words)
9734 /* Arg fully on the stack. */
9735 return 0;
9736 else
9737 /* Arg is split. */
9738 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9739 }
9740
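/* A worked example on the 64-bit target with a hypothetical argument:
   a three-word aggregate with cum->words == 6 needs no padding, so
   (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes are passed in registers
   and the remaining word is passed on the stack. */
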
9741
9742 /* A get_unnamed_section callback for switching to the text section.
9743
9744 This function is only used with SOM. Because we don't support
9745 named subspaces, we can only create a new subspace or switch back
9746 to the default text subspace. */
9747
9748 static void
9749 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9750 {
9751 gcc_assert (TARGET_SOM);
9752 if (TARGET_GAS)
9753 {
9754 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9755 {
9756 /* We only want to emit a .nsubspa directive once at the
9757 start of the function. */
9758 cfun->machine->in_nsubspa = 1;
9759
9760 /* Create a new subspace for the text. This provides
9761 better stub placement and one-only functions. */
9762 if (cfun->decl
9763 && DECL_ONE_ONLY (cfun->decl)
9764 && !DECL_WEAK (cfun->decl))
9765 {
9766 output_section_asm_op ("\t.SPACE $TEXT$\n"
9767 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9768 "ACCESS=44,SORT=24,COMDAT");
9769 return;
9770 }
9771 }
9772 else
9773 {
9774 /* There isn't a current function or the body of the current
9775 function has been completed. So, we are changing to the
9776 text section to output debugging information. Thus, we
9777 need to forget that we are in the text section so that
9778 varasm.c will call us when text_section is selected again. */
9779 gcc_assert (!cfun || !cfun->machine
9780 || cfun->machine->in_nsubspa == 2);
9781 in_section = NULL;
9782 }
9783 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9784 return;
9785 }
9786 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9787 }
9788
9789 /* A get_unnamed_section callback for switching to comdat data
9790 sections. This function is only used with SOM. */
9791
9792 static void
9793 som_output_comdat_data_section_asm_op (const void *data)
9794 {
9795 in_section = NULL;
9796 output_section_asm_op (data);
9797 }
9798
9799 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9800
9801 static void
9802 pa_som_asm_init_sections (void)
9803 {
9804 text_section
9805 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9806
9807 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9808 is not being generated. */
9809 som_readonly_data_section
9810 = get_unnamed_section (0, output_section_asm_op,
9811 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9812
9813 /* When secondary definitions are not supported, SOM makes readonly
9814 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9815 the comdat flag. */
9816 som_one_only_readonly_data_section
9817 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9818 "\t.SPACE $TEXT$\n"
9819 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9820 "ACCESS=0x2c,SORT=16,COMDAT");
9821
9822
9823 /* When secondary definitions are not supported, SOM makes data one-only
9824 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9825 som_one_only_data_section
9826 = get_unnamed_section (SECTION_WRITE,
9827 som_output_comdat_data_section_asm_op,
9828 "\t.SPACE $PRIVATE$\n"
9829 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9830 "ACCESS=31,SORT=24,COMDAT");
9831
9832 if (flag_tm)
9833 som_tm_clone_table_section
9834 = get_unnamed_section (0, output_section_asm_op,
9835 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9836
9837 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9838 which reference data within the $TEXT$ space (for example constant
9839 strings in the $LIT$ subspace).
9840
9841 The assemblers (GAS and HP as) both have problems with handling
9842 the difference of two symbols which is the other correct way to
9843 reference constant data during PIC code generation.
9844
9845 So, there's no way to reference constant data which is in the
9846 $TEXT$ space during PIC generation. Instead place all constant
9847 data into the $PRIVATE$ subspace (this reduces sharing, but it
9848 works correctly). */
9849 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9850
9851 /* We must not have a reference to an external symbol defined in a
9852 shared library in a readonly section, else the SOM linker will
9853 complain.
9854
9855 So, we force exception information into the data section. */
9856 exception_section = data_section;
9857 }
9858
9859 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9860
9861 static section *
9862 pa_som_tm_clone_table_section (void)
9863 {
9864 return som_tm_clone_table_section;
9865 }
9866
9867 /* On hpux10, the linker will give an error if we have a reference
9868 in the read-only data section to a symbol defined in a shared
9869 library. Therefore, expressions that might require a reloc can
9870 not be placed in the read-only data section. */
9871
9872 static section *
9873 pa_select_section (tree exp, int reloc,
9874 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9875 {
9876 if (TREE_CODE (exp) == VAR_DECL
9877 && TREE_READONLY (exp)
9878 && !TREE_THIS_VOLATILE (exp)
9879 && DECL_INITIAL (exp)
9880 && (DECL_INITIAL (exp) == error_mark_node
9881 || TREE_CONSTANT (DECL_INITIAL (exp)))
9882 && !reloc)
9883 {
9884 if (TARGET_SOM
9885 && DECL_ONE_ONLY (exp)
9886 && !DECL_WEAK (exp))
9887 return som_one_only_readonly_data_section;
9888 else
9889 return readonly_data_section;
9890 }
9891 else if (CONSTANT_CLASS_P (exp) && !reloc)
9892 return readonly_data_section;
9893 else if (TARGET_SOM
9894 && TREE_CODE (exp) == VAR_DECL
9895 && DECL_ONE_ONLY (exp)
9896 && !DECL_WEAK (exp))
9897 return som_one_only_data_section;
9898 else
9899 return data_section;
9900 }
9901
9902 /* Implement pa_reloc_rw_mask. */
9903
9904 static int
9905 pa_reloc_rw_mask (void)
9906 {
9907 /* We force (const (plus (symbol) (const_int))) to memory when the
9908 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9909 handle this construct in read-only memory and we want to avoid
9910 this for ELF. So, we always force an RTX needing relocation to
9911 the data section. */
9912 return 3;
9913 }
9914
9915 static void
9916 pa_globalize_label (FILE *stream, const char *name)
9917 {
9918 /* We only handle DATA objects here, functions are globalized in
9919 ASM_DECLARE_FUNCTION_NAME. */
9920 if (! FUNCTION_NAME_P (name))
9921 {
9922 fputs ("\t.EXPORT ", stream);
9923 assemble_name (stream, name);
9924 fputs (",DATA\n", stream);
9925 }
9926 }
9927
9928 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9929
9930 static rtx
9931 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9932 int incoming ATTRIBUTE_UNUSED)
9933 {
9934 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9935 }
9936
9937 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9938
9939 bool
9940 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9941 {
9942 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9943 PA64 ABI says that objects larger than 128 bits are returned in memory.
9944 Note, int_size_in_bytes can return -1 if the size of the object is
9945 variable or larger than the maximum value that can be expressed as
9946 a HOST_WIDE_INT. It can also return zero for an empty type. The
9947 simplest way to handle variable and empty types is to pass them in
9948 memory. This avoids problems in defining the boundaries of argument
9949 slots, allocating registers, etc. */
9950 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9951 || int_size_in_bytes (type) <= 0);
9952 }
9953
9954 /* Structure to hold declaration and name of external symbols that are
9955 emitted by GCC. We generate a vector of these symbols and output them
9956 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9957 This avoids putting out names that are never really used. */
9958
9959 typedef struct GTY(()) extern_symbol
9960 {
9961 tree decl;
9962 const char *name;
9963 } extern_symbol;
9964
9965 /* Define gc'd vector type for extern_symbol. */
9966
9967 /* Vector of extern_symbol pointers. */
9968 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9969
9970 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9971 /* Mark DECL (name NAME) as an external reference (assembler output
9972 file FILE). This saves the names to output at the end of the file
9973 if actually referenced. */
9974
9975 void
9976 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9977 {
9978 gcc_assert (file == asm_out_file);
9979 extern_symbol p = {decl, name};
9980 vec_safe_push (extern_symbols, p);
9981 }
9982
9983 /* Output text required at the end of an assembler file.
9984 This includes deferred plabels and .import directives for
9985 all external symbols that were actually referenced. */
9986
9987 static void
9988 pa_hpux_file_end (void)
9989 {
9990 unsigned int i;
9991 extern_symbol *p;
9992
9993 if (!NO_DEFERRED_PROFILE_COUNTERS)
9994 output_deferred_profile_counters ();
9995
9996 output_deferred_plabels ();
9997
9998 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9999 {
10000 tree decl = p->decl;
10001
10002 if (!TREE_ASM_WRITTEN (decl)
10003 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10004 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10005 }
10006
10007 vec_free (extern_symbols);
10008 }
10009 #endif
10010
10011 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10012
10013 static bool
10014 pa_can_change_mode_class (machine_mode from, machine_mode to,
10015 reg_class_t rclass)
10016 {
10017 if (from == to)
10018 return true;
10019
10020 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10021 return true;
10022
10023 /* Reject changes to/from modes with zero size. */
10024 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
10025 return false;
10026
10027 /* Reject changes to/from complex and vector modes. */
10028 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10029 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10030 return false;
10031
10032 /* There is no way to load QImode or HImode values directly from memory
10033 to a FP register. SImode loads to the FP registers are not zero
10034 extended. On the 64-bit target, this conflicts with the definition
10035 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
10036 different sizes in the floating-point registers. */
10037 if (MAYBE_FP_REG_CLASS_P (rclass))
10038 return false;
10039
10040 /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
10041 in specific sets of registers. Thus, we cannot allow changing
10042 to a larger mode when it's larger than a word. */
10043 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10044 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10045 return false;
10046
10047 return true;
10048 }
10049
10050 /* Implement TARGET_MODES_TIEABLE_P.
10051
10052 We should return FALSE for QImode and HImode because these modes
10053 are not ok in the floating-point registers. However, this prevents
10054 tying these modes to SImode and DImode in the general registers.
10055 So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
10056 TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
10057 in the floating-point registers. */
10058
10059 static bool
10060 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10061 {
10062 /* Don't tie modes in different classes. */
10063 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10064 return false;
10065
10066 return true;
10067 }
10068
10069 \f
10070 /* Length in units of the trampoline instruction code. */
10071
10072 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10073
10074
10075 /* Output assembler code for a block containing the constant parts
10076 of a trampoline, leaving space for the variable parts.
10077
10078 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10079 and then branches to the specified routine.
10080
10081 This code template is copied from text segment to stack location
10082 and then patched with pa_trampoline_init to contain valid values,
10083 and then entered as a subroutine.
10084
10085 It is best to keep this as small as possible to avoid having to
10086 flush multiple lines in the cache. */
10087
10088 static void
10089 pa_asm_trampoline_template (FILE *f)
10090 {
10091 if (!TARGET_64BIT)
10092 {
10093 fputs ("\tldw 36(%r22),%r21\n", f);
10094 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10095 if (ASSEMBLER_DIALECT == 0)
10096 fputs ("\tdepi 0,31,2,%r21\n", f);
10097 else
10098 fputs ("\tdepwi 0,31,2,%r21\n", f);
10099 fputs ("\tldw 4(%r21),%r19\n", f);
10100 fputs ("\tldw 0(%r21),%r21\n", f);
10101 if (TARGET_PA_20)
10102 {
10103 fputs ("\tbve (%r21)\n", f);
10104 fputs ("\tldw 40(%r22),%r29\n", f);
10105 fputs ("\t.word 0\n", f);
10106 fputs ("\t.word 0\n", f);
10107 }
10108 else
10109 {
10110 fputs ("\tldsid (%r21),%r1\n", f);
10111 fputs ("\tmtsp %r1,%sr0\n", f);
10112 fputs ("\tbe 0(%sr0,%r21)\n", f);
10113 fputs ("\tldw 40(%r22),%r29\n", f);
10114 }
10115 fputs ("\t.word 0\n", f);
10116 fputs ("\t.word 0\n", f);
10117 fputs ("\t.word 0\n", f);
10118 fputs ("\t.word 0\n", f);
10119 }
10120 else
10121 {
10122 fputs ("\t.dword 0\n", f);
10123 fputs ("\t.dword 0\n", f);
10124 fputs ("\t.dword 0\n", f);
10125 fputs ("\t.dword 0\n", f);
10126 fputs ("\tmfia %r31\n", f);
10127 fputs ("\tldd 24(%r31),%r1\n", f);
10128 fputs ("\tldd 24(%r1),%r27\n", f);
10129 fputs ("\tldd 16(%r1),%r1\n", f);
10130 fputs ("\tbve (%r1)\n", f);
10131 fputs ("\tldd 32(%r31),%r31\n", f);
10132 fputs ("\t.dword 0 ; fptr\n", f);
10133 fputs ("\t.dword 0 ; static link\n", f);
10134 }
10135 }
10136
10137 /* Emit RTL insns to initialize the variable parts of a trampoline.
10138 FNADDR is an RTX for the address of the function's pure code.
10139 CXT is an RTX for the static chain value for the function.
10140
10141 Move the function address to the trampoline template at offset 36.
10142 Move the static chain value to trampoline template at offset 40.
10143 Move the trampoline address to trampoline template at offset 44.
10144 Move r19 to trampoline template at offset 48. The latter two
10145 words create a plabel for the indirect call to the trampoline.
10146
10147 A similar sequence is used for the 64-bit port but the plabel is
10148 at the beginning of the trampoline.
10149
10150 Finally, the cache entries for the trampoline code are flushed.
10151 This is necessary to ensure that the trampoline instruction sequence
10152 is written to memory prior to any attempts at prefetching the code
10153 sequence. */
10154
10155 static void
10156 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10157 {
10158 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10159 rtx start_addr = gen_reg_rtx (Pmode);
10160 rtx end_addr = gen_reg_rtx (Pmode);
10161 rtx line_length = gen_reg_rtx (Pmode);
10162 rtx r_tramp, tmp;
10163
10164 emit_block_move (m_tramp, assemble_trampoline_template (),
10165 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10166 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10167
10168 if (!TARGET_64BIT)
10169 {
10170 tmp = adjust_address (m_tramp, Pmode, 36);
10171 emit_move_insn (tmp, fnaddr);
10172 tmp = adjust_address (m_tramp, Pmode, 40);
10173 emit_move_insn (tmp, chain_value);
10174
10175 /* Create a fat pointer for the trampoline. */
10176 tmp = adjust_address (m_tramp, Pmode, 44);
10177 emit_move_insn (tmp, r_tramp);
10178 tmp = adjust_address (m_tramp, Pmode, 48);
10179 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10180
10181 /* fdc and fic only use registers for the address to flush,
10182 they do not accept integer displacements. We align the
10183 start and end addresses to the beginning of their respective
10184 cache lines to minimize the number of lines flushed. */
10185 emit_insn (gen_andsi3 (start_addr, r_tramp,
10186 GEN_INT (-MIN_CACHELINE_SIZE)));
10187 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10188 TRAMPOLINE_CODE_SIZE-1));
10189 emit_insn (gen_andsi3 (end_addr, tmp,
10190 GEN_INT (-MIN_CACHELINE_SIZE)));
10191 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10192 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10193 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10194 gen_reg_rtx (Pmode),
10195 gen_reg_rtx (Pmode)));
10196 }
10197 else
10198 {
10199 tmp = adjust_address (m_tramp, Pmode, 56);
10200 emit_move_insn (tmp, fnaddr);
10201 tmp = adjust_address (m_tramp, Pmode, 64);
10202 emit_move_insn (tmp, chain_value);
10203
10204 /* Create a fat pointer for the trampoline. */
10205 tmp = adjust_address (m_tramp, Pmode, 16);
10206 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10207 r_tramp, 32)));
10208 tmp = adjust_address (m_tramp, Pmode, 24);
10209 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10210
10211 /* fdc and fic only use registers for the address to flush,
10212 they do not accept integer displacements. We align the
10213 start and end addresses to the beginning of their respective
10214 cache lines to minimize the number of lines flushed. */
10215 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10216 emit_insn (gen_anddi3 (start_addr, tmp,
10217 GEN_INT (-MIN_CACHELINE_SIZE)));
10218 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10219 TRAMPOLINE_CODE_SIZE - 1));
10220 emit_insn (gen_anddi3 (end_addr, tmp,
10221 GEN_INT (-MIN_CACHELINE_SIZE)));
10222 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10223 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10224 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10225 gen_reg_rtx (Pmode),
10226 gen_reg_rtx (Pmode)));
10227 }
10228
10229 #ifdef HAVE_ENABLE_EXECUTE_STACK
10230 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10231 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10232 #endif
10233 }
10234
10235 /* Perform any machine-specific adjustment in the address of the trampoline.
10236 ADDR contains the address that was passed to pa_trampoline_init.
10237 Adjust the trampoline address to point to the plabel at offset 44. */
10238
10239 static rtx
10240 pa_trampoline_adjust_address (rtx addr)
10241 {
10242 if (!TARGET_64BIT)
10243 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10244 return addr;
10245 }
10246
10247 static rtx
10248 pa_delegitimize_address (rtx orig_x)
10249 {
10250 rtx x = delegitimize_mem_from_attrs (orig_x);
10251
10252 if (GET_CODE (x) == LO_SUM
10253 && GET_CODE (XEXP (x, 1)) == UNSPEC
10254 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10255 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10256 return x;
10257 }
10258 \f
10259 static rtx
10260 pa_internal_arg_pointer (void)
10261 {
10262 /* The argument pointer and the hard frame pointer are the same in
10263 the 32-bit runtime, so we don't need a copy. */
10264 if (TARGET_64BIT)
10265 return copy_to_reg (virtual_incoming_args_rtx);
10266 else
10267 return virtual_incoming_args_rtx;
10268 }
10269
10270 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10271 Frame pointer elimination is automatically handled. */
10272
10273 static bool
10274 pa_can_eliminate (const int from, const int to)
10275 {
10276 /* The argument cannot be eliminated in the 64-bit runtime. */
10277 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10278 return false;
10279
10280 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10281 ? ! frame_pointer_needed
10282 : true);
10283 }
10284
10285 /* Define the offset between two registers, FROM to be eliminated and its
10286 replacement TO, at the start of a routine. */
10287 HOST_WIDE_INT
10288 pa_initial_elimination_offset (int from, int to)
10289 {
10290 HOST_WIDE_INT offset;
10291
10292 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10293 && to == STACK_POINTER_REGNUM)
10294 offset = -pa_compute_frame_size (get_frame_size (), 0);
10295 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10296 offset = 0;
10297 else
10298 gcc_unreachable ();
10299
10300 return offset;
10301 }
10302
10303 static void
10304 pa_conditional_register_usage (void)
10305 {
10306 int i;
10307
10308 if (!TARGET_64BIT && !TARGET_PA_11)
10309 {
10310 for (i = 56; i <= FP_REG_LAST; i++)
10311 fixed_regs[i] = call_used_regs[i] = 1;
10312 for (i = 33; i < 56; i += 2)
10313 fixed_regs[i] = call_used_regs[i] = 1;
10314 }
10315 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10316 {
10317 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10318 fixed_regs[i] = call_used_regs[i] = 1;
10319 }
10320 if (flag_pic)
10321 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10322 }
10323
10324 /* Target hook for c_mode_for_suffix. */
10325
10326 static machine_mode
10327 pa_c_mode_for_suffix (char suffix)
10328 {
10329 if (HPUX_LONG_DOUBLE_LIBRARY)
10330 {
10331 if (suffix == 'q')
10332 return TFmode;
10333 }
10334
10335 return VOIDmode;
10336 }
10337
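/* For example, when HPUX_LONG_DOUBLE_LIBRARY is true a constant such
   as 1.0q is given TFmode (128-bit); otherwise the hook returns
   VOIDmode and the 'q' suffix is rejected. */
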
10338 /* Target hook for function_section. */
10339
10340 static section *
10341 pa_function_section (tree decl, enum node_frequency freq,
10342 bool startup, bool exit)
10343 {
10344 /* Put functions in text section if target doesn't have named sections. */
10345 if (!targetm_common.have_named_sections)
10346 return text_section;
10347
10348 /* Force nested functions into the same section as the containing
10349 function. */
10350 if (decl
10351 && DECL_SECTION_NAME (decl) == NULL
10352 && DECL_CONTEXT (decl) != NULL_TREE
10353 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10354 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10355 return function_section (DECL_CONTEXT (decl));
10356
10357 /* Otherwise, use the default function section. */
10358 return default_function_section (decl, freq, startup, exit);
10359 }
10360
10361 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10362
10363 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10364 that need more than three instructions to load prior to reload. This
10365 limit is somewhat arbitrary. It takes three instructions to load a
10366 CONST_INT from memory but two are memory accesses. It may be better
10367 to increase the allowed range for CONST_INTS. We may also be able
10368 to handle CONST_DOUBLES. */
10369
10370 static bool
10371 pa_legitimate_constant_p (machine_mode mode, rtx x)
10372 {
10373 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10374 return false;
10375
10376 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10377 return false;
10378
10379 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10380 legitimate constants. The other variants can't be handled by
10381 the move patterns after reload starts. */
10382 if (tls_referenced_p (x))
10383 return false;
10384
10385 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10386 return false;
10387
10388 if (TARGET_64BIT
10389 && HOST_BITS_PER_WIDE_INT > 32
10390 && GET_CODE (x) == CONST_INT
10391 && !reload_in_progress
10392 && !reload_completed
10393 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10394 && !pa_cint_ok_for_move (UINTVAL (x)))
10395 return false;
10396
10397 if (function_label_operand (x, mode))
10398 return false;
10399
10400 return true;
10401 }
10402
10403 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10404
10405 static unsigned int
10406 pa_section_type_flags (tree decl, const char *name, int reloc)
10407 {
10408 unsigned int flags;
10409
10410 flags = default_section_type_flags (decl, name, reloc);
10411
10412 /* Function labels are placed in the constant pool. This can
10413 cause a section conflict if decls are put in ".data.rel.ro"
10414 or ".data.rel.ro.local" using the __attribute__ construct. */
10415 if (strcmp (name, ".data.rel.ro") == 0
10416 || strcmp (name, ".data.rel.ro.local") == 0)
10417 flags |= SECTION_WRITE | SECTION_RELRO;
10418
10419 return flags;
10420 }
10421
10422 /* pa_legitimate_address_p recognizes an RTL expression that is a
10423 valid memory address for an instruction. The MODE argument is the
10424 machine mode for the MEM expression that wants to use this address.
10425
10426 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10427 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10428 available with floating point loads and stores, and integer loads.
10429 We get better code by allowing indexed addresses in the initial
10430 RTL generation.
10431
10432 The acceptance of indexed addresses as legitimate implies that we
10433 must provide patterns for doing indexed integer stores, or the move
10434 expanders must force the address of an indexed store to a register.
10435 We have adopted the latter approach.
10436
10437 Another function of pa_legitimate_address_p is to ensure that
10438 the base register is a valid pointer for indexed instructions.
10439 On targets that have non-equivalent space registers, we have to
10440 know at the time of assembler output which register in a REG+REG
10441 pair is the base register. The REG_POINTER flag is sometimes lost
10442 in reload and the following passes, so it can't be relied on during
10443 code generation. Thus, we either have to canonicalize the order
10444 of the registers in REG+REG indexed addresses, or treat REG+REG
10445 addresses separately and provide patterns for both permutations.
10446
10447 The latter approach requires several hundred additional lines of
10448 code in pa.md. The downside to canonicalizing is that a PLUS
10449 in the wrong order can't combine to make a scaled indexed
10450 memory operand. As we won't need to canonicalize the operands if
10451 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10452
10453 We initially break out scaled indexed addresses in canonical order
10454 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10455 scaled indexed addresses during RTL generation. However, fold_rtx
10456 has its own opinion on how the operands of a PLUS should be ordered.
10457 If one of the operands is equivalent to a constant, it will make
10458 that operand the second operand. As the base register is likely to
10459 be equivalent to a SYMBOL_REF, we have made it the second operand.
10460
10461 pa_legitimate_address_p accepts REG+REG as legitimate when the
10462 operands are in the order INDEX+BASE on targets with non-equivalent
10463 space registers, and in any order on targets with equivalent space
10464 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10465
10466 We treat a SYMBOL_REF as legitimate if it is part of the current
10467 function's constant-pool, because such addresses can actually be
10468 output as REG+SMALLINT. */
10469
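/* Hedged examples of addresses accepted below (hypothetical
   registers):

       (plus (reg) (const_int 8)) REG+SMALLINT
       (plus (reg index) (reg base)) REG+REG, base in XEXP (x, 1)
       (plus (mult (reg) (const_int 4)) (reg)) scaled index

   where the scale factor must equal GET_MODE_SIZE (mode). */
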
10470 static bool
10471 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10472 {
10473 if ((REG_P (x)
10474 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10475 : REG_OK_FOR_BASE_P (x)))
10476 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10477 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10478 && REG_P (XEXP (x, 0))
10479 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10480 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10481 return true;
10482
10483 if (GET_CODE (x) == PLUS)
10484 {
10485 rtx base, index;
10486
10487 /* For REG+REG, the base register should be in XEXP (x, 1),
10488 so check it first. */
10489 if (REG_P (XEXP (x, 1))
10490 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10491 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10492 base = XEXP (x, 1), index = XEXP (x, 0);
10493 else if (REG_P (XEXP (x, 0))
10494 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10495 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10496 base = XEXP (x, 0), index = XEXP (x, 1);
10497 else
10498 return false;
10499
10500 if (GET_CODE (index) == CONST_INT)
10501 {
10502 if (INT_5_BITS (index))
10503 return true;
10504
10505 /* When INT14_OK_STRICT is false, a secondary reload is needed
10506 to adjust the displacement of SImode and DImode floating point
10507 instructions but this may fail when the register also needs
10508 reloading. So, we return false when STRICT is true. We
10509 also reject long displacements for float mode addresses since
10510 the majority of accesses will use floating point instructions
10511 that don't support 14-bit offsets. */
10512 if (!INT14_OK_STRICT
10513 && (strict || !(reload_in_progress || reload_completed))
10514 && mode != QImode
10515 && mode != HImode)
10516 return false;
10517
10518 return base14_operand (index, mode);
10519 }
10520
10521 if (!TARGET_DISABLE_INDEXING
10522 /* Only accept the "canonical" INDEX+BASE operand order
10523 on targets with non-equivalent space registers. */
10524 && (TARGET_NO_SPACE_REGS
10525 ? REG_P (index)
10526 : (base == XEXP (x, 1) && REG_P (index)
10527 && (reload_completed
10528 || (reload_in_progress && HARD_REGISTER_P (base))
10529 || REG_POINTER (base))
10530 && (reload_completed
10531 || (reload_in_progress && HARD_REGISTER_P (index))
10532 || !REG_POINTER (index))))
10533 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10534 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10535 : REG_OK_FOR_INDEX_P (index))
10536 && borx_reg_operand (base, Pmode)
10537 && borx_reg_operand (index, Pmode))
10538 return true;
10539
10540 if (!TARGET_DISABLE_INDEXING
10541 && GET_CODE (index) == MULT
10542 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10543 && REG_P (XEXP (index, 0))
10544 && GET_MODE (XEXP (index, 0)) == Pmode
10545 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10546 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10547 && GET_CODE (XEXP (index, 1)) == CONST_INT
10548 && INTVAL (XEXP (index, 1))
10549 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10550 && borx_reg_operand (base, Pmode))
10551 return true;
10552
10553 return false;
10554 }
10555
10556 if (GET_CODE (x) == LO_SUM)
10557 {
10558 rtx y = XEXP (x, 0);
10559
10560 if (GET_CODE (y) == SUBREG)
10561 y = SUBREG_REG (y);
10562
10563 if (REG_P (y)
10564 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10565 : REG_OK_FOR_BASE_P (y)))
10566 {
10567 /* Needed for -fPIC */
10568 if (mode == Pmode
10569 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10570 return true;
10571
10572 if (!INT14_OK_STRICT
10573 && (strict || !(reload_in_progress || reload_completed))
10574 && mode != QImode
10575 && mode != HImode)
10576 return false;
10577
10578 if (CONSTANT_P (XEXP (x, 1)))
10579 return true;
10580 }
10581 return false;
10582 }
10583
10584 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10585 return true;
10586
10587 return false;
10588 }
10589
10590 /* Look for machine dependent ways to make the invalid address AD a
10591 valid address.
10592
10593 For the PA, transform:
10594
10595 memory(X + <large int>)
10596
10597 into:
10598
10599 if (<large int> & mask) >= 16
10600 Y = (<large int> & ~mask) + mask + 1 Round up.
10601 else
10602 Y = (<large int> & ~mask) Round down.
10603 Z = X + Y
10604 memory (Z + (<large int> - Y));
10605
10606 This makes reload inheritance and reload_cse work better since Z
10607 can be reused.
10608
10609 There may be more opportunities to improve code with this hook. */
10610
10611 rtx
10612 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10613 int opnum, int type,
10614 int ind_levels ATTRIBUTE_UNUSED)
10615 {
10616 long offset, newoffset, mask;
10617 rtx new_rtx, temp = NULL_RTX;
10618
10619 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10620 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10621
10622 if (optimize && GET_CODE (ad) == PLUS)
10623 temp = simplify_binary_operation (PLUS, Pmode,
10624 XEXP (ad, 0), XEXP (ad, 1));
10625
10626 new_rtx = temp ? temp : ad;
10627
10628 if (optimize
10629 && GET_CODE (new_rtx) == PLUS
10630 && GET_CODE (XEXP (new_rtx, 0)) == REG
10631 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10632 {
10633 offset = INTVAL (XEXP ((new_rtx), 1));
10634
10635 /* Choose rounding direction. Round up if we are >= halfway. */
10636 if ((offset & mask) >= ((mask + 1) / 2))
10637 newoffset = (offset & ~mask) + mask + 1;
10638 else
10639 newoffset = offset & ~mask;
10640
10641 /* Ensure that long displacements are aligned. */
10642 if (mask == 0x3fff
10643 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10644 || (TARGET_64BIT && (mode) == DImode)))
10645 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10646
10647 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10648 {
10649 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10650 GEN_INT (newoffset));
10651 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10652 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10653 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10654 opnum, (enum reload_type) type);
10655 return ad;
10656 }
10657 }
10658
10659 return NULL_RTX;
10660 }
10661
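/* A worked example of the transformation above with hypothetical
   operands: for an SFmode access at (%r3 + 0x1008) when
   INT14_OK_STRICT is false, mask is 0x1f; 0x1008 & 0x1f == 8 is below
   the halfway point, so newoffset rounds down to 0x1000 and the
   address becomes ((%r3 + 0x1000) + 8). The inner PLUS is pushed as a
   reload that later references can share. */
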
10662 /* Output address vector. */
10663
10664 void
10665 pa_output_addr_vec (rtx lab, rtx body)
10666 {
10667 int idx, vlen = XVECLEN (body, 0);
10668
10669 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10670 if (TARGET_GAS)
10671 fputs ("\t.begin_brtab\n", asm_out_file);
10672 for (idx = 0; idx < vlen; idx++)
10673 {
10674 ASM_OUTPUT_ADDR_VEC_ELT
10675 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10676 }
10677 if (TARGET_GAS)
10678 fputs ("\t.end_brtab\n", asm_out_file);
10679 }
10680
10681 /* Output address difference vector. */
10682
10683 void
10684 pa_output_addr_diff_vec (rtx lab, rtx body)
10685 {
10686 rtx base = XEXP (XEXP (body, 0), 0);
10687 int idx, vlen = XVECLEN (body, 1);
10688
10689 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10690 if (TARGET_GAS)
10691 fputs ("\t.begin_brtab\n", asm_out_file);
10692 for (idx = 0; idx < vlen; idx++)
10693 {
10694 ASM_OUTPUT_ADDR_DIFF_ELT
10695 (asm_out_file,
10696 body,
10697 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10698 CODE_LABEL_NUMBER (base));
10699 }
10700 if (TARGET_GAS)
10701 fputs ("\t.end_brtab\n", asm_out_file);
10702 }
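
/* Editorial note: an illustrative and hypothetical rendering of the
   branch table the two routines above emit (the exact directive for each
   entry comes from ASM_OUTPUT_ADDR_VEC_ELT in the target headers); the
   GAS-only .begin_brtab/.end_brtab markers bracket the entries so the
   assembler can identify the table's bounds:

	L$0042:
		.begin_brtab
		.word	L$0043
		.word	L$0044
		.word	L$0045
		.end_brtab
*/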
10703
10704 /* This is a helper function for the other atomic operations. It emits
10705 a loop containing SEQ that iterates until the compare-and-swap
10706 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10707 a set of instructions that takes a value from OLD_REG as an input and
10708 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10709 set to the current contents of MEM. After SEQ, a compare-and-swap will
10710 attempt to update MEM with NEW_REG. The function returns true when the
10711 loop was generated successfully. */
10712
10713 static bool
10714 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10715 {
10716 machine_mode mode = GET_MODE (mem);
10717 rtx_code_label *label;
10718 rtx cmp_reg, success, oldval;
10719
10720 /* The loop we want to generate looks like
10721
10722 cmp_reg = mem;
10723 label:
10724 old_reg = cmp_reg;
10725 seq;
10726 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10727 if (!success)
10728 goto label;
10729
10730 Note that we only do the plain load from memory once. Subsequent
10731 iterations use the value loaded by the compare-and-swap pattern. */
10732
10733 label = gen_label_rtx ();
10734 cmp_reg = gen_reg_rtx (mode);
10735
10736 emit_move_insn (cmp_reg, mem);
10737 emit_label (label);
10738 emit_move_insn (old_reg, cmp_reg);
10739 if (seq)
10740 emit_insn (seq);
10741
10742 success = NULL_RTX;
10743 oldval = cmp_reg;
10744 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10745 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10746 MEMMODEL_RELAXED))
10747 return false;
10748
10749 if (oldval != cmp_reg)
10750 emit_move_insn (cmp_reg, oldval);
10751
10752 /* Mark this jump predicted not taken. */
10753 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10754 GET_MODE (success), 1, label,
10755 profile_probability::guessed_never ());
10756 return true;
10757 }
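
/* Editorial note: a plain-C analogue (not the RTL this function emits)
   of the generated loop, using GCC's __atomic builtins and guarded out
   of compilation.  Here SEQ is an addition, so the loop implements an
   atomic fetch-and-add.  */
#if 0
static int
atomic_fetch_add_via_cas (int *mem, int val)
{
  /* The one plain load; later iterations reuse the value returned by
     the failed compare-and-swap.  */
  int cmp = __atomic_load_n (mem, __ATOMIC_RELAXED);
  int old, new_val;
  _Bool success;

  do
    {
      old = cmp;		/* old_reg = cmp_reg */
      new_val = old + val;	/* SEQ */
      /* On failure, CMP is refreshed with the current contents of *MEM,
	 just as the compare-and-swap pattern refreshes cmp_reg.  */
      success = __atomic_compare_exchange_n (mem, &cmp, new_val, 0,
					     __ATOMIC_SEQ_CST,
					     __ATOMIC_RELAXED);
    }
  while (!success);

  return old;
}
#endif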
10758
10759 /* This function tries to implement an atomic exchange operation using a
10760 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10761 *MEM are returned, using TARGET if possible. No memory model is required
10762 since a compare_and_swap loop is sequentially consistent (seq-cst). */
10763
10764 rtx
10765 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10766 {
10767 machine_mode mode = GET_MODE (mem);
10768
10769 if (can_compare_and_swap_p (mode, true))
10770 {
10771 if (!target || !register_operand (target, mode))
10772 target = gen_reg_rtx (mode);
10773 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10774 return target;
10775 }
10776
10777 return NULL_RTX;
10778 }
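
/* Editorial note: with SEQ empty, the loop above degenerates into an
   exchange -- NEW_REG is simply VAL.  A plain-C analogue under the same
   assumptions as the previous sketch:  */
#if 0
static int
atomic_exchange_via_cas (int *mem, int val)
{
  int cmp = __atomic_load_n (mem, __ATOMIC_RELAXED);

  /* Retry until the compare-and-swap installs VAL; on each failure CMP
     is refreshed, and on success it holds the previous contents.  */
  while (!__atomic_compare_exchange_n (mem, &cmp, val, 0,
				       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
    ;
  return cmp;
}
#endif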
10779
10780 /* Implement TARGET_CALLEE_COPIES. The callee is responsible for copying
10781 arguments passed by hidden reference in the 32-bit HP runtime. Users
10782 can override this behavior for better compatibility with OpenMP at the
10783 risk of library incompatibilities. Arguments are always passed by value
10784 in the 64-bit HP runtime. */
10785
10786 static bool
10787 pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
10788 machine_mode mode ATTRIBUTE_UNUSED,
10789 const_tree type ATTRIBUTE_UNUSED,
10790 bool named ATTRIBUTE_UNUSED)
10791 {
10792 return !TARGET_CALLER_COPIES;
10793 }
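
/* Editorial note: "passed by hidden reference" means the caller hands the
   callee the address of the argument rather than the argument itself.
   For an illustrative declaration such as

	struct big { int a[64]; };
	void f (struct big b);

   the 32-bit HP runtime passes &b, and with the default callee-copies
   convention F must copy the object before modifying it; the
   -mcaller-copies option moves that copy to the caller instead.  */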
10794
10795 /* Implement TARGET_HARD_REGNO_NREGS. */
10796
10797 static unsigned int
10798 pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
10799 {
10800 return PA_HARD_REGNO_NREGS (regno, mode);
10801 }
10802
10803 /* Implement TARGET_HARD_REGNO_MODE_OK. */
10804
10805 static bool
10806 pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10807 {
10808 return PA_HARD_REGNO_MODE_OK (regno, mode);
10809 }
10810
10811 #include "gt-pa.h"