]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/pa/pa.c
Put a TARGET_LRA_P into every target
[thirdparty/gcc.git] / gcc / config / pa / pa.c
1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2016 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "df.h"
29 #include "tm_p.h"
30 #include "stringpool.h"
31 #include "optabs.h"
32 #include "regs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "insn-attr.h"
37 #include "alias.h"
38 #include "fold-const.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "output.h"
43 #include "except.h"
44 #include "explow.h"
45 #include "expr.h"
46 #include "reload.h"
47 #include "common/common-target.h"
48 #include "langhooks.h"
49 #include "cfgrtl.h"
50 #include "opts.h"
51 #include "builtins.h"
52
53 /* This file should be included last. */
54 #include "target-def.h"
55
56 /* Return nonzero if there is a bypass for the output of
57 OUT_INSN and the fp store IN_INSN. */
58 int
59 pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
60 {
61 machine_mode store_mode;
62 machine_mode other_mode;
63 rtx set;
64
65 if (recog_memoized (in_insn) < 0
66 || (get_attr_type (in_insn) != TYPE_FPSTORE
67 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
68 || recog_memoized (out_insn) < 0)
69 return 0;
70
71 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
72
73 set = single_set (out_insn);
74 if (!set)
75 return 0;
76
77 other_mode = GET_MODE (SET_SRC (set));
78
79 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
80 }
81
82
83 #ifndef DO_FRAME_NOTES
84 #ifdef INCOMING_RETURN_ADDR_RTX
85 #define DO_FRAME_NOTES 1
86 #else
87 #define DO_FRAME_NOTES 0
88 #endif
89 #endif
90
91 static void pa_option_override (void);
92 static void copy_reg_pointer (rtx, rtx);
93 static void fix_range (const char *);
94 static int hppa_register_move_cost (machine_mode mode, reg_class_t,
95 reg_class_t);
96 static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
97 static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
98 static inline rtx force_mode (machine_mode, rtx);
99 static void pa_reorg (void);
100 static void pa_combine_instructions (void);
101 static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
102 rtx, rtx);
103 static bool forward_branch_p (rtx_insn *);
104 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
105 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
106 static int compute_movmem_length (rtx_insn *);
107 static int compute_clrmem_length (rtx_insn *);
108 static bool pa_assemble_integer (rtx, unsigned int, int);
109 static void remove_useless_addtr_insns (int);
110 static void store_reg (int, HOST_WIDE_INT, int);
111 static void store_reg_modify (int, int, HOST_WIDE_INT);
112 static void load_reg (int, HOST_WIDE_INT, int);
113 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
114 static rtx pa_function_value (const_tree, const_tree, bool);
115 static rtx pa_libcall_value (machine_mode, const_rtx);
116 static bool pa_function_value_regno_p (const unsigned int);
117 static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
118 static void update_total_code_bytes (unsigned int);
119 static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
120 static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
121 static int pa_adjust_priority (rtx_insn *, int);
122 static int pa_issue_rate (void);
123 static int pa_reloc_rw_mask (void);
124 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
125 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
126 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
127 ATTRIBUTE_UNUSED;
128 static void pa_encode_section_info (tree, rtx, int);
129 static const char *pa_strip_name_encoding (const char *);
130 static bool pa_function_ok_for_sibcall (tree, tree);
131 static void pa_globalize_label (FILE *, const char *)
132 ATTRIBUTE_UNUSED;
133 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
134 HOST_WIDE_INT, tree);
135 #if !defined(USE_COLLECT2)
136 static void pa_asm_out_constructor (rtx, int);
137 static void pa_asm_out_destructor (rtx, int);
138 #endif
139 static void pa_init_builtins (void);
140 static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
141 static rtx hppa_builtin_saveregs (void);
142 static void hppa_va_start (tree, rtx);
143 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
144 static bool pa_scalar_mode_supported_p (machine_mode);
145 static bool pa_commutative_p (const_rtx x, int outer_code);
146 static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
147 static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
148 static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
149 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
150 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
151 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
152 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
153 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
154 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
155 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
156 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
157 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
158 static void output_deferred_plabels (void);
159 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
160 #ifdef ASM_OUTPUT_EXTERNAL_REAL
161 static void pa_hpux_file_end (void);
162 #endif
163 static void pa_init_libfuncs (void);
164 static rtx pa_struct_value_rtx (tree, int);
165 static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
166 const_tree, bool);
167 static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
168 tree, bool);
169 static void pa_function_arg_advance (cumulative_args_t, machine_mode,
170 const_tree, bool);
171 static rtx pa_function_arg (cumulative_args_t, machine_mode,
172 const_tree, bool);
173 static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
174 static struct machine_function * pa_init_machine_status (void);
175 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
176 machine_mode,
177 secondary_reload_info *);
178 static void pa_extra_live_on_entry (bitmap);
179 static machine_mode pa_promote_function_mode (const_tree,
180 machine_mode, int *,
181 const_tree, int);
182
183 static void pa_asm_trampoline_template (FILE *);
184 static void pa_trampoline_init (rtx, tree, rtx);
185 static rtx pa_trampoline_adjust_address (rtx);
186 static rtx pa_delegitimize_address (rtx);
187 static bool pa_print_operand_punct_valid_p (unsigned char);
188 static rtx pa_internal_arg_pointer (void);
189 static bool pa_can_eliminate (const int, const int);
190 static void pa_conditional_register_usage (void);
191 static machine_mode pa_c_mode_for_suffix (char);
192 static section *pa_function_section (tree, enum node_frequency, bool, bool);
193 static bool pa_cannot_force_const_mem (machine_mode, rtx);
194 static bool pa_legitimate_constant_p (machine_mode, rtx);
195 static unsigned int pa_section_type_flags (tree, const char *, int);
196 static bool pa_legitimate_address_p (machine_mode, rtx, bool);
197
198 /* The following extra sections are only used for SOM. */
199 static GTY(()) section *som_readonly_data_section;
200 static GTY(()) section *som_one_only_readonly_data_section;
201 static GTY(()) section *som_one_only_data_section;
202 static GTY(()) section *som_tm_clone_table_section;
203
204 /* Counts for the number of callee-saved general and floating point
205 registers which were saved by the current function's prologue. */
206 static int gr_saved, fr_saved;
207
208 /* Boolean indicating whether the return pointer was saved by the
209 current function's prologue. */
210 static bool rp_saved;
211
212 static rtx find_addr_reg (rtx);
213
214 /* Keep track of the number of bytes we have output in the CODE subspace
215 during this compilation so we'll know when to emit inline long-calls. */
216 unsigned long total_code_bytes;
217
218 /* The last address of the previous function plus the number of bytes in
219 associated thunks that have been output. This is used to determine if
220 a thunk can use an IA-relative branch to reach its target function. */
221 static unsigned int last_address;
222
223 /* Variables to handle plabels that we discover are necessary at assembly
224 output time. They are output after the current function. */
225 struct GTY(()) deferred_plabel
226 {
227 rtx internal_label;
228 rtx symbol;
229 };
230 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
231 deferred_plabels;
232 static size_t n_deferred_plabels = 0;
233 \f
234 /* Initialize the GCC target structure. */
235
236 #undef TARGET_OPTION_OVERRIDE
237 #define TARGET_OPTION_OVERRIDE pa_option_override
238
239 #undef TARGET_ASM_ALIGNED_HI_OP
240 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
241 #undef TARGET_ASM_ALIGNED_SI_OP
242 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
243 #undef TARGET_ASM_ALIGNED_DI_OP
244 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
245 #undef TARGET_ASM_UNALIGNED_HI_OP
246 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
247 #undef TARGET_ASM_UNALIGNED_SI_OP
248 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
249 #undef TARGET_ASM_UNALIGNED_DI_OP
250 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
251 #undef TARGET_ASM_INTEGER
252 #define TARGET_ASM_INTEGER pa_assemble_integer
253
254 #undef TARGET_ASM_FUNCTION_PROLOGUE
255 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
256 #undef TARGET_ASM_FUNCTION_EPILOGUE
257 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
258
259 #undef TARGET_FUNCTION_VALUE
260 #define TARGET_FUNCTION_VALUE pa_function_value
261 #undef TARGET_LIBCALL_VALUE
262 #define TARGET_LIBCALL_VALUE pa_libcall_value
263 #undef TARGET_FUNCTION_VALUE_REGNO_P
264 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
265
266 #undef TARGET_LEGITIMIZE_ADDRESS
267 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
268
269 #undef TARGET_SCHED_ADJUST_COST
270 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
271 #undef TARGET_SCHED_ADJUST_PRIORITY
272 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
273 #undef TARGET_SCHED_ISSUE_RATE
274 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
275
276 #undef TARGET_ENCODE_SECTION_INFO
277 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
278 #undef TARGET_STRIP_NAME_ENCODING
279 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
280
281 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
282 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
283
284 #undef TARGET_COMMUTATIVE_P
285 #define TARGET_COMMUTATIVE_P pa_commutative_p
286
287 #undef TARGET_ASM_OUTPUT_MI_THUNK
288 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
289 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
290 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
291
292 #undef TARGET_ASM_FILE_END
293 #ifdef ASM_OUTPUT_EXTERNAL_REAL
294 #define TARGET_ASM_FILE_END pa_hpux_file_end
295 #else
296 #define TARGET_ASM_FILE_END output_deferred_plabels
297 #endif
298
299 #undef TARGET_ASM_RELOC_RW_MASK
300 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
301
302 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
303 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
304
305 #if !defined(USE_COLLECT2)
306 #undef TARGET_ASM_CONSTRUCTOR
307 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
308 #undef TARGET_ASM_DESTRUCTOR
309 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
310 #endif
311
312 #undef TARGET_INIT_BUILTINS
313 #define TARGET_INIT_BUILTINS pa_init_builtins
314
315 #undef TARGET_EXPAND_BUILTIN
316 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
317
318 #undef TARGET_REGISTER_MOVE_COST
319 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
320 #undef TARGET_RTX_COSTS
321 #define TARGET_RTX_COSTS hppa_rtx_costs
322 #undef TARGET_ADDRESS_COST
323 #define TARGET_ADDRESS_COST hppa_address_cost
324
325 #undef TARGET_MACHINE_DEPENDENT_REORG
326 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
327
328 #undef TARGET_INIT_LIBFUNCS
329 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
330
331 #undef TARGET_PROMOTE_FUNCTION_MODE
332 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
333 #undef TARGET_PROMOTE_PROTOTYPES
334 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
335
336 #undef TARGET_STRUCT_VALUE_RTX
337 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
338 #undef TARGET_RETURN_IN_MEMORY
339 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
340 #undef TARGET_MUST_PASS_IN_STACK
341 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
342 #undef TARGET_PASS_BY_REFERENCE
343 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
344 #undef TARGET_CALLEE_COPIES
345 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
346 #undef TARGET_ARG_PARTIAL_BYTES
347 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
348 #undef TARGET_FUNCTION_ARG
349 #define TARGET_FUNCTION_ARG pa_function_arg
350 #undef TARGET_FUNCTION_ARG_ADVANCE
351 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
352 #undef TARGET_FUNCTION_ARG_BOUNDARY
353 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
354
355 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
356 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
357 #undef TARGET_EXPAND_BUILTIN_VA_START
358 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
359 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
360 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
361
362 #undef TARGET_SCALAR_MODE_SUPPORTED_P
363 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
364
365 #undef TARGET_CANNOT_FORCE_CONST_MEM
366 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
367
368 #undef TARGET_SECONDARY_RELOAD
369 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
370
371 #undef TARGET_EXTRA_LIVE_ON_ENTRY
372 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
373
374 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
375 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
376 #undef TARGET_TRAMPOLINE_INIT
377 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
378 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
379 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
380 #undef TARGET_DELEGITIMIZE_ADDRESS
381 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
382 #undef TARGET_INTERNAL_ARG_POINTER
383 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
384 #undef TARGET_CAN_ELIMINATE
385 #define TARGET_CAN_ELIMINATE pa_can_eliminate
386 #undef TARGET_CONDITIONAL_REGISTER_USAGE
387 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
388 #undef TARGET_C_MODE_FOR_SUFFIX
389 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
390 #undef TARGET_ASM_FUNCTION_SECTION
391 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
392
393 #undef TARGET_LEGITIMATE_CONSTANT_P
394 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
395 #undef TARGET_SECTION_TYPE_FLAGS
396 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
397 #undef TARGET_LEGITIMATE_ADDRESS_P
398 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
399
400 #undef TARGET_LRA_P
401 #define TARGET_LRA_P hook_bool_void_false
402
403 struct gcc_target targetm = TARGET_INITIALIZER;
404 \f
405 /* Parse the -mfixed-range= option string. */
406
407 static void
408 fix_range (const char *const_str)
409 {
410 int i, first, last;
411 char *str, *dash, *comma;
412
413 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
414 REG2 are either register names or register numbers. The effect
415 of this option is to mark the registers in the range from REG1 to
416 REG2 as ``fixed'' so they won't be used by the compiler. This is
417 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
418
419 i = strlen (const_str);
420 str = (char *) alloca (i + 1);
421 memcpy (str, const_str, i + 1);
422
423 while (1)
424 {
425 dash = strchr (str, '-');
426 if (!dash)
427 {
428 warning (0, "value of -mfixed-range must have form REG1-REG2");
429 return;
430 }
431 *dash = '\0';
432
433 comma = strchr (dash + 1, ',');
434 if (comma)
435 *comma = '\0';
436
437 first = decode_reg_name (str);
438 if (first < 0)
439 {
440 warning (0, "unknown register name: %s", str);
441 return;
442 }
443
444 last = decode_reg_name (dash + 1);
445 if (last < 0)
446 {
447 warning (0, "unknown register name: %s", dash + 1);
448 return;
449 }
450
451 *dash = '-';
452
453 if (first > last)
454 {
455 warning (0, "%s-%s is an empty range", str, dash + 1);
456 return;
457 }
458
459 for (i = first; i <= last; ++i)
460 fixed_regs[i] = call_used_regs[i] = 1;
461
462 if (!comma)
463 break;
464
465 *comma = ',';
466 str = comma + 1;
467 }
468
469 /* Check if all floating point registers have been fixed. */
470 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
471 if (!fixed_regs[i])
472 break;
473
474 if (i > FP_REG_LAST)
475 target_flags |= MASK_DISABLE_FPREGS;
476 }
477
478 /* Implement the TARGET_OPTION_OVERRIDE hook. */
479
480 static void
481 pa_option_override (void)
482 {
483 unsigned int i;
484 cl_deferred_option *opt;
485 vec<cl_deferred_option> *v
486 = (vec<cl_deferred_option> *) pa_deferred_options;
487
488 if (v)
489 FOR_EACH_VEC_ELT (*v, i, opt)
490 {
491 switch (opt->opt_index)
492 {
493 case OPT_mfixed_range_:
494 fix_range (opt->arg);
495 break;
496
497 default:
498 gcc_unreachable ();
499 }
500 }
501
502 if (flag_pic && TARGET_PORTABLE_RUNTIME)
503 {
504 warning (0, "PIC code generation is not supported in the portable runtime model");
505 }
506
507 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
508 {
509 warning (0, "PIC code generation is not compatible with fast indirect calls");
510 }
511
512 if (! TARGET_GAS && write_symbols != NO_DEBUG)
513 {
514 warning (0, "-g is only supported when using GAS on this processor,");
515 warning (0, "-g option disabled");
516 write_symbols = NO_DEBUG;
517 }
518
519 /* We only support the "big PIC" model now. And we always generate PIC
520 code when in 64bit mode. */
521 if (flag_pic == 1 || TARGET_64BIT)
522 flag_pic = 2;
523
524 /* Disable -freorder-blocks-and-partition as we don't support hot and
525 cold partitioning. */
526 if (flag_reorder_blocks_and_partition)
527 {
528 inform (input_location,
529 "-freorder-blocks-and-partition does not work "
530 "on this architecture");
531 flag_reorder_blocks_and_partition = 0;
532 flag_reorder_blocks = 1;
533 }
534
535 /* We can't guarantee that .dword is available for 32-bit targets. */
536 if (UNITS_PER_WORD == 4)
537 targetm.asm_out.aligned_op.di = NULL;
538
539 /* The unaligned ops are only available when using GAS. */
540 if (!TARGET_GAS)
541 {
542 targetm.asm_out.unaligned_op.hi = NULL;
543 targetm.asm_out.unaligned_op.si = NULL;
544 targetm.asm_out.unaligned_op.di = NULL;
545 }
546
547 init_machine_status = pa_init_machine_status;
548 }
549
550 enum pa_builtins
551 {
552 PA_BUILTIN_COPYSIGNQ,
553 PA_BUILTIN_FABSQ,
554 PA_BUILTIN_INFQ,
555 PA_BUILTIN_HUGE_VALQ,
556 PA_BUILTIN_max
557 };
558
559 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
560
561 static void
562 pa_init_builtins (void)
563 {
564 #ifdef DONT_HAVE_FPUTC_UNLOCKED
565 {
566 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
567 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
568 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
569 }
570 #endif
571 #if TARGET_HPUX_11
572 {
573 tree decl;
574
575 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
576 set_user_assembler_name (decl, "_Isfinite");
577 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
578 set_user_assembler_name (decl, "_Isfinitef");
579 }
580 #endif
581
582 if (HPUX_LONG_DOUBLE_LIBRARY)
583 {
584 tree decl, ftype;
585
586 /* Under HPUX, the __float128 type is a synonym for "long double". */
587 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
588 "__float128");
589
590 /* TFmode support builtins. */
591 ftype = build_function_type_list (long_double_type_node,
592 long_double_type_node,
593 NULL_TREE);
594 decl = add_builtin_function ("__builtin_fabsq", ftype,
595 PA_BUILTIN_FABSQ, BUILT_IN_MD,
596 "_U_Qfabs", NULL_TREE);
597 TREE_READONLY (decl) = 1;
598 pa_builtins[PA_BUILTIN_FABSQ] = decl;
599
600 ftype = build_function_type_list (long_double_type_node,
601 long_double_type_node,
602 long_double_type_node,
603 NULL_TREE);
604 decl = add_builtin_function ("__builtin_copysignq", ftype,
605 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
606 "_U_Qfcopysign", NULL_TREE);
607 TREE_READONLY (decl) = 1;
608 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
609
610 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
611 decl = add_builtin_function ("__builtin_infq", ftype,
612 PA_BUILTIN_INFQ, BUILT_IN_MD,
613 NULL, NULL_TREE);
614 pa_builtins[PA_BUILTIN_INFQ] = decl;
615
616 decl = add_builtin_function ("__builtin_huge_valq", ftype,
617 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
618 NULL, NULL_TREE);
619 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
620 }
621 }
622
623 static rtx
624 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
625 machine_mode mode ATTRIBUTE_UNUSED,
626 int ignore ATTRIBUTE_UNUSED)
627 {
628 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
629 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
630
631 switch (fcode)
632 {
633 case PA_BUILTIN_FABSQ:
634 case PA_BUILTIN_COPYSIGNQ:
635 return expand_call (exp, target, ignore);
636
637 case PA_BUILTIN_INFQ:
638 case PA_BUILTIN_HUGE_VALQ:
639 {
640 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
641 REAL_VALUE_TYPE inf;
642 rtx tmp;
643
644 real_inf (&inf);
645 tmp = const_double_from_real_value (inf, target_mode);
646
647 tmp = validize_mem (force_const_mem (target_mode, tmp));
648
649 if (target == 0)
650 target = gen_reg_rtx (target_mode);
651
652 emit_move_insn (target, tmp);
653 return target;
654 }
655
656 default:
657 gcc_unreachable ();
658 }
659
660 return NULL_RTX;
661 }
662
663 /* Function to init struct machine_function.
664 This will be called, via a pointer variable,
665 from push_function_context. */
666
667 static struct machine_function *
668 pa_init_machine_status (void)
669 {
670 return ggc_cleared_alloc<machine_function> ();
671 }
672
673 /* If FROM is a probable pointer register, mark TO as a probable
674 pointer register with the same pointer alignment as FROM. */
675
676 static void
677 copy_reg_pointer (rtx to, rtx from)
678 {
679 if (REG_POINTER (from))
680 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
681 }
682
683 /* Return 1 if X contains a symbolic expression. We know these
684 expressions will have one of a few well defined forms, so
685 we need only check those forms. */
686 int
687 pa_symbolic_expression_p (rtx x)
688 {
689
690 /* Strip off any HIGH. */
691 if (GET_CODE (x) == HIGH)
692 x = XEXP (x, 0);
693
694 return symbolic_operand (x, VOIDmode);
695 }
696
697 /* Accept any constant that can be moved in one instruction into a
698 general register. */
699 int
700 pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
701 {
702 /* OK if ldo, ldil, or zdepi, can be used. */
703 return (VAL_14_BITS_P (ival)
704 || pa_ldil_cint_p (ival)
705 || pa_zdepi_cint_p (ival));
706 }
707 \f
708 /* True iff ldil can be used to load this CONST_INT. The least
709 significant 11 bits of the value must be zero and the value must
710 not change sign when extended from 32 to 64 bits. */
711 int
712 pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
713 {
714 unsigned HOST_WIDE_INT x;
715
716 x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
717 return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
718 }
719
720 /* True iff zdepi can be used to generate this CONST_INT.
721 zdepi first sign extends a 5-bit signed number to a given field
722 length, then places this field anywhere in a zero. */
723 int
724 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
725 {
726 unsigned HOST_WIDE_INT lsb_mask, t;
727
728 /* This might not be obvious, but it's at least fast.
729 This function is critical; we don't have the time loops would take. */
730 lsb_mask = x & -x;
731 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
732 /* Return true iff t is a power of two. */
733 return ((t & (t - 1)) == 0);
734 }
735
736 /* True iff depi or extru can be used to compute (reg & mask).
737 Accept bit pattern like these:
738 0....01....1
739 1....10....0
740 1..10..01..1 */
741 int
742 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
743 {
744 mask = ~mask;
745 mask += mask & -mask;
746 return (mask & (mask - 1)) == 0;
747 }
748
749 /* True iff depi can be used to compute (reg | MASK). */
750 int
751 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
752 {
753 mask += mask & -mask;
754 return (mask & (mask - 1)) == 0;
755 }
756 \f
757 /* Legitimize PIC addresses. If the address is already
758 position-independent, we return ORIG. Newly generated
759 position-independent addresses go to REG. If we need more
760 than one register, we lose. */
761
762 static rtx
763 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
764 {
765 rtx pic_ref = orig;
766
767 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
768
769 /* Labels need special handling. */
770 if (pic_label_operand (orig, mode))
771 {
772 rtx_insn *insn;
773
774 /* We do not want to go through the movXX expanders here since that
775 would create recursion.
776
777 Nor do we really want to call a generator for a named pattern
778 since that requires multiple patterns if we want to support
779 multiple word sizes.
780
781 So instead we just emit the raw set, which avoids the movXX
782 expanders completely. */
783 mark_reg_pointer (reg, BITS_PER_UNIT);
784 insn = emit_insn (gen_rtx_SET (reg, orig));
785
786 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
787 add_reg_note (insn, REG_EQUAL, orig);
788
789 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
790 and update LABEL_NUSES because this is not done automatically. */
791 if (reload_in_progress || reload_completed)
792 {
793 /* Extract LABEL_REF. */
794 if (GET_CODE (orig) == CONST)
795 orig = XEXP (XEXP (orig, 0), 0);
796 /* Extract CODE_LABEL. */
797 orig = XEXP (orig, 0);
798 add_reg_note (insn, REG_LABEL_OPERAND, orig);
799 /* Make sure we have label and not a note. */
800 if (LABEL_P (orig))
801 LABEL_NUSES (orig)++;
802 }
803 crtl->uses_pic_offset_table = 1;
804 return reg;
805 }
806 if (GET_CODE (orig) == SYMBOL_REF)
807 {
808 rtx_insn *insn;
809 rtx tmp_reg;
810
811 gcc_assert (reg);
812
813 /* Before reload, allocate a temporary register for the intermediate
814 result. This allows the sequence to be deleted when the final
815 result is unused and the insns are trivially dead. */
816 tmp_reg = ((reload_in_progress || reload_completed)
817 ? reg : gen_reg_rtx (Pmode));
818
819 if (function_label_operand (orig, VOIDmode))
820 {
821 /* Force function label into memory in word mode. */
822 orig = XEXP (force_const_mem (word_mode, orig), 0);
823 /* Load plabel address from DLT. */
824 emit_move_insn (tmp_reg,
825 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
826 gen_rtx_HIGH (word_mode, orig)));
827 pic_ref
828 = gen_const_mem (Pmode,
829 gen_rtx_LO_SUM (Pmode, tmp_reg,
830 gen_rtx_UNSPEC (Pmode,
831 gen_rtvec (1, orig),
832 UNSPEC_DLTIND14R)));
833 emit_move_insn (reg, pic_ref);
834 /* Now load address of function descriptor. */
835 pic_ref = gen_rtx_MEM (Pmode, reg);
836 }
837 else
838 {
839 /* Load symbol reference from DLT. */
840 emit_move_insn (tmp_reg,
841 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
842 gen_rtx_HIGH (word_mode, orig)));
843 pic_ref
844 = gen_const_mem (Pmode,
845 gen_rtx_LO_SUM (Pmode, tmp_reg,
846 gen_rtx_UNSPEC (Pmode,
847 gen_rtvec (1, orig),
848 UNSPEC_DLTIND14R)));
849 }
850
851 crtl->uses_pic_offset_table = 1;
852 mark_reg_pointer (reg, BITS_PER_UNIT);
853 insn = emit_move_insn (reg, pic_ref);
854
855 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
856 set_unique_reg_note (insn, REG_EQUAL, orig);
857
858 return reg;
859 }
860 else if (GET_CODE (orig) == CONST)
861 {
862 rtx base;
863
864 if (GET_CODE (XEXP (orig, 0)) == PLUS
865 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
866 return orig;
867
868 gcc_assert (reg);
869 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
870
871 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
872 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
873 base == reg ? 0 : reg);
874
875 if (GET_CODE (orig) == CONST_INT)
876 {
877 if (INT_14_BITS (orig))
878 return plus_constant (Pmode, base, INTVAL (orig));
879 orig = force_reg (Pmode, orig);
880 }
881 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
882 /* Likewise, should we set special REG_NOTEs here? */
883 }
884
885 return pic_ref;
886 }
887
888 static GTY(()) rtx gen_tls_tga;
889
890 static rtx
891 gen_tls_get_addr (void)
892 {
893 if (!gen_tls_tga)
894 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
895 return gen_tls_tga;
896 }
897
898 static rtx
899 hppa_tls_call (rtx arg)
900 {
901 rtx ret;
902
903 ret = gen_reg_rtx (Pmode);
904 emit_library_call_value (gen_tls_get_addr (), ret,
905 LCT_CONST, Pmode, 1, arg, Pmode);
906
907 return ret;
908 }
909
910 static rtx
911 legitimize_tls_address (rtx addr)
912 {
913 rtx ret, tmp, t1, t2, tp;
914 rtx_insn *insn;
915
916 /* Currently, we can't handle anything but a SYMBOL_REF. */
917 if (GET_CODE (addr) != SYMBOL_REF)
918 return addr;
919
920 switch (SYMBOL_REF_TLS_MODEL (addr))
921 {
922 case TLS_MODEL_GLOBAL_DYNAMIC:
923 tmp = gen_reg_rtx (Pmode);
924 if (flag_pic)
925 emit_insn (gen_tgd_load_pic (tmp, addr));
926 else
927 emit_insn (gen_tgd_load (tmp, addr));
928 ret = hppa_tls_call (tmp);
929 break;
930
931 case TLS_MODEL_LOCAL_DYNAMIC:
932 ret = gen_reg_rtx (Pmode);
933 tmp = gen_reg_rtx (Pmode);
934 start_sequence ();
935 if (flag_pic)
936 emit_insn (gen_tld_load_pic (tmp, addr));
937 else
938 emit_insn (gen_tld_load (tmp, addr));
939 t1 = hppa_tls_call (tmp);
940 insn = get_insns ();
941 end_sequence ();
942 t2 = gen_reg_rtx (Pmode);
943 emit_libcall_block (insn, t2, t1,
944 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
945 UNSPEC_TLSLDBASE));
946 emit_insn (gen_tld_offset_load (ret, addr, t2));
947 break;
948
949 case TLS_MODEL_INITIAL_EXEC:
950 tp = gen_reg_rtx (Pmode);
951 tmp = gen_reg_rtx (Pmode);
952 ret = gen_reg_rtx (Pmode);
953 emit_insn (gen_tp_load (tp));
954 if (flag_pic)
955 emit_insn (gen_tie_load_pic (tmp, addr));
956 else
957 emit_insn (gen_tie_load (tmp, addr));
958 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
959 break;
960
961 case TLS_MODEL_LOCAL_EXEC:
962 tp = gen_reg_rtx (Pmode);
963 ret = gen_reg_rtx (Pmode);
964 emit_insn (gen_tp_load (tp));
965 emit_insn (gen_tle_load (ret, addr, tp));
966 break;
967
968 default:
969 gcc_unreachable ();
970 }
971
972 return ret;
973 }
974
975 /* Helper for hppa_legitimize_address. Given X, return true if it
976 is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
977
978 This respectively represent canonical shift-add rtxs or scaled
979 memory addresses. */
980 static bool
981 mem_shadd_or_shadd_rtx_p (rtx x)
982 {
983 return ((GET_CODE (x) == ASHIFT
984 || GET_CODE (x) == MULT)
985 && GET_CODE (XEXP (x, 1)) == CONST_INT
986 && ((GET_CODE (x) == ASHIFT
987 && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
988 || (GET_CODE (x) == MULT
989 && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
990 }
991
992 /* Try machine-dependent ways of modifying an illegitimate address
993 to be legitimate. If we find one, return the new, valid address.
994 This macro is used in only one place: `memory_address' in explow.c.
995
996 OLDX is the address as it was before break_out_memory_refs was called.
997 In some cases it is useful to look at this to decide what needs to be done.
998
999 It is always safe for this macro to do nothing. It exists to recognize
1000 opportunities to optimize the output.
1001
1002 For the PA, transform:
1003
1004 memory(X + <large int>)
1005
1006 into:
1007
1008 if (<large int> & mask) >= 16
1009 Y = (<large int> & ~mask) + mask + 1 Round up.
1010 else
1011 Y = (<large int> & ~mask) Round down.
1012 Z = X + Y
1013 memory (Z + (<large int> - Y));
1014
1015 This is for CSE to find several similar references, and only use one Z.
1016
1017 X can either be a SYMBOL_REF or REG, but because combine cannot
1018 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1019 D will not fit in 14 bits.
1020
1021 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1022 0x1f as the mask.
1023
1024 MODE_INT references allow displacements which fit in 14 bits, so use
1025 0x3fff as the mask.
1026
1027 This relies on the fact that most mode MODE_FLOAT references will use FP
1028 registers and most mode MODE_INT references will use integer registers.
1029 (In the rare case of an FP register used in an integer MODE, we depend
1030 on secondary reloads to clean things up.)
1031
1032
1033 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1034 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1035 addressing modes to be used).
1036
1037 Note that the addresses passed into hppa_legitimize_address always
1038 come from a MEM, so we only have to match the MULT form on incoming
1039 addresses. But to be future proof we also match the ASHIFT form.
1040
1041 However, this routine always places those shift-add sequences into
1042 registers, so we have to generate the ASHIFT form as our output.
1043
1044 Put X and Z into registers. Then put the entire expression into
1045 a register. */
1046
1047 rtx
1048 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1049 machine_mode mode)
1050 {
1051 rtx orig = x;
1052
1053 /* We need to canonicalize the order of operands in unscaled indexed
1054 addresses since the code that checks if an address is valid doesn't
1055 always try both orders. */
1056 if (!TARGET_NO_SPACE_REGS
1057 && GET_CODE (x) == PLUS
1058 && GET_MODE (x) == Pmode
1059 && REG_P (XEXP (x, 0))
1060 && REG_P (XEXP (x, 1))
1061 && REG_POINTER (XEXP (x, 0))
1062 && !REG_POINTER (XEXP (x, 1)))
1063 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1064
1065 if (tls_referenced_p (x))
1066 return legitimize_tls_address (x);
1067 else if (flag_pic)
1068 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1069
1070 /* Strip off CONST. */
1071 if (GET_CODE (x) == CONST)
1072 x = XEXP (x, 0);
1073
1074 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1075 That should always be safe. */
1076 if (GET_CODE (x) == PLUS
1077 && GET_CODE (XEXP (x, 0)) == REG
1078 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1079 {
1080 rtx reg = force_reg (Pmode, XEXP (x, 1));
1081 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1082 }
1083
1084 /* Note we must reject symbols which represent function addresses
1085 since the assembler/linker can't handle arithmetic on plabels. */
1086 if (GET_CODE (x) == PLUS
1087 && GET_CODE (XEXP (x, 1)) == CONST_INT
1088 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1089 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1090 || GET_CODE (XEXP (x, 0)) == REG))
1091 {
1092 rtx int_part, ptr_reg;
1093 int newoffset;
1094 int offset = INTVAL (XEXP (x, 1));
1095 int mask;
1096
1097 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1098 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1099
1100 /* Choose which way to round the offset. Round up if we
1101 are >= halfway to the next boundary. */
1102 if ((offset & mask) >= ((mask + 1) / 2))
1103 newoffset = (offset & ~ mask) + mask + 1;
1104 else
1105 newoffset = (offset & ~ mask);
1106
1107 /* If the newoffset will not fit in 14 bits (ldo), then
1108 handling this would take 4 or 5 instructions (2 to load
1109 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1110 add the new offset and the SYMBOL_REF.) Combine can
1111 not handle 4->2 or 5->2 combinations, so do not create
1112 them. */
1113 if (! VAL_14_BITS_P (newoffset)
1114 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1115 {
1116 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1117 rtx tmp_reg
1118 = force_reg (Pmode,
1119 gen_rtx_HIGH (Pmode, const_part));
1120 ptr_reg
1121 = force_reg (Pmode,
1122 gen_rtx_LO_SUM (Pmode,
1123 tmp_reg, const_part));
1124 }
1125 else
1126 {
1127 if (! VAL_14_BITS_P (newoffset))
1128 int_part = force_reg (Pmode, GEN_INT (newoffset));
1129 else
1130 int_part = GEN_INT (newoffset);
1131
1132 ptr_reg = force_reg (Pmode,
1133 gen_rtx_PLUS (Pmode,
1134 force_reg (Pmode, XEXP (x, 0)),
1135 int_part));
1136 }
1137 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1138 }
1139
1140 /* Handle (plus (mult (a) (mem_shadd_constant)) (b)). */
1141
1142 if (GET_CODE (x) == PLUS
1143 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1144 && (OBJECT_P (XEXP (x, 1))
1145 || GET_CODE (XEXP (x, 1)) == SUBREG)
1146 && GET_CODE (XEXP (x, 1)) != CONST)
1147 {
1148 /* If we were given a MULT, we must fix the constant
1149 as we're going to create the ASHIFT form. */
1150 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1151 if (GET_CODE (XEXP (x, 0)) == MULT)
1152 shift_val = exact_log2 (shift_val);
1153
1154 rtx reg1, reg2;
1155 reg1 = XEXP (x, 1);
1156 if (GET_CODE (reg1) != REG)
1157 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1158
1159 reg2 = XEXP (XEXP (x, 0), 0);
1160 if (GET_CODE (reg2) != REG)
1161 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1162
1163 return force_reg (Pmode,
1164 gen_rtx_PLUS (Pmode,
1165 gen_rtx_ASHIFT (Pmode, reg2,
1166 GEN_INT (shift_val)),
1167 reg1));
1168 }
1169
1170 /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).
1171
1172 Only do so for floating point modes since this is more speculative
1173 and we lose if it's an integer store. */
1174 if (GET_CODE (x) == PLUS
1175 && GET_CODE (XEXP (x, 0)) == PLUS
1176 && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
1177 && (mode == SFmode || mode == DFmode))
1178 {
1179 int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
1180
1181 /* If we were given a MULT, we must fix the constant
1182 as we're going to create the ASHIFT form. */
1183 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
1184 shift_val = exact_log2 (shift_val);
1185
1186 /* Try and figure out what to use as a base register. */
1187 rtx reg1, reg2, base, idx;
1188
1189 reg1 = XEXP (XEXP (x, 0), 1);
1190 reg2 = XEXP (x, 1);
1191 base = NULL_RTX;
1192 idx = NULL_RTX;
1193
1194 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1195 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1196 it's a base register below. */
1197 if (GET_CODE (reg1) != REG)
1198 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1199
1200 if (GET_CODE (reg2) != REG)
1201 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1202
1203 /* Figure out what the base and index are. */
1204
1205 if (GET_CODE (reg1) == REG
1206 && REG_POINTER (reg1))
1207 {
1208 base = reg1;
1209 idx = gen_rtx_PLUS (Pmode,
1210 gen_rtx_ASHIFT (Pmode,
1211 XEXP (XEXP (XEXP (x, 0), 0), 0),
1212 GEN_INT (shift_val)),
1213 XEXP (x, 1));
1214 }
1215 else if (GET_CODE (reg2) == REG
1216 && REG_POINTER (reg2))
1217 {
1218 base = reg2;
1219 idx = XEXP (x, 0);
1220 }
1221
1222 if (base == 0)
1223 return orig;
1224
1225 /* If the index adds a large constant, try to scale the
1226 constant so that it can be loaded with only one insn. */
1227 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1228 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1229 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1230 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1231 {
1232 /* Divide the CONST_INT by the scale factor, then add it to A. */
1233 int val = INTVAL (XEXP (idx, 1));
1234 val /= (1 << shift_val);
1235
1236 reg1 = XEXP (XEXP (idx, 0), 0);
1237 if (GET_CODE (reg1) != REG)
1238 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1239
1240 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1241
1242 /* We can now generate a simple scaled indexed address. */
1243 return
1244 force_reg
1245 (Pmode, gen_rtx_PLUS (Pmode,
1246 gen_rtx_ASHIFT (Pmode, reg1,
1247 GEN_INT (shift_val)),
1248 base));
1249 }
1250
1251 /* If B + C is still a valid base register, then add them. */
1252 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1253 && INTVAL (XEXP (idx, 1)) <= 4096
1254 && INTVAL (XEXP (idx, 1)) >= -4096)
1255 {
1256 rtx reg1, reg2;
1257
1258 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1259
1260 reg2 = XEXP (XEXP (idx, 0), 0);
1261 if (GET_CODE (reg2) != CONST_INT)
1262 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1263
1264 return force_reg (Pmode,
1265 gen_rtx_PLUS (Pmode,
1266 gen_rtx_ASHIFT (Pmode, reg2,
1267 GEN_INT (shift_val)),
1268 reg1));
1269 }
1270
1271 /* Get the index into a register, then add the base + index and
1272 return a register holding the result. */
1273
1274 /* First get A into a register. */
1275 reg1 = XEXP (XEXP (idx, 0), 0);
1276 if (GET_CODE (reg1) != REG)
1277 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1278
1279 /* And get B into a register. */
1280 reg2 = XEXP (idx, 1);
1281 if (GET_CODE (reg2) != REG)
1282 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1283
1284 reg1 = force_reg (Pmode,
1285 gen_rtx_PLUS (Pmode,
1286 gen_rtx_ASHIFT (Pmode, reg1,
1287 GEN_INT (shift_val)),
1288 reg2));
1289
1290 /* Add the result to our base register and return. */
1291 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1292
1293 }
1294
1295 /* Uh-oh. We might have an address for x[n-100000]. This needs
1296 special handling to avoid creating an indexed memory address
1297 with x-100000 as the base.
1298
1299 If the constant part is small enough, then it's still safe because
1300 there is a guard page at the beginning and end of the data segment.
1301
1302 Scaled references are common enough that we want to try and rearrange the
1303 terms so that we can use indexing for these addresses too. Only
1304 do the optimization for floatint point modes. */
1305
1306 if (GET_CODE (x) == PLUS
1307 && pa_symbolic_expression_p (XEXP (x, 1)))
1308 {
1309 /* Ugly. We modify things here so that the address offset specified
1310 by the index expression is computed first, then added to x to form
1311 the entire address. */
1312
1313 rtx regx1, regx2, regy1, regy2, y;
1314
1315 /* Strip off any CONST. */
1316 y = XEXP (x, 1);
1317 if (GET_CODE (y) == CONST)
1318 y = XEXP (y, 0);
1319
1320 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1321 {
1322 /* See if this looks like
1323 (plus (mult (reg) (mem_shadd_const))
1324 (const (plus (symbol_ref) (const_int))))
1325
1326 Where const_int is small. In that case the const
1327 expression is a valid pointer for indexing.
1328
1329 If const_int is big, but can be divided evenly by shadd_const
1330 and added to (reg). This allows more scaled indexed addresses. */
1331 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1332 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1333 && GET_CODE (XEXP (y, 1)) == CONST_INT
1334 && INTVAL (XEXP (y, 1)) >= -4096
1335 && INTVAL (XEXP (y, 1)) <= 4095)
1336 {
1337 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1338
1339 /* If we were given a MULT, we must fix the constant
1340 as we're going to create the ASHIFT form. */
1341 if (GET_CODE (XEXP (x, 0)) == MULT)
1342 shift_val = exact_log2 (shift_val);
1343
1344 rtx reg1, reg2;
1345
1346 reg1 = XEXP (x, 1);
1347 if (GET_CODE (reg1) != REG)
1348 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1349
1350 reg2 = XEXP (XEXP (x, 0), 0);
1351 if (GET_CODE (reg2) != REG)
1352 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1353
1354 return
1355 force_reg (Pmode,
1356 gen_rtx_PLUS (Pmode,
1357 gen_rtx_ASHIFT (Pmode,
1358 reg2,
1359 GEN_INT (shift_val)),
1360 reg1));
1361 }
1362 else if ((mode == DFmode || mode == SFmode)
1363 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1364 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1365 && GET_CODE (XEXP (y, 1)) == CONST_INT
1366 && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
1367 {
1368 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1369
1370 /* If we were given a MULT, we must fix the constant
1371 as we're going to create the ASHIFT form. */
1372 if (GET_CODE (XEXP (x, 0)) == MULT)
1373 shift_val = exact_log2 (shift_val);
1374
1375 regx1
1376 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1377 / INTVAL (XEXP (XEXP (x, 0), 1))));
1378 regx2 = XEXP (XEXP (x, 0), 0);
1379 if (GET_CODE (regx2) != REG)
1380 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1381 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1382 regx2, regx1));
1383 return
1384 force_reg (Pmode,
1385 gen_rtx_PLUS (Pmode,
1386 gen_rtx_ASHIFT (Pmode, regx2,
1387 GEN_INT (shift_val)),
1388 force_reg (Pmode, XEXP (y, 0))));
1389 }
1390 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1391 && INTVAL (XEXP (y, 1)) >= -4096
1392 && INTVAL (XEXP (y, 1)) <= 4095)
1393 {
1394 /* This is safe because of the guard page at the
1395 beginning and end of the data space. Just
1396 return the original address. */
1397 return orig;
1398 }
1399 else
1400 {
1401 /* Doesn't look like one we can optimize. */
1402 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1403 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1404 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1405 regx1 = force_reg (Pmode,
1406 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1407 regx1, regy2));
1408 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1409 }
1410 }
1411 }
1412
1413 return orig;
1414 }
1415
1416 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1417
1418 Compute extra cost of moving data between one register class
1419 and another.
1420
1421 Make moves from SAR so expensive they should never happen. We used to
1422 have 0xffff here, but that generates overflow in rare cases.
1423
1424 Copies involving a FP register and a non-FP register are relatively
1425 expensive because they must go through memory.
1426
1427 Other copies are reasonably cheap. */
1428
1429 static int
1430 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1431 reg_class_t from, reg_class_t to)
1432 {
1433 if (from == SHIFT_REGS)
1434 return 0x100;
1435 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1436 return 18;
1437 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1438 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1439 return 16;
1440 else
1441 return 2;
1442 }
1443
1444 /* For the HPPA, REG and REG+CONST is cost 0
1445 and addresses involving symbolic constants are cost 2.
1446
1447 PIC addresses are very expensive.
1448
1449 It is no coincidence that this has the same structure
1450 as pa_legitimate_address_p. */
1451
1452 static int
1453 hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
1454 addr_space_t as ATTRIBUTE_UNUSED,
1455 bool speed ATTRIBUTE_UNUSED)
1456 {
1457 switch (GET_CODE (X))
1458 {
1459 case REG:
1460 case PLUS:
1461 case LO_SUM:
1462 return 1;
1463 case HIGH:
1464 return 2;
1465 default:
1466 return 4;
1467 }
1468 }
1469
1470 /* Compute a (partial) cost for rtx X. Return true if the complete
1471 cost has been computed, and false if subexpressions should be
1472 scanned. In either case, *TOTAL contains the cost result. */
1473
1474 static bool
1475 hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
1476 int opno ATTRIBUTE_UNUSED,
1477 int *total, bool speed ATTRIBUTE_UNUSED)
1478 {
1479 int factor;
1480 int code = GET_CODE (x);
1481
1482 switch (code)
1483 {
1484 case CONST_INT:
1485 if (INTVAL (x) == 0)
1486 *total = 0;
1487 else if (INT_14_BITS (x))
1488 *total = 1;
1489 else
1490 *total = 2;
1491 return true;
1492
1493 case HIGH:
1494 *total = 2;
1495 return true;
1496
1497 case CONST:
1498 case LABEL_REF:
1499 case SYMBOL_REF:
1500 *total = 4;
1501 return true;
1502
1503 case CONST_DOUBLE:
1504 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1505 && outer_code != SET)
1506 *total = 0;
1507 else
1508 *total = 8;
1509 return true;
1510
1511 case MULT:
1512 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1513 {
1514 *total = COSTS_N_INSNS (3);
1515 return true;
1516 }
1517
1518 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1519 factor = GET_MODE_SIZE (mode) / 4;
1520 if (factor == 0)
1521 factor = 1;
1522
1523 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1524 *total = factor * factor * COSTS_N_INSNS (8);
1525 else
1526 *total = factor * factor * COSTS_N_INSNS (20);
1527 return true;
1528
1529 case DIV:
1530 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1531 {
1532 *total = COSTS_N_INSNS (14);
1533 return true;
1534 }
1535 /* FALLTHRU */
1536
1537 case UDIV:
1538 case MOD:
1539 case UMOD:
1540 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1541 factor = GET_MODE_SIZE (mode) / 4;
1542 if (factor == 0)
1543 factor = 1;
1544
1545 *total = factor * factor * COSTS_N_INSNS (60);
1546 return true;
1547
1548 case PLUS: /* this includes shNadd insns */
1549 case MINUS:
1550 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1551 {
1552 *total = COSTS_N_INSNS (3);
1553 return true;
1554 }
1555
1556 /* A size N times larger than UNITS_PER_WORD needs N times as
1557 many insns, taking N times as long. */
1558 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
1559 if (factor == 0)
1560 factor = 1;
1561 *total = factor * COSTS_N_INSNS (1);
1562 return true;
1563
1564 case ASHIFT:
1565 case ASHIFTRT:
1566 case LSHIFTRT:
1567 *total = COSTS_N_INSNS (1);
1568 return true;
1569
1570 default:
1571 return false;
1572 }
1573 }
1574
1575 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1576 new rtx with the correct mode. */
1577 static inline rtx
1578 force_mode (machine_mode mode, rtx orig)
1579 {
1580 if (mode == GET_MODE (orig))
1581 return orig;
1582
1583 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1584
1585 return gen_rtx_REG (mode, REGNO (orig));
1586 }
1587
1588 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1589
1590 static bool
1591 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1592 {
1593 return tls_referenced_p (x);
1594 }
1595
1596 /* Emit insns to move operands[1] into operands[0].
1597
1598 Return 1 if we have written out everything that needs to be done to
1599 do the move. Otherwise, return 0 and the caller will emit the move
1600 normally.
1601
1602 Note SCRATCH_REG may not be in the proper mode depending on how it
1603 will be used. This routine is responsible for creating a new copy
1604 of SCRATCH_REG in the proper mode. */
1605
1606 int
1607 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1608 {
1609 register rtx operand0 = operands[0];
1610 register rtx operand1 = operands[1];
1611 register rtx tem;
1612
1613 /* We can only handle indexed addresses in the destination operand
1614 of floating point stores. Thus, we need to break out indexed
1615 addresses from the destination operand. */
1616 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1617 {
1618 gcc_assert (can_create_pseudo_p ());
1619
1620 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1621 operand0 = replace_equiv_address (operand0, tem);
1622 }
1623
1624 /* On targets with non-equivalent space registers, break out unscaled
1625 indexed addresses from the source operand before the final CSE.
1626 We have to do this because the REG_POINTER flag is not correctly
1627 carried through various optimization passes and CSE may substitute
1628 a pseudo without the pointer set for one with the pointer set. As
1629 a result, we loose various opportunities to create insns with
1630 unscaled indexed addresses. */
1631 if (!TARGET_NO_SPACE_REGS
1632 && !cse_not_expected
1633 && GET_CODE (operand1) == MEM
1634 && GET_CODE (XEXP (operand1, 0)) == PLUS
1635 && REG_P (XEXP (XEXP (operand1, 0), 0))
1636 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1637 operand1
1638 = replace_equiv_address (operand1,
1639 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1640
1641 if (scratch_reg
1642 && reload_in_progress && GET_CODE (operand0) == REG
1643 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1644 operand0 = reg_equiv_mem (REGNO (operand0));
1645 else if (scratch_reg
1646 && reload_in_progress && GET_CODE (operand0) == SUBREG
1647 && GET_CODE (SUBREG_REG (operand0)) == REG
1648 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1649 {
1650 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1651 the code which tracks sets/uses for delete_output_reload. */
1652 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1653 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1654 SUBREG_BYTE (operand0));
1655 operand0 = alter_subreg (&temp, true);
1656 }
1657
1658 if (scratch_reg
1659 && reload_in_progress && GET_CODE (operand1) == REG
1660 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1661 operand1 = reg_equiv_mem (REGNO (operand1));
1662 else if (scratch_reg
1663 && reload_in_progress && GET_CODE (operand1) == SUBREG
1664 && GET_CODE (SUBREG_REG (operand1)) == REG
1665 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1666 {
1667 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1668 the code which tracks sets/uses for delete_output_reload. */
1669 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1670 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1671 SUBREG_BYTE (operand1));
1672 operand1 = alter_subreg (&temp, true);
1673 }
1674
1675 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1676 && ((tem = find_replacement (&XEXP (operand0, 0)))
1677 != XEXP (operand0, 0)))
1678 operand0 = replace_equiv_address (operand0, tem);
1679
1680 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1681 && ((tem = find_replacement (&XEXP (operand1, 0)))
1682 != XEXP (operand1, 0)))
1683 operand1 = replace_equiv_address (operand1, tem);
1684
1685 /* Handle secondary reloads for loads/stores of FP registers from
1686 REG+D addresses where D does not fit in 5 or 14 bits, including
1687 (subreg (mem (addr))) cases, and reloads for other unsupported
1688 memory operands. */
1689 if (scratch_reg
1690 && FP_REG_P (operand0)
1691 && (MEM_P (operand1)
1692 || (GET_CODE (operand1) == SUBREG
1693 && MEM_P (XEXP (operand1, 0)))))
1694 {
1695 rtx op1 = operand1;
1696
1697 if (GET_CODE (op1) == SUBREG)
1698 op1 = XEXP (op1, 0);
1699
1700 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1701 {
1702 if (!(TARGET_PA_20
1703 && !TARGET_ELF32
1704 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1705 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1706 {
1707 /* SCRATCH_REG will hold an address and maybe the actual data.
1708 We want it in WORD_MODE regardless of what mode it was
1709 originally given to us. */
1710 scratch_reg = force_mode (word_mode, scratch_reg);
1711
1712 /* D might not fit in 14 bits either; for such cases load D
1713 into scratch reg. */
1714 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1715 {
1716 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1717 emit_move_insn (scratch_reg,
1718 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1719 Pmode,
1720 XEXP (XEXP (op1, 0), 0),
1721 scratch_reg));
1722 }
1723 else
1724 emit_move_insn (scratch_reg, XEXP (op1, 0));
1725 emit_insn (gen_rtx_SET (operand0,
1726 replace_equiv_address (op1, scratch_reg)));
1727 return 1;
1728 }
1729 }
1730 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1731 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1732 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1733 {
1734 /* Load memory address into SCRATCH_REG. */
1735 scratch_reg = force_mode (word_mode, scratch_reg);
1736 emit_move_insn (scratch_reg, XEXP (op1, 0));
1737 emit_insn (gen_rtx_SET (operand0,
1738 replace_equiv_address (op1, scratch_reg)));
1739 return 1;
1740 }
1741 }
1742 else if (scratch_reg
1743 && FP_REG_P (operand1)
1744 && (MEM_P (operand0)
1745 || (GET_CODE (operand0) == SUBREG
1746 && MEM_P (XEXP (operand0, 0)))))
1747 {
1748 rtx op0 = operand0;
1749
1750 if (GET_CODE (op0) == SUBREG)
1751 op0 = XEXP (op0, 0);
1752
1753 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1754 {
1755 if (!(TARGET_PA_20
1756 && !TARGET_ELF32
1757 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1758 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1759 {
1760 /* SCRATCH_REG will hold an address and maybe the actual data.
1761 We want it in WORD_MODE regardless of what mode it was
1762 originally given to us. */
1763 scratch_reg = force_mode (word_mode, scratch_reg);
1764
1765 /* D might not fit in 14 bits either; for such cases load D
1766 into scratch reg. */
1767 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1768 {
1769 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1770 emit_move_insn (scratch_reg,
1771 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1772 Pmode,
1773 XEXP (XEXP (op0, 0), 0),
1774 scratch_reg));
1775 }
1776 else
1777 emit_move_insn (scratch_reg, XEXP (op0, 0));
1778 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1779 operand1));
1780 return 1;
1781 }
1782 }
1783 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1784 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1785 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1786 {
1787 /* Load memory address into SCRATCH_REG. */
1788 scratch_reg = force_mode (word_mode, scratch_reg);
1789 emit_move_insn (scratch_reg, XEXP (op0, 0));
1790 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1791 operand1));
1792 return 1;
1793 }
1794 }
1795 /* Handle secondary reloads for loads of FP registers from constant
1796 expressions by forcing the constant into memory. For the most part,
1797 this is only necessary for SImode and DImode.
1798
1799 Use scratch_reg to hold the address of the memory location. */
1800 else if (scratch_reg
1801 && CONSTANT_P (operand1)
1802 && FP_REG_P (operand0))
1803 {
1804 rtx const_mem, xoperands[2];
1805
1806 if (operand1 == CONST0_RTX (mode))
1807 {
1808 emit_insn (gen_rtx_SET (operand0, operand1));
1809 return 1;
1810 }
1811
1812 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1813 it in WORD_MODE regardless of what mode it was originally given
1814 to us. */
1815 scratch_reg = force_mode (word_mode, scratch_reg);
1816
1817 /* Force the constant into memory and put the address of the
1818 memory location into scratch_reg. */
1819 const_mem = force_const_mem (mode, operand1);
1820 xoperands[0] = scratch_reg;
1821 xoperands[1] = XEXP (const_mem, 0);
1822 pa_emit_move_sequence (xoperands, Pmode, 0);
1823
1824 /* Now load the destination register. */
1825 emit_insn (gen_rtx_SET (operand0,
1826 replace_equiv_address (const_mem, scratch_reg)));
1827 return 1;
1828 }
1829 /* Handle secondary reloads for SAR. These occur when trying to load
1830 the SAR from memory or a constant. */
1831 else if (scratch_reg
1832 && GET_CODE (operand0) == REG
1833 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1834 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1835 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1836 {
1837 /* D might not fit in 14 bits either; for such cases load D into
1838 scratch reg. */
1839 if (GET_CODE (operand1) == MEM
1840 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1841 {
1842 /* We are reloading the address into the scratch register, so we
1843 want to make sure the scratch register is a full register. */
1844 scratch_reg = force_mode (word_mode, scratch_reg);
1845
1846 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1847 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1848 0)),
1849 Pmode,
1850 XEXP (XEXP (operand1, 0),
1851 0),
1852 scratch_reg));
1853
1854 /* Now we are going to load the scratch register from memory,
1855 we want to load it in the same width as the original MEM,
1856 which must be the same as the width of the ultimate destination,
1857 OPERAND0. */
1858 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1859
1860 emit_move_insn (scratch_reg,
1861 replace_equiv_address (operand1, scratch_reg));
1862 }
1863 else
1864 {
1865 /* We want to load the scratch register using the same mode as
1866 the ultimate destination. */
1867 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1868
1869 emit_move_insn (scratch_reg, operand1);
1870 }
1871
1872 /* And emit the insn to set the ultimate destination. We know that
1873 the scratch register has the same mode as the destination at this
1874 point. */
1875 emit_move_insn (operand0, scratch_reg);
1876 return 1;
1877 }
1878
1879 /* Handle the most common case: storing into a register. */
1880 if (register_operand (operand0, mode))
1881 {
1882 /* Legitimize TLS symbol references. This happens for references
1883 that aren't a legitimate constant. */
1884 if (PA_SYMBOL_REF_TLS_P (operand1))
1885 operand1 = legitimize_tls_address (operand1);
1886
1887 if (register_operand (operand1, mode)
1888 || (GET_CODE (operand1) == CONST_INT
1889 && pa_cint_ok_for_move (UINTVAL (operand1)))
1890 || (operand1 == CONST0_RTX (mode))
1891 || (GET_CODE (operand1) == HIGH
1892 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1893 /* Only `general_operands' can come here, so MEM is ok. */
1894 || GET_CODE (operand1) == MEM)
1895 {
1896 /* Various sets are created during RTL generation which don't
1897 have the REG_POINTER flag correctly set. After the CSE pass,
1898 instruction recognition can fail if we don't consistently
1899 set this flag when performing register copies. This should
1900 also improve the opportunities for creating insns that use
1901 unscaled indexing. */
1902 if (REG_P (operand0) && REG_P (operand1))
1903 {
1904 if (REG_POINTER (operand1)
1905 && !REG_POINTER (operand0)
1906 && !HARD_REGISTER_P (operand0))
1907 copy_reg_pointer (operand0, operand1);
1908 }
1909
1910 /* When MEMs are broken out, the REG_POINTER flag doesn't
1911 get set. In some cases, we can set the REG_POINTER flag
1912 from the declaration for the MEM. */
1913 if (REG_P (operand0)
1914 && GET_CODE (operand1) == MEM
1915 && !REG_POINTER (operand0))
1916 {
1917 tree decl = MEM_EXPR (operand1);
1918
1919 /* Set the register pointer flag and register alignment
1920 if the declaration for this memory reference is a
1921 pointer type. */
1922 if (decl)
1923 {
1924 tree type;
1925
1926 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1927 tree operand 1. */
1928 if (TREE_CODE (decl) == COMPONENT_REF)
1929 decl = TREE_OPERAND (decl, 1);
1930
1931 type = TREE_TYPE (decl);
1932 type = strip_array_types (type);
1933
1934 if (POINTER_TYPE_P (type))
1935 mark_reg_pointer (operand0, BITS_PER_UNIT);
1936 }
1937 }
1938
1939 emit_insn (gen_rtx_SET (operand0, operand1));
1940 return 1;
1941 }
1942 }
1943 else if (GET_CODE (operand0) == MEM)
1944 {
1945 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1946 && !(reload_in_progress || reload_completed))
1947 {
1948 rtx temp = gen_reg_rtx (DFmode);
1949
1950 emit_insn (gen_rtx_SET (temp, operand1));
1951 emit_insn (gen_rtx_SET (operand0, temp));
1952 return 1;
1953 }
1954 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1955 {
1956 /* Run this case quickly. */
1957 emit_insn (gen_rtx_SET (operand0, operand1));
1958 return 1;
1959 }
1960 if (! (reload_in_progress || reload_completed))
1961 {
1962 operands[0] = validize_mem (operand0);
1963 operands[1] = operand1 = force_reg (mode, operand1);
1964 }
1965 }
1966
1967 /* Simplify the source if we need to.
1968 Note we do have to handle function labels here, even though we do
1969 not consider them legitimate constants. Loop optimizations can
1970 call the emit_move_xxx with one as a source. */
1971 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1972 || (GET_CODE (operand1) == HIGH
1973 && symbolic_operand (XEXP (operand1, 0), mode))
1974 || function_label_operand (operand1, VOIDmode)
1975 || tls_referenced_p (operand1))
1976 {
1977 int ishighonly = 0;
1978
1979 if (GET_CODE (operand1) == HIGH)
1980 {
1981 ishighonly = 1;
1982 operand1 = XEXP (operand1, 0);
1983 }
1984 if (symbolic_operand (operand1, mode))
1985 {
1986 /* Argh. The assembler and linker can't handle arithmetic
1987 involving plabels.
1988
1989 So we force the plabel into memory, load operand0 from
1990 the memory location, then add in the constant part. */
1991 if ((GET_CODE (operand1) == CONST
1992 && GET_CODE (XEXP (operand1, 0)) == PLUS
1993 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1994 VOIDmode))
1995 || function_label_operand (operand1, VOIDmode))
1996 {
1997 rtx temp, const_part;
1998
1999 /* Figure out what (if any) scratch register to use. */
2000 if (reload_in_progress || reload_completed)
2001 {
2002 scratch_reg = scratch_reg ? scratch_reg : operand0;
2003 /* SCRATCH_REG will hold an address and maybe the actual
2004 data. We want it in WORD_MODE regardless of what mode it
2005 was originally given to us. */
2006 scratch_reg = force_mode (word_mode, scratch_reg);
2007 }
2008 else if (flag_pic)
2009 scratch_reg = gen_reg_rtx (Pmode);
2010
2011 if (GET_CODE (operand1) == CONST)
2012 {
2013 /* Save away the constant part of the expression. */
2014 const_part = XEXP (XEXP (operand1, 0), 1);
2015 gcc_assert (GET_CODE (const_part) == CONST_INT);
2016
2017 /* Force the function label into memory. */
2018 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2019 }
2020 else
2021 {
2022 /* No constant part. */
2023 const_part = NULL_RTX;
2024
2025 /* Force the function label into memory. */
2026 temp = force_const_mem (mode, operand1);
2027 }
2028
2029
2030 /* Get the address of the memory location. PIC-ify it if
2031 necessary. */
2032 temp = XEXP (temp, 0);
2033 if (flag_pic)
2034 temp = legitimize_pic_address (temp, mode, scratch_reg);
2035
2036 /* Put the address of the memory location into our destination
2037 register. */
2038 operands[1] = temp;
2039 pa_emit_move_sequence (operands, mode, scratch_reg);
2040
2041 /* Now load from the memory location into our destination
2042 register. */
2043 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2044 pa_emit_move_sequence (operands, mode, scratch_reg);
2045
2046 /* And add back in the constant part. */
2047 if (const_part != NULL_RTX)
2048 expand_inc (operand0, const_part);
2049
2050 return 1;
2051 }
2052
2053 if (flag_pic)
2054 {
2055 rtx_insn *insn;
2056 rtx temp;
2057
2058 if (reload_in_progress || reload_completed)
2059 {
2060 temp = scratch_reg ? scratch_reg : operand0;
2061 /* TEMP will hold an address and maybe the actual
2062 data. We want it in WORD_MODE regardless of what mode it
2063 was originally given to us. */
2064 temp = force_mode (word_mode, temp);
2065 }
2066 else
2067 temp = gen_reg_rtx (Pmode);
2068
2069 /* Force (const (plus (symbol) (const_int))) to memory
2070 if the const_int will not fit in 14 bits. Although
2071 this requires a relocation, the instruction sequence
2072 needed to load the value is shorter. */
2073 if (GET_CODE (operand1) == CONST
2074 && GET_CODE (XEXP (operand1, 0)) == PLUS
2075 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2076 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2077 {
2078 rtx x, m = force_const_mem (mode, operand1);
2079
2080 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2081 x = replace_equiv_address (m, x);
2082 insn = emit_move_insn (operand0, x);
2083 }
2084 else
2085 {
2086 operands[1] = legitimize_pic_address (operand1, mode, temp);
2087 if (REG_P (operand0) && REG_P (operands[1]))
2088 copy_reg_pointer (operand0, operands[1]);
2089 insn = emit_move_insn (operand0, operands[1]);
2090 }
2091
2092 /* Put a REG_EQUAL note on this insn. */
2093 set_unique_reg_note (insn, REG_EQUAL, operand1);
2094 }
2095 /* On the HPPA, references to data space are supposed to use dp,
2096 register 27, but showing it in the RTL inhibits various cse
2097 and loop optimizations. */
2098 else
2099 {
2100 rtx temp, set;
2101
2102 if (reload_in_progress || reload_completed)
2103 {
2104 temp = scratch_reg ? scratch_reg : operand0;
2105 /* TEMP will hold an address and maybe the actual
2106 data. We want it in WORD_MODE regardless of what mode it
2107 was originally given to us. */
2108 temp = force_mode (word_mode, temp);
2109 }
2110 else
2111 temp = gen_reg_rtx (mode);
2112
2113 /* Loading a SYMBOL_REF into a register makes that register
2114 safe to be used as the base in an indexed address.
2115
2116 Don't mark hard registers though. That loses. */
2117 if (GET_CODE (operand0) == REG
2118 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2119 mark_reg_pointer (operand0, BITS_PER_UNIT);
2120 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2121 mark_reg_pointer (temp, BITS_PER_UNIT);
2122
2123 if (ishighonly)
2124 set = gen_rtx_SET (operand0, temp);
2125 else
2126 set = gen_rtx_SET (operand0,
2127 gen_rtx_LO_SUM (mode, temp, operand1));
2128
2129 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2130 emit_insn (set);
2131
2132 }
2133 return 1;
2134 }
2135 else if (tls_referenced_p (operand1))
2136 {
2137 rtx tmp = operand1;
2138 rtx addend = NULL;
2139
2140 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2141 {
2142 addend = XEXP (XEXP (tmp, 0), 1);
2143 tmp = XEXP (XEXP (tmp, 0), 0);
2144 }
2145
2146 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2147 tmp = legitimize_tls_address (tmp);
2148 if (addend)
2149 {
2150 tmp = gen_rtx_PLUS (mode, tmp, addend);
2151 tmp = force_operand (tmp, operands[0]);
2152 }
2153 operands[1] = tmp;
2154 }
2155 else if (GET_CODE (operand1) != CONST_INT
2156 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2157 {
2158 rtx temp;
2159 rtx_insn *insn;
2160 rtx op1 = operand1;
2161 HOST_WIDE_INT value = 0;
2162 HOST_WIDE_INT insv = 0;
2163 int insert = 0;
2164
2165 if (GET_CODE (operand1) == CONST_INT)
2166 value = INTVAL (operand1);
2167
2168 if (TARGET_64BIT
2169 && GET_CODE (operand1) == CONST_INT
2170 && HOST_BITS_PER_WIDE_INT > 32
2171 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2172 {
2173 HOST_WIDE_INT nval;
2174
2175 /* Extract the low order 32 bits of the value and sign extend.
2176 If the new value is the same as the original value, we can
2177 can use the original value as-is. If the new value is
2178 different, we use it and insert the most-significant 32-bits
2179 of the original value into the final result. */
2180 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2181 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2182 if (value != nval)
2183 {
2184 #if HOST_BITS_PER_WIDE_INT > 32
2185 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2186 #endif
2187 insert = 1;
2188 value = nval;
2189 operand1 = GEN_INT (nval);
2190 }
2191 }
2192
2193 if (reload_in_progress || reload_completed)
2194 temp = scratch_reg ? scratch_reg : operand0;
2195 else
2196 temp = gen_reg_rtx (mode);
2197
2198 /* We don't directly split DImode constants on 32-bit targets
2199 because PLUS uses an 11-bit immediate and the insn sequence
2200 generated is not as efficient as the one using HIGH/LO_SUM. */
2201 if (GET_CODE (operand1) == CONST_INT
2202 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2203 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2204 && !insert)
2205 {
2206 /* Directly break constant into high and low parts. This
2207 provides better optimization opportunities because various
2208 passes recognize constants split with PLUS but not LO_SUM.
2209 We use a 14-bit signed low part except when the addition
2210 of 0x4000 to the high part might change the sign of the
2211 high part. */
2212 HOST_WIDE_INT low = value & 0x3fff;
2213 HOST_WIDE_INT high = value & ~ 0x3fff;
2214
2215 if (low >= 0x2000)
2216 {
2217 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2218 high += 0x2000;
2219 else
2220 high += 0x4000;
2221 }
2222
2223 low = value - high;
2224
2225 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2226 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2227 }
2228 else
2229 {
2230 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2231 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2232 }
2233
2234 insn = emit_move_insn (operands[0], operands[1]);
2235
2236 /* Now insert the most significant 32 bits of the value
2237 into the register. When we don't have a second register
2238 available, it could take up to nine instructions to load
2239 a 64-bit integer constant. Prior to reload, we force
2240 constants that would take more than three instructions
2241 to load to the constant pool. During and after reload,
2242 we have to handle all possible values. */
2243 if (insert)
2244 {
2245 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2246 register and the value to be inserted is outside the
2247 range that can be loaded with three depdi instructions. */
2248 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2249 {
2250 operand1 = GEN_INT (insv);
2251
2252 emit_insn (gen_rtx_SET (temp,
2253 gen_rtx_HIGH (mode, operand1)));
2254 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2255 if (mode == DImode)
2256 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2257 const0_rtx, temp));
2258 else
2259 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2260 const0_rtx, temp));
2261 }
2262 else
2263 {
2264 int len = 5, pos = 27;
2265
2266 /* Insert the bits using the depdi instruction. */
2267 while (pos >= 0)
2268 {
2269 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2270 HOST_WIDE_INT sign = v5 < 0;
2271
2272 /* Left extend the insertion. */
2273 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2274 while (pos > 0 && (insv & 1) == sign)
2275 {
2276 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2277 len += 1;
2278 pos -= 1;
2279 }
2280
2281 if (mode == DImode)
2282 insn = emit_insn (gen_insvdi (operand0,
2283 GEN_INT (len),
2284 GEN_INT (pos),
2285 GEN_INT (v5)));
2286 else
2287 insn = emit_insn (gen_insvsi (operand0,
2288 GEN_INT (len),
2289 GEN_INT (pos),
2290 GEN_INT (v5)));
2291
2292 len = pos > 0 && pos < 5 ? pos : 5;
2293 pos -= len;
2294 }
2295 }
2296 }
2297
2298 set_unique_reg_note (insn, REG_EQUAL, op1);
2299
2300 return 1;
2301 }
2302 }
2303 /* Now have insn-emit do whatever it normally does. */
2304 return 0;
2305 }
2306
2307 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2308 it will need a link/runtime reloc). */
2309
2310 int
2311 pa_reloc_needed (tree exp)
2312 {
2313 int reloc = 0;
2314
2315 switch (TREE_CODE (exp))
2316 {
2317 case ADDR_EXPR:
2318 return 1;
2319
2320 case POINTER_PLUS_EXPR:
2321 case PLUS_EXPR:
2322 case MINUS_EXPR:
2323 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2324 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2325 break;
2326
2327 CASE_CONVERT:
2328 case NON_LVALUE_EXPR:
2329 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2330 break;
2331
2332 case CONSTRUCTOR:
2333 {
2334 tree value;
2335 unsigned HOST_WIDE_INT ix;
2336
2337 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2338 if (value)
2339 reloc |= pa_reloc_needed (value);
2340 }
2341 break;
2342
2343 case ERROR_MARK:
2344 break;
2345
2346 default:
2347 break;
2348 }
2349 return reloc;
2350 }
2351
2352 \f
2353 /* Return the best assembler insn template
2354 for moving operands[1] into operands[0] as a fullword. */
2355 const char *
2356 pa_singlemove_string (rtx *operands)
2357 {
2358 HOST_WIDE_INT intval;
2359
2360 if (GET_CODE (operands[0]) == MEM)
2361 return "stw %r1,%0";
2362 if (GET_CODE (operands[1]) == MEM)
2363 return "ldw %1,%0";
2364 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2365 {
2366 long i;
2367
2368 gcc_assert (GET_MODE (operands[1]) == SFmode);
2369
2370 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2371 bit pattern. */
2372 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2373
2374 operands[1] = GEN_INT (i);
2375 /* Fall through to CONST_INT case. */
2376 }
2377 if (GET_CODE (operands[1]) == CONST_INT)
2378 {
2379 intval = INTVAL (operands[1]);
2380
2381 if (VAL_14_BITS_P (intval))
2382 return "ldi %1,%0";
2383 else if ((intval & 0x7ff) == 0)
2384 return "ldil L'%1,%0";
2385 else if (pa_zdepi_cint_p (intval))
2386 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2387 else
2388 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2389 }
2390 return "copy %1,%0";
2391 }
2392 \f
2393
2394 /* Compute position (in OP[1]) and width (in OP[2])
2395 useful for copying IMM to a register using the zdepi
2396 instructions. Store the immediate value to insert in OP[0]. */
2397 static void
2398 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2399 {
2400 int lsb, len;
2401
2402 /* Find the least significant set bit in IMM. */
2403 for (lsb = 0; lsb < 32; lsb++)
2404 {
2405 if ((imm & 1) != 0)
2406 break;
2407 imm >>= 1;
2408 }
2409
2410 /* Choose variants based on *sign* of the 5-bit field. */
2411 if ((imm & 0x10) == 0)
2412 len = (lsb <= 28) ? 4 : 32 - lsb;
2413 else
2414 {
2415 /* Find the width of the bitstring in IMM. */
2416 for (len = 5; len < 32 - lsb; len++)
2417 {
2418 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2419 break;
2420 }
2421
2422 /* Sign extend IMM as a 5-bit value. */
2423 imm = (imm & 0xf) - 0x10;
2424 }
2425
2426 op[0] = imm;
2427 op[1] = 31 - lsb;
2428 op[2] = len;
2429 }
2430
2431 /* Compute position (in OP[1]) and width (in OP[2])
2432 useful for copying IMM to a register using the depdi,z
2433 instructions. Store the immediate value to insert in OP[0]. */
2434
2435 static void
2436 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2437 {
2438 int lsb, len, maxlen;
2439
2440 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2441
2442 /* Find the least significant set bit in IMM. */
2443 for (lsb = 0; lsb < maxlen; lsb++)
2444 {
2445 if ((imm & 1) != 0)
2446 break;
2447 imm >>= 1;
2448 }
2449
2450 /* Choose variants based on *sign* of the 5-bit field. */
2451 if ((imm & 0x10) == 0)
2452 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2453 else
2454 {
2455 /* Find the width of the bitstring in IMM. */
2456 for (len = 5; len < maxlen - lsb; len++)
2457 {
2458 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2459 break;
2460 }
2461
2462 /* Extend length if host is narrow and IMM is negative. */
2463 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2464 len += 32;
2465
2466 /* Sign extend IMM as a 5-bit value. */
2467 imm = (imm & 0xf) - 0x10;
2468 }
2469
2470 op[0] = imm;
2471 op[1] = 63 - lsb;
2472 op[2] = len;
2473 }
2474
2475 /* Output assembler code to perform a doubleword move insn
2476 with operands OPERANDS. */
2477
2478 const char *
2479 pa_output_move_double (rtx *operands)
2480 {
2481 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2482 rtx latehalf[2];
2483 rtx addreg0 = 0, addreg1 = 0;
2484 int highonly = 0;
2485
2486 /* First classify both operands. */
2487
2488 if (REG_P (operands[0]))
2489 optype0 = REGOP;
2490 else if (offsettable_memref_p (operands[0]))
2491 optype0 = OFFSOP;
2492 else if (GET_CODE (operands[0]) == MEM)
2493 optype0 = MEMOP;
2494 else
2495 optype0 = RNDOP;
2496
2497 if (REG_P (operands[1]))
2498 optype1 = REGOP;
2499 else if (CONSTANT_P (operands[1]))
2500 optype1 = CNSTOP;
2501 else if (offsettable_memref_p (operands[1]))
2502 optype1 = OFFSOP;
2503 else if (GET_CODE (operands[1]) == MEM)
2504 optype1 = MEMOP;
2505 else
2506 optype1 = RNDOP;
2507
2508 /* Check for the cases that the operand constraints are not
2509 supposed to allow to happen. */
2510 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2511
2512 /* Handle copies between general and floating registers. */
2513
2514 if (optype0 == REGOP && optype1 == REGOP
2515 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2516 {
2517 if (FP_REG_P (operands[0]))
2518 {
2519 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2520 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2521 return "{fldds|fldd} -16(%%sp),%0";
2522 }
2523 else
2524 {
2525 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2526 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2527 return "{ldws|ldw} -12(%%sp),%R0";
2528 }
2529 }
2530
2531 /* Handle auto decrementing and incrementing loads and stores
2532 specifically, since the structure of the function doesn't work
2533 for them without major modification. Do it better when we learn
2534 this port about the general inc/dec addressing of PA.
2535 (This was written by tege. Chide him if it doesn't work.) */
2536
2537 if (optype0 == MEMOP)
2538 {
2539 /* We have to output the address syntax ourselves, since print_operand
2540 doesn't deal with the addresses we want to use. Fix this later. */
2541
2542 rtx addr = XEXP (operands[0], 0);
2543 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2544 {
2545 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2546
2547 operands[0] = XEXP (addr, 0);
2548 gcc_assert (GET_CODE (operands[1]) == REG
2549 && GET_CODE (operands[0]) == REG);
2550
2551 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2552
2553 /* No overlap between high target register and address
2554 register. (We do this in a non-obvious way to
2555 save a register file writeback) */
2556 if (GET_CODE (addr) == POST_INC)
2557 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2558 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2559 }
2560 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2561 {
2562 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2563
2564 operands[0] = XEXP (addr, 0);
2565 gcc_assert (GET_CODE (operands[1]) == REG
2566 && GET_CODE (operands[0]) == REG);
2567
2568 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2569 /* No overlap between high target register and address
2570 register. (We do this in a non-obvious way to save a
2571 register file writeback) */
2572 if (GET_CODE (addr) == PRE_INC)
2573 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2574 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2575 }
2576 }
2577 if (optype1 == MEMOP)
2578 {
2579 /* We have to output the address syntax ourselves, since print_operand
2580 doesn't deal with the addresses we want to use. Fix this later. */
2581
2582 rtx addr = XEXP (operands[1], 0);
2583 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2584 {
2585 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2586
2587 operands[1] = XEXP (addr, 0);
2588 gcc_assert (GET_CODE (operands[0]) == REG
2589 && GET_CODE (operands[1]) == REG);
2590
2591 if (!reg_overlap_mentioned_p (high_reg, addr))
2592 {
2593 /* No overlap between high target register and address
2594 register. (We do this in a non-obvious way to
2595 save a register file writeback) */
2596 if (GET_CODE (addr) == POST_INC)
2597 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2598 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2599 }
2600 else
2601 {
2602 /* This is an undefined situation. We should load into the
2603 address register *and* update that register. Probably
2604 we don't need to handle this at all. */
2605 if (GET_CODE (addr) == POST_INC)
2606 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2607 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2608 }
2609 }
2610 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2611 {
2612 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2613
2614 operands[1] = XEXP (addr, 0);
2615 gcc_assert (GET_CODE (operands[0]) == REG
2616 && GET_CODE (operands[1]) == REG);
2617
2618 if (!reg_overlap_mentioned_p (high_reg, addr))
2619 {
2620 /* No overlap between high target register and address
2621 register. (We do this in a non-obvious way to
2622 save a register file writeback) */
2623 if (GET_CODE (addr) == PRE_INC)
2624 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2625 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2626 }
2627 else
2628 {
2629 /* This is an undefined situation. We should load into the
2630 address register *and* update that register. Probably
2631 we don't need to handle this at all. */
2632 if (GET_CODE (addr) == PRE_INC)
2633 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2634 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2635 }
2636 }
2637 else if (GET_CODE (addr) == PLUS
2638 && GET_CODE (XEXP (addr, 0)) == MULT)
2639 {
2640 rtx xoperands[4];
2641
2642 /* Load address into left half of destination register. */
2643 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2644 xoperands[1] = XEXP (addr, 1);
2645 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2646 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2647 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2648 xoperands);
2649 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2650 }
2651 else if (GET_CODE (addr) == PLUS
2652 && REG_P (XEXP (addr, 0))
2653 && REG_P (XEXP (addr, 1)))
2654 {
2655 rtx xoperands[3];
2656
2657 /* Load address into left half of destination register. */
2658 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2659 xoperands[1] = XEXP (addr, 0);
2660 xoperands[2] = XEXP (addr, 1);
2661 output_asm_insn ("{addl|add,l} %1,%2,%0",
2662 xoperands);
2663 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2664 }
2665 }
2666
2667 /* If an operand is an unoffsettable memory ref, find a register
2668 we can increment temporarily to make it refer to the second word. */
2669
2670 if (optype0 == MEMOP)
2671 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2672
2673 if (optype1 == MEMOP)
2674 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2675
2676 /* Ok, we can do one word at a time.
2677 Normally we do the low-numbered word first.
2678
2679 In either case, set up in LATEHALF the operands to use
2680 for the high-numbered word and in some cases alter the
2681 operands in OPERANDS to be suitable for the low-numbered word. */
2682
2683 if (optype0 == REGOP)
2684 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2685 else if (optype0 == OFFSOP)
2686 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2687 else
2688 latehalf[0] = operands[0];
2689
2690 if (optype1 == REGOP)
2691 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2692 else if (optype1 == OFFSOP)
2693 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2694 else if (optype1 == CNSTOP)
2695 {
2696 if (GET_CODE (operands[1]) == HIGH)
2697 {
2698 operands[1] = XEXP (operands[1], 0);
2699 highonly = 1;
2700 }
2701 split_double (operands[1], &operands[1], &latehalf[1]);
2702 }
2703 else
2704 latehalf[1] = operands[1];
2705
2706 /* If the first move would clobber the source of the second one,
2707 do them in the other order.
2708
2709 This can happen in two cases:
2710
2711 mem -> register where the first half of the destination register
2712 is the same register used in the memory's address. Reload
2713 can create such insns.
2714
2715 mem in this case will be either register indirect or register
2716 indirect plus a valid offset.
2717
2718 register -> register move where REGNO(dst) == REGNO(src + 1)
2719 someone (Tim/Tege?) claimed this can happen for parameter loads.
2720
2721 Handle mem -> register case first. */
2722 if (optype0 == REGOP
2723 && (optype1 == MEMOP || optype1 == OFFSOP)
2724 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2725 {
2726 /* Do the late half first. */
2727 if (addreg1)
2728 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2729 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2730
2731 /* Then clobber. */
2732 if (addreg1)
2733 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2734 return pa_singlemove_string (operands);
2735 }
2736
2737 /* Now handle register -> register case. */
2738 if (optype0 == REGOP && optype1 == REGOP
2739 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2740 {
2741 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2742 return pa_singlemove_string (operands);
2743 }
2744
2745 /* Normal case: do the two words, low-numbered first. */
2746
2747 output_asm_insn (pa_singlemove_string (operands), operands);
2748
2749 /* Make any unoffsettable addresses point at high-numbered word. */
2750 if (addreg0)
2751 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2752 if (addreg1)
2753 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2754
2755 /* Do high-numbered word. */
2756 if (highonly)
2757 output_asm_insn ("ldil L'%1,%0", latehalf);
2758 else
2759 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2760
2761 /* Undo the adds we just did. */
2762 if (addreg0)
2763 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2764 if (addreg1)
2765 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2766
2767 return "";
2768 }
2769 \f
2770 const char *
2771 pa_output_fp_move_double (rtx *operands)
2772 {
2773 if (FP_REG_P (operands[0]))
2774 {
2775 if (FP_REG_P (operands[1])
2776 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2777 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2778 else
2779 output_asm_insn ("fldd%F1 %1,%0", operands);
2780 }
2781 else if (FP_REG_P (operands[1]))
2782 {
2783 output_asm_insn ("fstd%F0 %1,%0", operands);
2784 }
2785 else
2786 {
2787 rtx xoperands[2];
2788
2789 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2790
2791 /* This is a pain. You have to be prepared to deal with an
2792 arbitrary address here including pre/post increment/decrement.
2793
2794 so avoid this in the MD. */
2795 gcc_assert (GET_CODE (operands[0]) == REG);
2796
2797 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2798 xoperands[0] = operands[0];
2799 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2800 }
2801 return "";
2802 }
2803 \f
2804 /* Return a REG that occurs in ADDR with coefficient 1.
2805 ADDR can be effectively incremented by incrementing REG. */
2806
2807 static rtx
2808 find_addr_reg (rtx addr)
2809 {
2810 while (GET_CODE (addr) == PLUS)
2811 {
2812 if (GET_CODE (XEXP (addr, 0)) == REG)
2813 addr = XEXP (addr, 0);
2814 else if (GET_CODE (XEXP (addr, 1)) == REG)
2815 addr = XEXP (addr, 1);
2816 else if (CONSTANT_P (XEXP (addr, 0)))
2817 addr = XEXP (addr, 1);
2818 else if (CONSTANT_P (XEXP (addr, 1)))
2819 addr = XEXP (addr, 0);
2820 else
2821 gcc_unreachable ();
2822 }
2823 gcc_assert (GET_CODE (addr) == REG);
2824 return addr;
2825 }
2826
2827 /* Emit code to perform a block move.
2828
2829 OPERANDS[0] is the destination pointer as a REG, clobbered.
2830 OPERANDS[1] is the source pointer as a REG, clobbered.
2831 OPERANDS[2] is a register for temporary storage.
2832 OPERANDS[3] is a register for temporary storage.
2833 OPERANDS[4] is the size as a CONST_INT
2834 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2835 OPERANDS[6] is another temporary register. */
2836
2837 const char *
2838 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2839 {
2840 int align = INTVAL (operands[5]);
2841 unsigned long n_bytes = INTVAL (operands[4]);
2842
2843 /* We can't move more than a word at a time because the PA
2844 has no longer integer move insns. (Could use fp mem ops?) */
2845 if (align > (TARGET_64BIT ? 8 : 4))
2846 align = (TARGET_64BIT ? 8 : 4);
2847
2848 /* Note that we know each loop below will execute at least twice
2849 (else we would have open-coded the copy). */
2850 switch (align)
2851 {
2852 case 8:
2853 /* Pre-adjust the loop counter. */
2854 operands[4] = GEN_INT (n_bytes - 16);
2855 output_asm_insn ("ldi %4,%2", operands);
2856
2857 /* Copying loop. */
2858 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2859 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2860 output_asm_insn ("std,ma %3,8(%0)", operands);
2861 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2862 output_asm_insn ("std,ma %6,8(%0)", operands);
2863
2864 /* Handle the residual. There could be up to 7 bytes of
2865 residual to copy! */
2866 if (n_bytes % 16 != 0)
2867 {
2868 operands[4] = GEN_INT (n_bytes % 8);
2869 if (n_bytes % 16 >= 8)
2870 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2871 if (n_bytes % 8 != 0)
2872 output_asm_insn ("ldd 0(%1),%6", operands);
2873 if (n_bytes % 16 >= 8)
2874 output_asm_insn ("std,ma %3,8(%0)", operands);
2875 if (n_bytes % 8 != 0)
2876 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2877 }
2878 return "";
2879
2880 case 4:
2881 /* Pre-adjust the loop counter. */
2882 operands[4] = GEN_INT (n_bytes - 8);
2883 output_asm_insn ("ldi %4,%2", operands);
2884
2885 /* Copying loop. */
2886 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2887 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2888 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2889 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2890 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2891
2892 /* Handle the residual. There could be up to 7 bytes of
2893 residual to copy! */
2894 if (n_bytes % 8 != 0)
2895 {
2896 operands[4] = GEN_INT (n_bytes % 4);
2897 if (n_bytes % 8 >= 4)
2898 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2899 if (n_bytes % 4 != 0)
2900 output_asm_insn ("ldw 0(%1),%6", operands);
2901 if (n_bytes % 8 >= 4)
2902 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2903 if (n_bytes % 4 != 0)
2904 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2905 }
2906 return "";
2907
2908 case 2:
2909 /* Pre-adjust the loop counter. */
2910 operands[4] = GEN_INT (n_bytes - 4);
2911 output_asm_insn ("ldi %4,%2", operands);
2912
2913 /* Copying loop. */
2914 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2915 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2916 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2917 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2918 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2919
2920 /* Handle the residual. */
2921 if (n_bytes % 4 != 0)
2922 {
2923 if (n_bytes % 4 >= 2)
2924 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2925 if (n_bytes % 2 != 0)
2926 output_asm_insn ("ldb 0(%1),%6", operands);
2927 if (n_bytes % 4 >= 2)
2928 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2929 if (n_bytes % 2 != 0)
2930 output_asm_insn ("stb %6,0(%0)", operands);
2931 }
2932 return "";
2933
2934 case 1:
2935 /* Pre-adjust the loop counter. */
2936 operands[4] = GEN_INT (n_bytes - 2);
2937 output_asm_insn ("ldi %4,%2", operands);
2938
2939 /* Copying loop. */
2940 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2941 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2942 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2943 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2944 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2945
2946 /* Handle the residual. */
2947 if (n_bytes % 2 != 0)
2948 {
2949 output_asm_insn ("ldb 0(%1),%3", operands);
2950 output_asm_insn ("stb %3,0(%0)", operands);
2951 }
2952 return "";
2953
2954 default:
2955 gcc_unreachable ();
2956 }
2957 }
2958
2959 /* Count the number of insns necessary to handle this block move.
2960
2961 Basic structure is the same as emit_block_move, except that we
2962 count insns rather than emit them. */
2963
2964 static int
2965 compute_movmem_length (rtx_insn *insn)
2966 {
2967 rtx pat = PATTERN (insn);
2968 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2969 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2970 unsigned int n_insns = 0;
2971
2972 /* We can't move more than four bytes at a time because the PA
2973 has no longer integer move insns. (Could use fp mem ops?) */
2974 if (align > (TARGET_64BIT ? 8 : 4))
2975 align = (TARGET_64BIT ? 8 : 4);
2976
2977 /* The basic copying loop. */
2978 n_insns = 6;
2979
2980 /* Residuals. */
2981 if (n_bytes % (2 * align) != 0)
2982 {
2983 if ((n_bytes % (2 * align)) >= align)
2984 n_insns += 2;
2985
2986 if ((n_bytes % align) != 0)
2987 n_insns += 2;
2988 }
2989
2990 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2991 return n_insns * 4;
2992 }
2993
2994 /* Emit code to perform a block clear.
2995
2996 OPERANDS[0] is the destination pointer as a REG, clobbered.
2997 OPERANDS[1] is a register for temporary storage.
2998 OPERANDS[2] is the size as a CONST_INT
2999 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3000
3001 const char *
3002 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3003 {
3004 int align = INTVAL (operands[3]);
3005 unsigned long n_bytes = INTVAL (operands[2]);
3006
3007 /* We can't clear more than a word at a time because the PA
3008 has no longer integer move insns. */
3009 if (align > (TARGET_64BIT ? 8 : 4))
3010 align = (TARGET_64BIT ? 8 : 4);
3011
3012 /* Note that we know each loop below will execute at least twice
3013 (else we would have open-coded the copy). */
3014 switch (align)
3015 {
3016 case 8:
3017 /* Pre-adjust the loop counter. */
3018 operands[2] = GEN_INT (n_bytes - 16);
3019 output_asm_insn ("ldi %2,%1", operands);
3020
3021 /* Loop. */
3022 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3023 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3024 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3025
3026 /* Handle the residual. There could be up to 7 bytes of
3027 residual to copy! */
3028 if (n_bytes % 16 != 0)
3029 {
3030 operands[2] = GEN_INT (n_bytes % 8);
3031 if (n_bytes % 16 >= 8)
3032 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3033 if (n_bytes % 8 != 0)
3034 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3035 }
3036 return "";
3037
3038 case 4:
3039 /* Pre-adjust the loop counter. */
3040 operands[2] = GEN_INT (n_bytes - 8);
3041 output_asm_insn ("ldi %2,%1", operands);
3042
3043 /* Loop. */
3044 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3045 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3046 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3047
3048 /* Handle the residual. There could be up to 7 bytes of
3049 residual to copy! */
3050 if (n_bytes % 8 != 0)
3051 {
3052 operands[2] = GEN_INT (n_bytes % 4);
3053 if (n_bytes % 8 >= 4)
3054 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3055 if (n_bytes % 4 != 0)
3056 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3057 }
3058 return "";
3059
3060 case 2:
3061 /* Pre-adjust the loop counter. */
3062 operands[2] = GEN_INT (n_bytes - 4);
3063 output_asm_insn ("ldi %2,%1", operands);
3064
3065 /* Loop. */
3066 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3067 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3068 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3069
3070 /* Handle the residual. */
3071 if (n_bytes % 4 != 0)
3072 {
3073 if (n_bytes % 4 >= 2)
3074 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3075 if (n_bytes % 2 != 0)
3076 output_asm_insn ("stb %%r0,0(%0)", operands);
3077 }
3078 return "";
3079
3080 case 1:
3081 /* Pre-adjust the loop counter. */
3082 operands[2] = GEN_INT (n_bytes - 2);
3083 output_asm_insn ("ldi %2,%1", operands);
3084
3085 /* Loop. */
3086 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3087 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3088 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3089
3090 /* Handle the residual. */
3091 if (n_bytes % 2 != 0)
3092 output_asm_insn ("stb %%r0,0(%0)", operands);
3093
3094 return "";
3095
3096 default:
3097 gcc_unreachable ();
3098 }
3099 }
3100
3101 /* Count the number of insns necessary to handle this block move.
3102
3103 Basic structure is the same as emit_block_move, except that we
3104 count insns rather than emit them. */
3105
3106 static int
3107 compute_clrmem_length (rtx_insn *insn)
3108 {
3109 rtx pat = PATTERN (insn);
3110 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3111 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3112 unsigned int n_insns = 0;
3113
3114 /* We can't clear more than a word at a time because the PA
3115 has no longer integer move insns. */
3116 if (align > (TARGET_64BIT ? 8 : 4))
3117 align = (TARGET_64BIT ? 8 : 4);
3118
3119 /* The basic loop. */
3120 n_insns = 4;
3121
3122 /* Residuals. */
3123 if (n_bytes % (2 * align) != 0)
3124 {
3125 if ((n_bytes % (2 * align)) >= align)
3126 n_insns++;
3127
3128 if ((n_bytes % align) != 0)
3129 n_insns++;
3130 }
3131
3132 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3133 return n_insns * 4;
3134 }
3135 \f
3136
3137 const char *
3138 pa_output_and (rtx *operands)
3139 {
3140 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3141 {
3142 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3143 int ls0, ls1, ms0, p, len;
3144
3145 for (ls0 = 0; ls0 < 32; ls0++)
3146 if ((mask & (1 << ls0)) == 0)
3147 break;
3148
3149 for (ls1 = ls0; ls1 < 32; ls1++)
3150 if ((mask & (1 << ls1)) != 0)
3151 break;
3152
3153 for (ms0 = ls1; ms0 < 32; ms0++)
3154 if ((mask & (1 << ms0)) == 0)
3155 break;
3156
3157 gcc_assert (ms0 == 32);
3158
3159 if (ls1 == 32)
3160 {
3161 len = ls0;
3162
3163 gcc_assert (len);
3164
3165 operands[2] = GEN_INT (len);
3166 return "{extru|extrw,u} %1,31,%2,%0";
3167 }
3168 else
3169 {
3170 /* We could use this `depi' for the case above as well, but `depi'
3171 requires one more register file access than an `extru'. */
3172
3173 p = 31 - ls0;
3174 len = ls1 - ls0;
3175
3176 operands[2] = GEN_INT (p);
3177 operands[3] = GEN_INT (len);
3178 return "{depi|depwi} 0,%2,%3,%0";
3179 }
3180 }
3181 else
3182 return "and %1,%2,%0";
3183 }
3184
3185 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3186 storing the result in operands[0]. */
3187 const char *
3188 pa_output_64bit_and (rtx *operands)
3189 {
3190 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3191 {
3192 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3193 int ls0, ls1, ms0, p, len;
3194
3195 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3196 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3197 break;
3198
3199 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3200 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3201 break;
3202
3203 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3204 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3205 break;
3206
3207 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3208
3209 if (ls1 == HOST_BITS_PER_WIDE_INT)
3210 {
3211 len = ls0;
3212
3213 gcc_assert (len);
3214
3215 operands[2] = GEN_INT (len);
3216 return "extrd,u %1,63,%2,%0";
3217 }
3218 else
3219 {
3220 /* We could use this `depi' for the case above as well, but `depi'
3221 requires one more register file access than an `extru'. */
3222
3223 p = 63 - ls0;
3224 len = ls1 - ls0;
3225
3226 operands[2] = GEN_INT (p);
3227 operands[3] = GEN_INT (len);
3228 return "depdi 0,%2,%3,%0";
3229 }
3230 }
3231 else
3232 return "and %1,%2,%0";
3233 }
3234
3235 const char *
3236 pa_output_ior (rtx *operands)
3237 {
3238 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3239 int bs0, bs1, p, len;
3240
3241 if (INTVAL (operands[2]) == 0)
3242 return "copy %1,%0";
3243
3244 for (bs0 = 0; bs0 < 32; bs0++)
3245 if ((mask & (1 << bs0)) != 0)
3246 break;
3247
3248 for (bs1 = bs0; bs1 < 32; bs1++)
3249 if ((mask & (1 << bs1)) == 0)
3250 break;
3251
3252 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3253
3254 p = 31 - bs0;
3255 len = bs1 - bs0;
3256
3257 operands[2] = GEN_INT (p);
3258 operands[3] = GEN_INT (len);
3259 return "{depi|depwi} -1,%2,%3,%0";
3260 }
3261
3262 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3263 storing the result in operands[0]. */
3264 const char *
3265 pa_output_64bit_ior (rtx *operands)
3266 {
3267 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3268 int bs0, bs1, p, len;
3269
3270 if (INTVAL (operands[2]) == 0)
3271 return "copy %1,%0";
3272
3273 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3274 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3275 break;
3276
3277 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3278 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3279 break;
3280
3281 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3282 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3283
3284 p = 63 - bs0;
3285 len = bs1 - bs0;
3286
3287 operands[2] = GEN_INT (p);
3288 operands[3] = GEN_INT (len);
3289 return "depdi -1,%2,%3,%0";
3290 }
3291 \f
3292 /* Target hook for assembling integer objects. This code handles
3293 aligned SI and DI integers specially since function references
3294 must be preceded by P%. */
3295
3296 static bool
3297 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3298 {
3299 if (size == UNITS_PER_WORD
3300 && aligned_p
3301 && function_label_operand (x, VOIDmode))
3302 {
3303 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3304
3305 /* We don't want an OPD when generating fast indirect calls. */
3306 if (!TARGET_FAST_INDIRECT_CALLS)
3307 fputs ("P%", asm_out_file);
3308
3309 output_addr_const (asm_out_file, x);
3310 fputc ('\n', asm_out_file);
3311 return true;
3312 }
3313 return default_assemble_integer (x, size, aligned_p);
3314 }
3315 \f
3316 /* Output an ascii string. */
3317 void
3318 pa_output_ascii (FILE *file, const char *p, int size)
3319 {
3320 int i;
3321 int chars_output;
3322 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3323
3324 /* The HP assembler can only take strings of 256 characters at one
3325 time. This is a limitation on input line length, *not* the
3326 length of the string. Sigh. Even worse, it seems that the
3327 restriction is in number of input characters (see \xnn &
3328 \whatever). So we have to do this very carefully. */
3329
3330 fputs ("\t.STRING \"", file);
3331
3332 chars_output = 0;
3333 for (i = 0; i < size; i += 4)
3334 {
3335 int co = 0;
3336 int io = 0;
3337 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3338 {
3339 register unsigned int c = (unsigned char) p[i + io];
3340
3341 if (c == '\"' || c == '\\')
3342 partial_output[co++] = '\\';
3343 if (c >= ' ' && c < 0177)
3344 partial_output[co++] = c;
3345 else
3346 {
3347 unsigned int hexd;
3348 partial_output[co++] = '\\';
3349 partial_output[co++] = 'x';
3350 hexd = c / 16 - 0 + '0';
3351 if (hexd > '9')
3352 hexd -= '9' - 'a' + 1;
3353 partial_output[co++] = hexd;
3354 hexd = c % 16 - 0 + '0';
3355 if (hexd > '9')
3356 hexd -= '9' - 'a' + 1;
3357 partial_output[co++] = hexd;
3358 }
3359 }
3360 if (chars_output + co > 243)
3361 {
3362 fputs ("\"\n\t.STRING \"", file);
3363 chars_output = 0;
3364 }
3365 fwrite (partial_output, 1, (size_t) co, file);
3366 chars_output += co;
3367 co = 0;
3368 }
3369 fputs ("\"\n", file);
3370 }
3371
3372 /* Try to rewrite floating point comparisons & branches to avoid
3373 useless add,tr insns.
3374
3375 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3376 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3377 first attempt to remove useless add,tr insns. It is zero
3378 for the second pass as reorg sometimes leaves bogus REG_DEAD
3379 notes lying around.
3380
3381 When CHECK_NOTES is zero we can only eliminate add,tr insns
3382 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3383 instructions. */
3384 static void
3385 remove_useless_addtr_insns (int check_notes)
3386 {
3387 rtx_insn *insn;
3388 static int pass = 0;
3389
3390 /* This is fairly cheap, so always run it when optimizing. */
3391 if (optimize > 0)
3392 {
3393 int fcmp_count = 0;
3394 int fbranch_count = 0;
3395
3396 /* Walk all the insns in this function looking for fcmp & fbranch
3397 instructions. Keep track of how many of each we find. */
3398 for (insn = get_insns (); insn; insn = next_insn (insn))
3399 {
3400 rtx tmp;
3401
3402 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3403 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3404 continue;
3405
3406 tmp = PATTERN (insn);
3407
3408 /* It must be a set. */
3409 if (GET_CODE (tmp) != SET)
3410 continue;
3411
3412 /* If the destination is CCFP, then we've found an fcmp insn. */
3413 tmp = SET_DEST (tmp);
3414 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3415 {
3416 fcmp_count++;
3417 continue;
3418 }
3419
3420 tmp = PATTERN (insn);
3421 /* If this is an fbranch instruction, bump the fbranch counter. */
3422 if (GET_CODE (tmp) == SET
3423 && SET_DEST (tmp) == pc_rtx
3424 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3425 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3426 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3427 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3428 {
3429 fbranch_count++;
3430 continue;
3431 }
3432 }
3433
3434
3435 /* Find all floating point compare + branch insns. If possible,
3436 reverse the comparison & the branch to avoid add,tr insns. */
3437 for (insn = get_insns (); insn; insn = next_insn (insn))
3438 {
3439 rtx tmp;
3440 rtx_insn *next;
3441
3442 /* Ignore anything that isn't an INSN. */
3443 if (! NONJUMP_INSN_P (insn))
3444 continue;
3445
3446 tmp = PATTERN (insn);
3447
3448 /* It must be a set. */
3449 if (GET_CODE (tmp) != SET)
3450 continue;
3451
3452 /* The destination must be CCFP, which is register zero. */
3453 tmp = SET_DEST (tmp);
3454 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3455 continue;
3456
3457 /* INSN should be a set of CCFP.
3458
3459 See if the result of this insn is used in a reversed FP
3460 conditional branch. If so, reverse our condition and
3461 the branch. Doing so avoids useless add,tr insns. */
3462 next = next_insn (insn);
3463 while (next)
3464 {
3465 /* Jumps, calls and labels stop our search. */
3466 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3467 break;
3468
3469 /* As does another fcmp insn. */
3470 if (NONJUMP_INSN_P (next)
3471 && GET_CODE (PATTERN (next)) == SET
3472 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3473 && REGNO (SET_DEST (PATTERN (next))) == 0)
3474 break;
3475
3476 next = next_insn (next);
3477 }
3478
3479 /* Is NEXT_INSN a branch? */
3480 if (next && JUMP_P (next))
3481 {
3482 rtx pattern = PATTERN (next);
3483
3484 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3485 and CCFP dies, then reverse our conditional and the branch
3486 to avoid the add,tr. */
3487 if (GET_CODE (pattern) == SET
3488 && SET_DEST (pattern) == pc_rtx
3489 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3490 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3491 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3492 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3493 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3494 && (fcmp_count == fbranch_count
3495 || (check_notes
3496 && find_regno_note (next, REG_DEAD, 0))))
3497 {
3498 /* Reverse the branch. */
3499 tmp = XEXP (SET_SRC (pattern), 1);
3500 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3501 XEXP (SET_SRC (pattern), 2) = tmp;
3502 INSN_CODE (next) = -1;
3503
3504 /* Reverse our condition. */
3505 tmp = PATTERN (insn);
3506 PUT_CODE (XEXP (tmp, 1),
3507 (reverse_condition_maybe_unordered
3508 (GET_CODE (XEXP (tmp, 1)))));
3509 }
3510 }
3511 }
3512 }
3513
3514 pass = !pass;
3515
3516 }
3517 \f
3518 /* You may have trouble believing this, but this is the 32 bit HP-PA
3519 stack layout. Wow.
3520
3521 Offset Contents
3522
3523 Variable arguments (optional; any number may be allocated)
3524
3525 SP-(4*(N+9)) arg word N
3526 : :
3527 SP-56 arg word 5
3528 SP-52 arg word 4
3529
3530 Fixed arguments (must be allocated; may remain unused)
3531
3532 SP-48 arg word 3
3533 SP-44 arg word 2
3534 SP-40 arg word 1
3535 SP-36 arg word 0
3536
3537 Frame Marker
3538
3539 SP-32 External Data Pointer (DP)
3540 SP-28 External sr4
3541 SP-24 External/stub RP (RP')
3542 SP-20 Current RP
3543 SP-16 Static Link
3544 SP-12 Clean up
3545 SP-8 Calling Stub RP (RP'')
3546 SP-4 Previous SP
3547
3548 Top of Frame
3549
3550 SP-0 Stack Pointer (points to next available address)
3551
3552 */
3553
3554 /* This function saves registers as follows. Registers marked with ' are
3555 this function's registers (as opposed to the previous function's).
3556 If a frame_pointer isn't needed, r4 is saved as a general register;
3557 the space for the frame pointer is still allocated, though, to keep
3558 things simple.
3559
3560
3561 Top of Frame
3562
3563 SP (FP') Previous FP
3564 SP + 4 Alignment filler (sigh)
3565 SP + 8 Space for locals reserved here.
3566 .
3567 .
3568 .
3569 SP + n All call saved register used.
3570 .
3571 .
3572 .
3573 SP + o All call saved fp registers used.
3574 .
3575 .
3576 .
3577 SP + p (SP') points to next available address.
3578
3579 */
3580
3581 /* Global variables set by output_function_prologue(). */
3582 /* Size of frame. Need to know this to emit return insns from
3583 leaf procedures. */
3584 static HOST_WIDE_INT actual_fsize, local_fsize;
3585 static int save_fregs;
3586
3587 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3588 Handle case where DISP > 8k by using the add_high_const patterns.
3589
3590 Note in DISP > 8k case, we will leave the high part of the address
3591 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
3592
3593 static void
3594 store_reg (int reg, HOST_WIDE_INT disp, int base)
3595 {
3596 rtx dest, src, basereg;
3597 rtx_insn *insn;
3598
3599 src = gen_rtx_REG (word_mode, reg);
3600 basereg = gen_rtx_REG (Pmode, base);
3601 if (VAL_14_BITS_P (disp))
3602 {
3603 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3604 insn = emit_move_insn (dest, src);
3605 }
3606 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3607 {
3608 rtx delta = GEN_INT (disp);
3609 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3610
3611 emit_move_insn (tmpreg, delta);
3612 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3613 if (DO_FRAME_NOTES)
3614 {
3615 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3616 gen_rtx_SET (tmpreg,
3617 gen_rtx_PLUS (Pmode, basereg, delta)));
3618 RTX_FRAME_RELATED_P (insn) = 1;
3619 }
3620 dest = gen_rtx_MEM (word_mode, tmpreg);
3621 insn = emit_move_insn (dest, src);
3622 }
3623 else
3624 {
3625 rtx delta = GEN_INT (disp);
3626 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3627 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3628
3629 emit_move_insn (tmpreg, high);
3630 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3631 insn = emit_move_insn (dest, src);
3632 if (DO_FRAME_NOTES)
3633 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3634 gen_rtx_SET (gen_rtx_MEM (word_mode,
3635 gen_rtx_PLUS (word_mode,
3636 basereg,
3637 delta)),
3638 src));
3639 }
3640
3641 if (DO_FRAME_NOTES)
3642 RTX_FRAME_RELATED_P (insn) = 1;
3643 }
3644
3645 /* Emit RTL to store REG at the memory location specified by BASE and then
3646 add MOD to BASE. MOD must be <= 8k. */
3647
3648 static void
3649 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3650 {
3651 rtx basereg, srcreg, delta;
3652 rtx_insn *insn;
3653
3654 gcc_assert (VAL_14_BITS_P (mod));
3655
3656 basereg = gen_rtx_REG (Pmode, base);
3657 srcreg = gen_rtx_REG (word_mode, reg);
3658 delta = GEN_INT (mod);
3659
3660 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3661 if (DO_FRAME_NOTES)
3662 {
3663 RTX_FRAME_RELATED_P (insn) = 1;
3664
3665 /* RTX_FRAME_RELATED_P must be set on each frame related set
3666 in a parallel with more than one element. */
3667 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3668 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3669 }
3670 }
3671
3672 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3673 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3674 whether to add a frame note or not.
3675
3676 In the DISP > 8k case, we leave the high part of the address in %r1.
3677 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3678
3679 static void
3680 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3681 {
3682 rtx_insn *insn;
3683
3684 if (VAL_14_BITS_P (disp))
3685 {
3686 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3687 plus_constant (Pmode,
3688 gen_rtx_REG (Pmode, base), disp));
3689 }
3690 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3691 {
3692 rtx basereg = gen_rtx_REG (Pmode, base);
3693 rtx delta = GEN_INT (disp);
3694 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3695
3696 emit_move_insn (tmpreg, delta);
3697 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3698 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3699 if (DO_FRAME_NOTES)
3700 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3701 gen_rtx_SET (tmpreg,
3702 gen_rtx_PLUS (Pmode, basereg, delta)));
3703 }
3704 else
3705 {
3706 rtx basereg = gen_rtx_REG (Pmode, base);
3707 rtx delta = GEN_INT (disp);
3708 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3709
3710 emit_move_insn (tmpreg,
3711 gen_rtx_PLUS (Pmode, basereg,
3712 gen_rtx_HIGH (Pmode, delta)));
3713 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3714 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3715 }
3716
3717 if (DO_FRAME_NOTES && note)
3718 RTX_FRAME_RELATED_P (insn) = 1;
3719 }
3720
3721 HOST_WIDE_INT
3722 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3723 {
3724 int freg_saved = 0;
3725 int i, j;
3726
3727 /* The code in pa_expand_prologue and pa_expand_epilogue must
3728 be consistent with the rounding and size calculation done here.
3729 Change them at the same time. */
3730
3731 /* We do our own stack alignment. First, round the size of the
3732 stack locals up to a word boundary. */
3733 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3734
3735 /* Space for previous frame pointer + filler. If any frame is
3736 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3737 waste some space here for the sake of HP compatibility. The
3738 first slot is only used when the frame pointer is needed. */
3739 if (size || frame_pointer_needed)
3740 size += STARTING_FRAME_OFFSET;
3741
3742 /* If the current function calls __builtin_eh_return, then we need
3743 to allocate stack space for registers that will hold data for
3744 the exception handler. */
3745 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3746 {
3747 unsigned int i;
3748
3749 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3750 continue;
3751 size += i * UNITS_PER_WORD;
3752 }
3753
3754 /* Account for space used by the callee general register saves. */
3755 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3756 if (df_regs_ever_live_p (i))
3757 size += UNITS_PER_WORD;
3758
3759 /* Account for space used by the callee floating point register saves. */
3760 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3761 if (df_regs_ever_live_p (i)
3762 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3763 {
3764 freg_saved = 1;
3765
3766 /* We always save both halves of the FP register, so always
3767 increment the frame size by 8 bytes. */
3768 size += 8;
3769 }
3770
3771 /* If any of the floating registers are saved, account for the
3772 alignment needed for the floating point register save block. */
3773 if (freg_saved)
3774 {
3775 size = (size + 7) & ~7;
3776 if (fregs_live)
3777 *fregs_live = 1;
3778 }
3779
3780 /* The various ABIs include space for the outgoing parameters in the
3781 size of the current function's stack frame. We don't need to align
3782 for the outgoing arguments as their alignment is set by the final
3783 rounding for the frame as a whole. */
3784 size += crtl->outgoing_args_size;
3785
3786 /* Allocate space for the fixed frame marker. This space must be
3787 allocated for any function that makes calls or allocates
3788 stack space. */
3789 if (!crtl->is_leaf || size)
3790 size += TARGET_64BIT ? 48 : 32;
3791
3792 /* Finally, round to the preferred stack boundary. */
3793 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3794 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3795 }
3796
3797 /* Generate the assembly code for function entry. FILE is a stdio
3798 stream to output the code to. SIZE is an int: how many units of
3799 temporary storage to allocate.
3800
3801 Refer to the array `regs_ever_live' to determine which registers to
3802 save; `regs_ever_live[I]' is nonzero if register number I is ever
3803 used in the function. This function is responsible for knowing
3804 which registers should not be saved even if used. */
3805
3806 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3807 of memory. If any fpu reg is used in the function, we allocate
3808 such a block here, at the bottom of the frame, just in case it's needed.
3809
3810 If this function is a leaf procedure, then we may choose not
3811 to do a "save" insn. The decision about whether or not
3812 to do this is made in regclass.c. */
3813
3814 static void
3815 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3816 {
3817 /* The function's label and associated .PROC must never be
3818 separated and must be output *after* any profiling declarations
3819 to avoid changing spaces/subspaces within a procedure. */
3820 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3821 fputs ("\t.PROC\n", file);
3822
3823 /* pa_expand_prologue does the dirty work now. We just need
3824 to output the assembler directives which denote the start
3825 of a function. */
3826 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3827 if (crtl->is_leaf)
3828 fputs (",NO_CALLS", file);
3829 else
3830 fputs (",CALLS", file);
3831 if (rp_saved)
3832 fputs (",SAVE_RP", file);
3833
3834 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3835 at the beginning of the frame and that it is used as the frame
3836 pointer for the frame. We do this because our current frame
3837 layout doesn't conform to that specified in the HP runtime
3838 documentation and we need a way to indicate to programs such as
3839 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3840 isn't used by HP compilers but is supported by the assembler.
3841 However, SAVE_SP is supposed to indicate that the previous stack
3842 pointer has been saved in the frame marker. */
3843 if (frame_pointer_needed)
3844 fputs (",SAVE_SP", file);
3845
3846 /* Pass on information about the number of callee register saves
3847 performed in the prologue.
3848
3849 The compiler is supposed to pass the highest register number
3850 saved, the assembler then has to adjust that number before
3851 entering it into the unwind descriptor (to account for any
3852 caller saved registers with lower register numbers than the
3853 first callee saved register). */
3854 if (gr_saved)
3855 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3856
3857 if (fr_saved)
3858 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3859
3860 fputs ("\n\t.ENTRY\n", file);
3861
3862 remove_useless_addtr_insns (0);
3863 }
3864
3865 void
3866 pa_expand_prologue (void)
3867 {
3868 int merge_sp_adjust_with_store = 0;
3869 HOST_WIDE_INT size = get_frame_size ();
3870 HOST_WIDE_INT offset;
3871 int i;
3872 rtx tmpreg;
3873 rtx_insn *insn;
3874
3875 gr_saved = 0;
3876 fr_saved = 0;
3877 save_fregs = 0;
3878
3879 /* Compute total size for frame pointer, filler, locals and rounding to
3880 the next word boundary. Similar code appears in pa_compute_frame_size
3881 and must be changed in tandem with this code. */
3882 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3883 if (local_fsize || frame_pointer_needed)
3884 local_fsize += STARTING_FRAME_OFFSET;
3885
3886 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3887 if (flag_stack_usage_info)
3888 current_function_static_stack_size = actual_fsize;
3889
3890 /* Compute a few things we will use often. */
3891 tmpreg = gen_rtx_REG (word_mode, 1);
3892
3893 /* Save RP first. The calling conventions manual states RP will
3894 always be stored into the caller's frame at sp - 20 or sp - 16
3895 depending on which ABI is in use. */
3896 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3897 {
3898 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3899 rp_saved = true;
3900 }
3901 else
3902 rp_saved = false;
3903
3904 /* Allocate the local frame and set up the frame pointer if needed. */
3905 if (actual_fsize != 0)
3906 {
3907 if (frame_pointer_needed)
3908 {
3909 /* Copy the old frame pointer temporarily into %r1. Set up the
3910 new stack pointer, then store away the saved old frame pointer
3911 into the stack at sp and at the same time update the stack
3912 pointer by actual_fsize bytes. Two versions, first
3913 handles small (<8k) frames. The second handles large (>=8k)
3914 frames. */
3915 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3916 if (DO_FRAME_NOTES)
3917 RTX_FRAME_RELATED_P (insn) = 1;
3918
3919 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3920 if (DO_FRAME_NOTES)
3921 RTX_FRAME_RELATED_P (insn) = 1;
3922
3923 if (VAL_14_BITS_P (actual_fsize))
3924 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3925 else
3926 {
3927 /* It is incorrect to store the saved frame pointer at *sp,
3928 then increment sp (writes beyond the current stack boundary).
3929
3930 So instead use stwm to store at *sp and post-increment the
3931 stack pointer as an atomic operation. Then increment sp to
3932 finish allocating the new frame. */
3933 HOST_WIDE_INT adjust1 = 8192 - 64;
3934 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3935
3936 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3937 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3938 adjust2, 1);
3939 }
3940
3941 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3942 we need to store the previous stack pointer (frame pointer)
3943 into the frame marker on targets that use the HP unwind
3944 library. This allows the HP unwind library to be used to
3945 unwind GCC frames. However, we are not fully compatible
3946 with the HP library because our frame layout differs from
3947 that specified in the HP runtime specification.
3948
3949 We don't want a frame note on this instruction as the frame
3950 marker moves during dynamic stack allocation.
3951
3952 This instruction also serves as a blockage to prevent
3953 register spills from being scheduled before the stack
3954 pointer is raised. This is necessary as we store
3955 registers using the frame pointer as a base register,
3956 and the frame pointer is set before sp is raised. */
3957 if (TARGET_HPUX_UNWIND_LIBRARY)
3958 {
3959 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3960 GEN_INT (TARGET_64BIT ? -8 : -4));
3961
3962 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3963 hard_frame_pointer_rtx);
3964 }
3965 else
3966 emit_insn (gen_blockage ());
3967 }
3968 /* no frame pointer needed. */
3969 else
3970 {
3971 /* In some cases we can perform the first callee register save
3972 and allocating the stack frame at the same time. If so, just
3973 make a note of it and defer allocating the frame until saving
3974 the callee registers. */
3975 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3976 merge_sp_adjust_with_store = 1;
3977 /* Can not optimize. Adjust the stack frame by actual_fsize
3978 bytes. */
3979 else
3980 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3981 actual_fsize, 1);
3982 }
3983 }
3984
3985 /* Normal register save.
3986
3987 Do not save the frame pointer in the frame_pointer_needed case. It
3988 was done earlier. */
3989 if (frame_pointer_needed)
3990 {
3991 offset = local_fsize;
3992
3993 /* Saving the EH return data registers in the frame is the simplest
3994 way to get the frame unwind information emitted. We put them
3995 just before the general registers. */
3996 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3997 {
3998 unsigned int i, regno;
3999
4000 for (i = 0; ; ++i)
4001 {
4002 regno = EH_RETURN_DATA_REGNO (i);
4003 if (regno == INVALID_REGNUM)
4004 break;
4005
4006 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4007 offset += UNITS_PER_WORD;
4008 }
4009 }
4010
4011 for (i = 18; i >= 4; i--)
4012 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4013 {
4014 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4015 offset += UNITS_PER_WORD;
4016 gr_saved++;
4017 }
4018 /* Account for %r3 which is saved in a special place. */
4019 gr_saved++;
4020 }
4021 /* No frame pointer needed. */
4022 else
4023 {
4024 offset = local_fsize - actual_fsize;
4025
4026 /* Saving the EH return data registers in the frame is the simplest
4027 way to get the frame unwind information emitted. */
4028 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4029 {
4030 unsigned int i, regno;
4031
4032 for (i = 0; ; ++i)
4033 {
4034 regno = EH_RETURN_DATA_REGNO (i);
4035 if (regno == INVALID_REGNUM)
4036 break;
4037
4038 /* If merge_sp_adjust_with_store is nonzero, then we can
4039 optimize the first save. */
4040 if (merge_sp_adjust_with_store)
4041 {
4042 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4043 merge_sp_adjust_with_store = 0;
4044 }
4045 else
4046 store_reg (regno, offset, STACK_POINTER_REGNUM);
4047 offset += UNITS_PER_WORD;
4048 }
4049 }
4050
4051 for (i = 18; i >= 3; i--)
4052 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4053 {
4054 /* If merge_sp_adjust_with_store is nonzero, then we can
4055 optimize the first GR save. */
4056 if (merge_sp_adjust_with_store)
4057 {
4058 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4059 merge_sp_adjust_with_store = 0;
4060 }
4061 else
4062 store_reg (i, offset, STACK_POINTER_REGNUM);
4063 offset += UNITS_PER_WORD;
4064 gr_saved++;
4065 }
4066
4067 /* If we wanted to merge the SP adjustment with a GR save, but we never
4068 did any GR saves, then just emit the adjustment here. */
4069 if (merge_sp_adjust_with_store)
4070 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4071 actual_fsize, 1);
4072 }
4073
4074 /* The hppa calling conventions say that %r19, the pic offset
4075 register, is saved at sp - 32 (in this function's frame)
4076 when generating PIC code. FIXME: What is the correct thing
4077 to do for functions which make no calls and allocate no
4078 frame? Do we need to allocate a frame, or can we just omit
4079 the save? For now we'll just omit the save.
4080
4081 We don't want a note on this insn as the frame marker can
4082 move if there is a dynamic stack allocation. */
4083 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4084 {
4085 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4086
4087 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4088
4089 }
4090
4091 /* Align pointer properly (doubleword boundary). */
4092 offset = (offset + 7) & ~7;
4093
4094 /* Floating point register store. */
4095 if (save_fregs)
4096 {
4097 rtx base;
4098
4099 /* First get the frame or stack pointer to the start of the FP register
4100 save area. */
4101 if (frame_pointer_needed)
4102 {
4103 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4104 base = hard_frame_pointer_rtx;
4105 }
4106 else
4107 {
4108 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4109 base = stack_pointer_rtx;
4110 }
4111
4112 /* Now actually save the FP registers. */
4113 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4114 {
4115 if (df_regs_ever_live_p (i)
4116 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4117 {
4118 rtx addr, reg;
4119 rtx_insn *insn;
4120 addr = gen_rtx_MEM (DFmode,
4121 gen_rtx_POST_INC (word_mode, tmpreg));
4122 reg = gen_rtx_REG (DFmode, i);
4123 insn = emit_move_insn (addr, reg);
4124 if (DO_FRAME_NOTES)
4125 {
4126 RTX_FRAME_RELATED_P (insn) = 1;
4127 if (TARGET_64BIT)
4128 {
4129 rtx mem = gen_rtx_MEM (DFmode,
4130 plus_constant (Pmode, base,
4131 offset));
4132 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4133 gen_rtx_SET (mem, reg));
4134 }
4135 else
4136 {
4137 rtx meml = gen_rtx_MEM (SFmode,
4138 plus_constant (Pmode, base,
4139 offset));
4140 rtx memr = gen_rtx_MEM (SFmode,
4141 plus_constant (Pmode, base,
4142 offset + 4));
4143 rtx regl = gen_rtx_REG (SFmode, i);
4144 rtx regr = gen_rtx_REG (SFmode, i + 1);
4145 rtx setl = gen_rtx_SET (meml, regl);
4146 rtx setr = gen_rtx_SET (memr, regr);
4147 rtvec vec;
4148
4149 RTX_FRAME_RELATED_P (setl) = 1;
4150 RTX_FRAME_RELATED_P (setr) = 1;
4151 vec = gen_rtvec (2, setl, setr);
4152 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4153 gen_rtx_SEQUENCE (VOIDmode, vec));
4154 }
4155 }
4156 offset += GET_MODE_SIZE (DFmode);
4157 fr_saved++;
4158 }
4159 }
4160 }
4161 }
4162
4163 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4164 Handle case where DISP > 8k by using the add_high_const patterns. */
4165
4166 static void
4167 load_reg (int reg, HOST_WIDE_INT disp, int base)
4168 {
4169 rtx dest = gen_rtx_REG (word_mode, reg);
4170 rtx basereg = gen_rtx_REG (Pmode, base);
4171 rtx src;
4172
4173 if (VAL_14_BITS_P (disp))
4174 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4175 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4176 {
4177 rtx delta = GEN_INT (disp);
4178 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4179
4180 emit_move_insn (tmpreg, delta);
4181 if (TARGET_DISABLE_INDEXING)
4182 {
4183 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4184 src = gen_rtx_MEM (word_mode, tmpreg);
4185 }
4186 else
4187 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4188 }
4189 else
4190 {
4191 rtx delta = GEN_INT (disp);
4192 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4193 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4194
4195 emit_move_insn (tmpreg, high);
4196 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4197 }
4198
4199 emit_move_insn (dest, src);
4200 }
4201
4202 /* Update the total code bytes output to the text section. */
4203
4204 static void
4205 update_total_code_bytes (unsigned int nbytes)
4206 {
4207 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4208 && !IN_NAMED_SECTION_P (cfun->decl))
4209 {
4210 unsigned int old_total = total_code_bytes;
4211
4212 total_code_bytes += nbytes;
4213
4214 /* Be prepared to handle overflows. */
4215 if (old_total > total_code_bytes)
4216 total_code_bytes = UINT_MAX;
4217 }
4218 }
4219
4220 /* This function generates the assembly code for function exit.
4221 Args are as for output_function_prologue ().
4222
4223 The function epilogue should not depend on the current stack
4224 pointer! It should use the frame pointer only. This is mandatory
4225 because of alloca; we also take advantage of it to omit stack
4226 adjustments before returning. */
4227
4228 static void
4229 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4230 {
4231 rtx_insn *insn = get_last_insn ();
4232 bool extra_nop;
4233
4234 /* pa_expand_epilogue does the dirty work now. We just need
4235 to output the assembler directives which denote the end
4236 of a function.
4237
4238 To make debuggers happy, emit a nop if the epilogue was completely
4239 eliminated due to a volatile call as the last insn in the
4240 current function. That way the return address (in %r2) will
4241 always point to a valid instruction in the current function. */
4242
4243 /* Get the last real insn. */
4244 if (NOTE_P (insn))
4245 insn = prev_real_insn (insn);
4246
4247 /* If it is a sequence, then look inside. */
4248 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4249 insn = as_a <rtx_sequence *> (PATTERN (insn))-> insn (0);
4250
4251 /* If insn is a CALL_INSN, then it must be a call to a volatile
4252 function (otherwise there would be epilogue insns). */
4253 if (insn && CALL_P (insn))
4254 {
4255 fputs ("\tnop\n", file);
4256 extra_nop = true;
4257 }
4258 else
4259 extra_nop = false;
4260
4261 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4262
4263 if (TARGET_SOM && TARGET_GAS)
4264 {
4265 /* We are done with this subspace except possibly for some additional
4266 debug information. Forget that we are in this subspace to ensure
4267 that the next function is output in its own subspace. */
4268 in_section = NULL;
4269 cfun->machine->in_nsubspa = 2;
4270 }
4271
4272 /* Thunks do their own insn accounting. */
4273 if (cfun->is_thunk)
4274 return;
4275
4276 if (INSN_ADDRESSES_SET_P ())
4277 {
4278 last_address = extra_nop ? 4 : 0;
4279 insn = get_last_nonnote_insn ();
4280 if (insn)
4281 {
4282 last_address += INSN_ADDRESSES (INSN_UID (insn));
4283 if (INSN_P (insn))
4284 last_address += insn_default_length (insn);
4285 }
4286 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4287 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4288 }
4289 else
4290 last_address = UINT_MAX;
4291
4292 /* Finally, update the total number of code bytes output so far. */
4293 update_total_code_bytes (last_address);
4294 }
4295
4296 void
4297 pa_expand_epilogue (void)
4298 {
4299 rtx tmpreg;
4300 HOST_WIDE_INT offset;
4301 HOST_WIDE_INT ret_off = 0;
4302 int i;
4303 int merge_sp_adjust_with_load = 0;
4304
4305 /* We will use this often. */
4306 tmpreg = gen_rtx_REG (word_mode, 1);
4307
4308 /* Try to restore RP early to avoid load/use interlocks when
4309 RP gets used in the return (bv) instruction. This appears to still
4310 be necessary even when we schedule the prologue and epilogue. */
4311 if (rp_saved)
4312 {
4313 ret_off = TARGET_64BIT ? -16 : -20;
4314 if (frame_pointer_needed)
4315 {
4316 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4317 ret_off = 0;
4318 }
4319 else
4320 {
4321 /* No frame pointer, and stack is smaller than 8k. */
4322 if (VAL_14_BITS_P (ret_off - actual_fsize))
4323 {
4324 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4325 ret_off = 0;
4326 }
4327 }
4328 }
4329
4330 /* General register restores. */
4331 if (frame_pointer_needed)
4332 {
4333 offset = local_fsize;
4334
4335 /* If the current function calls __builtin_eh_return, then we need
4336 to restore the saved EH data registers. */
4337 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4338 {
4339 unsigned int i, regno;
4340
4341 for (i = 0; ; ++i)
4342 {
4343 regno = EH_RETURN_DATA_REGNO (i);
4344 if (regno == INVALID_REGNUM)
4345 break;
4346
4347 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4348 offset += UNITS_PER_WORD;
4349 }
4350 }
4351
4352 for (i = 18; i >= 4; i--)
4353 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4354 {
4355 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4356 offset += UNITS_PER_WORD;
4357 }
4358 }
4359 else
4360 {
4361 offset = local_fsize - actual_fsize;
4362
4363 /* If the current function calls __builtin_eh_return, then we need
4364 to restore the saved EH data registers. */
4365 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4366 {
4367 unsigned int i, regno;
4368
4369 for (i = 0; ; ++i)
4370 {
4371 regno = EH_RETURN_DATA_REGNO (i);
4372 if (regno == INVALID_REGNUM)
4373 break;
4374
4375 /* Only for the first load.
4376 merge_sp_adjust_with_load holds the register load
4377 with which we will merge the sp adjustment. */
4378 if (merge_sp_adjust_with_load == 0
4379 && local_fsize == 0
4380 && VAL_14_BITS_P (-actual_fsize))
4381 merge_sp_adjust_with_load = regno;
4382 else
4383 load_reg (regno, offset, STACK_POINTER_REGNUM);
4384 offset += UNITS_PER_WORD;
4385 }
4386 }
4387
4388 for (i = 18; i >= 3; i--)
4389 {
4390 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4391 {
4392 /* Only for the first load.
4393 merge_sp_adjust_with_load holds the register load
4394 with which we will merge the sp adjustment. */
4395 if (merge_sp_adjust_with_load == 0
4396 && local_fsize == 0
4397 && VAL_14_BITS_P (-actual_fsize))
4398 merge_sp_adjust_with_load = i;
4399 else
4400 load_reg (i, offset, STACK_POINTER_REGNUM);
4401 offset += UNITS_PER_WORD;
4402 }
4403 }
4404 }
4405
4406 /* Align pointer properly (doubleword boundary). */
4407 offset = (offset + 7) & ~7;
4408
4409 /* FP register restores. */
4410 if (save_fregs)
4411 {
4412 /* Adjust the register to index off of. */
4413 if (frame_pointer_needed)
4414 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4415 else
4416 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4417
4418 /* Actually do the restores now. */
4419 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4420 if (df_regs_ever_live_p (i)
4421 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4422 {
4423 rtx src = gen_rtx_MEM (DFmode,
4424 gen_rtx_POST_INC (word_mode, tmpreg));
4425 rtx dest = gen_rtx_REG (DFmode, i);
4426 emit_move_insn (dest, src);
4427 }
4428 }
4429
4430 /* Emit a blockage insn here to keep these insns from being moved to
4431 an earlier spot in the epilogue, or into the main instruction stream.
4432
4433 This is necessary as we must not cut the stack back before all the
4434 restores are finished. */
4435 emit_insn (gen_blockage ());
4436
4437 /* Reset stack pointer (and possibly frame pointer). The stack
4438 pointer is initially set to fp + 64 to avoid a race condition. */
4439 if (frame_pointer_needed)
4440 {
4441 rtx delta = GEN_INT (-64);
4442
4443 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4444 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4445 stack_pointer_rtx, delta));
4446 }
4447 /* If we were deferring a callee register restore, do it now. */
4448 else if (merge_sp_adjust_with_load)
4449 {
4450 rtx delta = GEN_INT (-actual_fsize);
4451 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4452
4453 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4454 }
4455 else if (actual_fsize != 0)
4456 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4457 - actual_fsize, 0);
4458
4459 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4460 frame greater than 8k), do so now. */
4461 if (ret_off != 0)
4462 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4463
4464 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4465 {
4466 rtx sa = EH_RETURN_STACKADJ_RTX;
4467
4468 emit_insn (gen_blockage ());
4469 emit_insn (TARGET_64BIT
4470 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4471 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4472 }
4473 }
4474
4475 bool
4476 pa_can_use_return_insn (void)
4477 {
4478 if (!reload_completed)
4479 return false;
4480
4481 if (frame_pointer_needed)
4482 return false;
4483
4484 if (df_regs_ever_live_p (2))
4485 return false;
4486
4487 if (crtl->profile)
4488 return false;
4489
4490 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4491 }
4492
4493 rtx
4494 hppa_pic_save_rtx (void)
4495 {
4496 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4497 }
4498
4499 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4500 #define NO_DEFERRED_PROFILE_COUNTERS 0
4501 #endif
4502
4503
4504 /* Vector of funcdef numbers. */
4505 static vec<int> funcdef_nos;
4506
4507 /* Output deferred profile counters. */
4508 static void
4509 output_deferred_profile_counters (void)
4510 {
4511 unsigned int i;
4512 int align, n;
4513
4514 if (funcdef_nos.is_empty ())
4515 return;
4516
4517 switch_to_section (data_section);
4518 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4519 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4520
4521 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4522 {
4523 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4524 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4525 }
4526
4527 funcdef_nos.release ();
4528 }
4529
4530 void
4531 hppa_profile_hook (int label_no)
4532 {
4533 /* We use SImode for the address of the function in both 32 and
4534 64-bit code to avoid having to provide DImode versions of the
4535 lcla2 and load_offset_label_address insn patterns. */
4536 rtx reg = gen_reg_rtx (SImode);
4537 rtx_code_label *label_rtx = gen_label_rtx ();
4538 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4539 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4540 rtx arg_bytes, begin_label_rtx;
4541 rtx_insn *call_insn;
4542 char begin_label_name[16];
4543 bool use_mcount_pcrel_call;
4544
4545 /* If we can reach _mcount with a pc-relative call, we can optimize
4546 loading the address of the current function. This requires linker
4547 long branch stub support. */
4548 if (!TARGET_PORTABLE_RUNTIME
4549 && !TARGET_LONG_CALLS
4550 && (TARGET_SOM || flag_function_sections))
4551 use_mcount_pcrel_call = TRUE;
4552 else
4553 use_mcount_pcrel_call = FALSE;
4554
4555 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4556 label_no);
4557 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4558
4559 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4560
4561 if (!use_mcount_pcrel_call)
4562 {
4563 /* The address of the function is loaded into %r25 with an instruction-
4564 relative sequence that avoids the use of relocations. The sequence
4565 is split so that the load_offset_label_address instruction can
4566 occupy the delay slot of the call to _mcount. */
4567 if (TARGET_PA_20)
4568 emit_insn (gen_lcla2 (reg, label_rtx));
4569 else
4570 emit_insn (gen_lcla1 (reg, label_rtx));
4571
4572 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4573 reg,
4574 begin_label_rtx,
4575 label_rtx));
4576 }
4577
4578 if (!NO_DEFERRED_PROFILE_COUNTERS)
4579 {
4580 rtx count_label_rtx, addr, r24;
4581 char count_label_name[16];
4582
4583 funcdef_nos.safe_push (label_no);
4584 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4585 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4586 ggc_strdup (count_label_name));
4587
4588 addr = force_reg (Pmode, count_label_rtx);
4589 r24 = gen_rtx_REG (Pmode, 24);
4590 emit_move_insn (r24, addr);
4591
4592 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4593 if (use_mcount_pcrel_call)
4594 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4595 begin_label_rtx));
4596 else
4597 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4598
4599 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4600 }
4601 else
4602 {
4603 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4604 if (use_mcount_pcrel_call)
4605 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4606 begin_label_rtx));
4607 else
4608 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4609 }
4610
4611 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4612 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4613
4614 /* Indicate the _mcount call cannot throw, nor will it execute a
4615 non-local goto. */
4616 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4617
4618 /* Allocate space for fixed arguments. */
4619 if (reg_parm_stack_space > crtl->outgoing_args_size)
4620 crtl->outgoing_args_size = reg_parm_stack_space;
4621 }
4622
4623 /* Fetch the return address for the frame COUNT steps up from
4624 the current frame, after the prologue. FRAMEADDR is the
4625 frame pointer of the COUNT frame.
4626
4627 We want to ignore any export stub remnants here. To handle this,
4628 we examine the code at the return address, and if it is an export
4629 stub, we return a memory rtx for the stub return address stored
4630 at frame-24.
4631
4632 The value returned is used in two different ways:
4633
4634 1. To find a function's caller.
4635
4636 2. To change the return address for a function.
4637
4638 This function handles most instances of case 1; however, it will
4639 fail if there are two levels of stubs to execute on the return
4640 path. The only way I believe that can happen is if the return value
4641 needs a parameter relocation, which never happens for C code.
4642
4643 This function handles most instances of case 2; however, it will
4644 fail if we did not originally have stub code on the return path
4645 but will need stub code on the new return path. This can happen if
4646 the caller & callee are both in the main program, but the new
4647 return location is in a shared library. */
4648
4649 rtx
4650 pa_return_addr_rtx (int count, rtx frameaddr)
4651 {
4652 rtx label;
4653 rtx rp;
4654 rtx saved_rp;
4655 rtx ins;
4656
4657 /* The instruction stream at the return address of a PA1.X export stub is:
4658
4659 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4660 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4661 0x00011820 | stub+16: mtsp r1,sr0
4662 0xe0400002 | stub+20: be,n 0(sr0,rp)
4663
4664 0xe0400002 must be specified as -532676606 so that it won't be
4665 rejected as an invalid immediate operand on 64-bit hosts.
4666
4667 The instruction stream at the return address of a PA2.0 export stub is:
4668
4669 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4670 0xe840d002 | stub+12: bve,n (rp)
4671 */
4672
4673 HOST_WIDE_INT insns[4];
4674 int i, len;
4675
4676 if (count != 0)
4677 return NULL_RTX;
4678
4679 rp = get_hard_reg_initial_val (Pmode, 2);
4680
4681 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4682 return rp;
4683
4684 /* If there is no export stub then just use the value saved from
4685 the return pointer register. */
4686
4687 saved_rp = gen_reg_rtx (Pmode);
4688 emit_move_insn (saved_rp, rp);
4689
4690 /* Get pointer to the instruction stream. We have to mask out the
4691 privilege level from the two low order bits of the return address
4692 pointer here so that ins will point to the start of the first
4693 instruction that would have been executed if we returned. */
4694 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4695 label = gen_label_rtx ();
4696
4697 if (TARGET_PA_20)
4698 {
4699 insns[0] = 0x4bc23fd1;
4700 insns[1] = -398405630;
4701 len = 2;
4702 }
4703 else
4704 {
4705 insns[0] = 0x4bc23fd1;
4706 insns[1] = 0x004010a1;
4707 insns[2] = 0x00011820;
4708 insns[3] = -532676606;
4709 len = 4;
4710 }
4711
4712 /* Check the instruction stream at the normal return address for the
4713 export stub. If it is an export stub, than our return address is
4714 really in -24[frameaddr]. */
4715
4716 for (i = 0; i < len; i++)
4717 {
4718 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4719 rtx op1 = GEN_INT (insns[i]);
4720 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4721 }
4722
4723 /* Here we know that our return address points to an export
4724 stub. We don't want to return the address of the export stub,
4725 but rather the return address of the export stub. That return
4726 address is stored at -24[frameaddr]. */
4727
4728 emit_move_insn (saved_rp,
4729 gen_rtx_MEM (Pmode,
4730 memory_address (Pmode,
4731 plus_constant (Pmode, frameaddr,
4732 -24))));
4733
4734 emit_label (label);
4735
4736 return saved_rp;
4737 }
4738
4739 void
4740 pa_emit_bcond_fp (rtx operands[])
4741 {
4742 enum rtx_code code = GET_CODE (operands[0]);
4743 rtx operand0 = operands[1];
4744 rtx operand1 = operands[2];
4745 rtx label = operands[3];
4746
4747 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4748 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4749
4750 emit_jump_insn (gen_rtx_SET (pc_rtx,
4751 gen_rtx_IF_THEN_ELSE (VOIDmode,
4752 gen_rtx_fmt_ee (NE,
4753 VOIDmode,
4754 gen_rtx_REG (CCFPmode, 0),
4755 const0_rtx),
4756 gen_rtx_LABEL_REF (VOIDmode, label),
4757 pc_rtx)));
4758
4759 }
4760
4761 /* Adjust the cost of a scheduling dependency. Return the new cost of
4762 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4763
4764 static int
4765 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4766 unsigned int)
4767 {
4768 enum attr_type attr_type;
4769
4770 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4771 true dependencies as they are described with bypasses now. */
4772 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4773 return cost;
4774
4775 if (! recog_memoized (insn))
4776 return 0;
4777
4778 attr_type = get_attr_type (insn);
4779
4780 switch (dep_type)
4781 {
4782 case REG_DEP_ANTI:
4783 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4784 cycles later. */
4785
4786 if (attr_type == TYPE_FPLOAD)
4787 {
4788 rtx pat = PATTERN (insn);
4789 rtx dep_pat = PATTERN (dep_insn);
4790 if (GET_CODE (pat) == PARALLEL)
4791 {
4792 /* This happens for the fldXs,mb patterns. */
4793 pat = XVECEXP (pat, 0, 0);
4794 }
4795 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4796 /* If this happens, we have to extend this to schedule
4797 optimally. Return 0 for now. */
4798 return 0;
4799
4800 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4801 {
4802 if (! recog_memoized (dep_insn))
4803 return 0;
4804 switch (get_attr_type (dep_insn))
4805 {
4806 case TYPE_FPALU:
4807 case TYPE_FPMULSGL:
4808 case TYPE_FPMULDBL:
4809 case TYPE_FPDIVSGL:
4810 case TYPE_FPDIVDBL:
4811 case TYPE_FPSQRTSGL:
4812 case TYPE_FPSQRTDBL:
4813 /* A fpload can't be issued until one cycle before a
4814 preceding arithmetic operation has finished if
4815 the target of the fpload is any of the sources
4816 (or destination) of the arithmetic operation. */
4817 return insn_default_latency (dep_insn) - 1;
4818
4819 default:
4820 return 0;
4821 }
4822 }
4823 }
4824 else if (attr_type == TYPE_FPALU)
4825 {
4826 rtx pat = PATTERN (insn);
4827 rtx dep_pat = PATTERN (dep_insn);
4828 if (GET_CODE (pat) == PARALLEL)
4829 {
4830 /* This happens for the fldXs,mb patterns. */
4831 pat = XVECEXP (pat, 0, 0);
4832 }
4833 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4834 /* If this happens, we have to extend this to schedule
4835 optimally. Return 0 for now. */
4836 return 0;
4837
4838 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4839 {
4840 if (! recog_memoized (dep_insn))
4841 return 0;
4842 switch (get_attr_type (dep_insn))
4843 {
4844 case TYPE_FPDIVSGL:
4845 case TYPE_FPDIVDBL:
4846 case TYPE_FPSQRTSGL:
4847 case TYPE_FPSQRTDBL:
4848 /* An ALU flop can't be issued until two cycles before a
4849 preceding divide or sqrt operation has finished if
4850 the target of the ALU flop is any of the sources
4851 (or destination) of the divide or sqrt operation. */
4852 return insn_default_latency (dep_insn) - 2;
4853
4854 default:
4855 return 0;
4856 }
4857 }
4858 }
4859
4860 /* For other anti dependencies, the cost is 0. */
4861 return 0;
4862
4863 case REG_DEP_OUTPUT:
4864 /* Output dependency; DEP_INSN writes a register that INSN writes some
4865 cycles later. */
4866 if (attr_type == TYPE_FPLOAD)
4867 {
4868 rtx pat = PATTERN (insn);
4869 rtx dep_pat = PATTERN (dep_insn);
4870 if (GET_CODE (pat) == PARALLEL)
4871 {
4872 /* This happens for the fldXs,mb patterns. */
4873 pat = XVECEXP (pat, 0, 0);
4874 }
4875 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4876 /* If this happens, we have to extend this to schedule
4877 optimally. Return 0 for now. */
4878 return 0;
4879
4880 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4881 {
4882 if (! recog_memoized (dep_insn))
4883 return 0;
4884 switch (get_attr_type (dep_insn))
4885 {
4886 case TYPE_FPALU:
4887 case TYPE_FPMULSGL:
4888 case TYPE_FPMULDBL:
4889 case TYPE_FPDIVSGL:
4890 case TYPE_FPDIVDBL:
4891 case TYPE_FPSQRTSGL:
4892 case TYPE_FPSQRTDBL:
4893 /* A fpload can't be issued until one cycle before a
4894 preceding arithmetic operation has finished if
4895 the target of the fpload is the destination of the
4896 arithmetic operation.
4897
4898 Exception: For PA7100LC, PA7200 and PA7300, the cost
4899 is 3 cycles, unless they bundle together. We also
4900 pay the penalty if the second insn is a fpload. */
4901 return insn_default_latency (dep_insn) - 1;
4902
4903 default:
4904 return 0;
4905 }
4906 }
4907 }
4908 else if (attr_type == TYPE_FPALU)
4909 {
4910 rtx pat = PATTERN (insn);
4911 rtx dep_pat = PATTERN (dep_insn);
4912 if (GET_CODE (pat) == PARALLEL)
4913 {
4914 /* This happens for the fldXs,mb patterns. */
4915 pat = XVECEXP (pat, 0, 0);
4916 }
4917 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4918 /* If this happens, we have to extend this to schedule
4919 optimally. Return 0 for now. */
4920 return 0;
4921
4922 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4923 {
4924 if (! recog_memoized (dep_insn))
4925 return 0;
4926 switch (get_attr_type (dep_insn))
4927 {
4928 case TYPE_FPDIVSGL:
4929 case TYPE_FPDIVDBL:
4930 case TYPE_FPSQRTSGL:
4931 case TYPE_FPSQRTDBL:
4932 /* An ALU flop can't be issued until two cycles before a
4933 preceding divide or sqrt operation has finished if
4934 the target of the ALU flop is also the target of
4935 the divide or sqrt operation. */
4936 return insn_default_latency (dep_insn) - 2;
4937
4938 default:
4939 return 0;
4940 }
4941 }
4942 }
4943
4944 /* For other output dependencies, the cost is 0. */
4945 return 0;
4946
4947 default:
4948 gcc_unreachable ();
4949 }
4950 }
4951
4952 /* Adjust scheduling priorities. We use this to try and keep addil
4953 and the next use of %r1 close together. */
4954 static int
4955 pa_adjust_priority (rtx_insn *insn, int priority)
4956 {
4957 rtx set = single_set (insn);
4958 rtx src, dest;
4959 if (set)
4960 {
4961 src = SET_SRC (set);
4962 dest = SET_DEST (set);
4963 if (GET_CODE (src) == LO_SUM
4964 && symbolic_operand (XEXP (src, 1), VOIDmode)
4965 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4966 priority >>= 3;
4967
4968 else if (GET_CODE (src) == MEM
4969 && GET_CODE (XEXP (src, 0)) == LO_SUM
4970 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4971 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4972 priority >>= 1;
4973
4974 else if (GET_CODE (dest) == MEM
4975 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4976 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4977 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4978 priority >>= 3;
4979 }
4980 return priority;
4981 }
4982
4983 /* The 700 can only issue a single insn at a time.
4984 The 7XXX processors can issue two insns at a time.
4985 The 8000 can issue 4 insns at a time. */
4986 static int
4987 pa_issue_rate (void)
4988 {
4989 switch (pa_cpu)
4990 {
4991 case PROCESSOR_700: return 1;
4992 case PROCESSOR_7100: return 2;
4993 case PROCESSOR_7100LC: return 2;
4994 case PROCESSOR_7200: return 2;
4995 case PROCESSOR_7300: return 2;
4996 case PROCESSOR_8000: return 4;
4997
4998 default:
4999 gcc_unreachable ();
5000 }
5001 }
5002
5003
5004
5005 /* Return any length plus adjustment needed by INSN which already has
5006 its length computed as LENGTH. Return LENGTH if no adjustment is
5007 necessary.
5008
5009 Also compute the length of an inline block move here as it is too
5010 complicated to express as a length attribute in pa.md. */
5011 int
5012 pa_adjust_insn_length (rtx_insn *insn, int length)
5013 {
5014 rtx pat = PATTERN (insn);
5015
5016 /* If length is negative or undefined, provide initial length. */
5017 if ((unsigned int) length >= INT_MAX)
5018 {
5019 if (GET_CODE (pat) == SEQUENCE)
5020 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5021
5022 switch (get_attr_type (insn))
5023 {
5024 case TYPE_MILLI:
5025 length = pa_attr_length_millicode_call (insn);
5026 break;
5027 case TYPE_CALL:
5028 length = pa_attr_length_call (insn, 0);
5029 break;
5030 case TYPE_SIBCALL:
5031 length = pa_attr_length_call (insn, 1);
5032 break;
5033 case TYPE_DYNCALL:
5034 length = pa_attr_length_indirect_call (insn);
5035 break;
5036 case TYPE_SH_FUNC_ADRS:
5037 length = pa_attr_length_millicode_call (insn) + 20;
5038 break;
5039 default:
5040 gcc_unreachable ();
5041 }
5042 }
5043
5044 /* Block move pattern. */
5045 if (NONJUMP_INSN_P (insn)
5046 && GET_CODE (pat) == PARALLEL
5047 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5048 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5049 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5050 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5051 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5052 length += compute_movmem_length (insn) - 4;
5053 /* Block clear pattern. */
5054 else if (NONJUMP_INSN_P (insn)
5055 && GET_CODE (pat) == PARALLEL
5056 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5057 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5058 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5059 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5060 length += compute_clrmem_length (insn) - 4;
5061 /* Conditional branch with an unfilled delay slot. */
5062 else if (JUMP_P (insn) && ! simplejump_p (insn))
5063 {
5064 /* Adjust a short backwards conditional with an unfilled delay slot. */
5065 if (GET_CODE (pat) == SET
5066 && length == 4
5067 && JUMP_LABEL (insn) != NULL_RTX
5068 && ! forward_branch_p (insn))
5069 length += 4;
5070 else if (GET_CODE (pat) == PARALLEL
5071 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5072 && length == 4)
5073 length += 4;
5074 /* Adjust dbra insn with short backwards conditional branch with
5075 unfilled delay slot -- only for case where counter is in a
5076 general register register. */
5077 else if (GET_CODE (pat) == PARALLEL
5078 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5079 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5080 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5081 && length == 4
5082 && ! forward_branch_p (insn))
5083 length += 4;
5084 }
5085 return length;
5086 }
5087
5088 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5089
5090 static bool
5091 pa_print_operand_punct_valid_p (unsigned char code)
5092 {
5093 if (code == '@'
5094 || code == '#'
5095 || code == '*'
5096 || code == '^')
5097 return true;
5098
5099 return false;
5100 }
5101
5102 /* Print operand X (an rtx) in assembler syntax to file FILE.
5103 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5104 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5105
5106 void
5107 pa_print_operand (FILE *file, rtx x, int code)
5108 {
5109 switch (code)
5110 {
5111 case '#':
5112 /* Output a 'nop' if there's nothing for the delay slot. */
5113 if (dbr_sequence_length () == 0)
5114 fputs ("\n\tnop", file);
5115 return;
5116 case '*':
5117 /* Output a nullification completer if there's nothing for the */
5118 /* delay slot or nullification is requested. */
5119 if (dbr_sequence_length () == 0 ||
5120 (final_sequence &&
5121 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5122 fputs (",n", file);
5123 return;
5124 case 'R':
5125 /* Print out the second register name of a register pair.
5126 I.e., R (6) => 7. */
5127 fputs (reg_names[REGNO (x) + 1], file);
5128 return;
5129 case 'r':
5130 /* A register or zero. */
5131 if (x == const0_rtx
5132 || (x == CONST0_RTX (DFmode))
5133 || (x == CONST0_RTX (SFmode)))
5134 {
5135 fputs ("%r0", file);
5136 return;
5137 }
5138 else
5139 break;
5140 case 'f':
5141 /* A register or zero (floating point). */
5142 if (x == const0_rtx
5143 || (x == CONST0_RTX (DFmode))
5144 || (x == CONST0_RTX (SFmode)))
5145 {
5146 fputs ("%fr0", file);
5147 return;
5148 }
5149 else
5150 break;
5151 case 'A':
5152 {
5153 rtx xoperands[2];
5154
5155 xoperands[0] = XEXP (XEXP (x, 0), 0);
5156 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5157 pa_output_global_address (file, xoperands[1], 0);
5158 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5159 return;
5160 }
5161
5162 case 'C': /* Plain (C)ondition */
5163 case 'X':
5164 switch (GET_CODE (x))
5165 {
5166 case EQ:
5167 fputs ("=", file); break;
5168 case NE:
5169 fputs ("<>", file); break;
5170 case GT:
5171 fputs (">", file); break;
5172 case GE:
5173 fputs (">=", file); break;
5174 case GEU:
5175 fputs (">>=", file); break;
5176 case GTU:
5177 fputs (">>", file); break;
5178 case LT:
5179 fputs ("<", file); break;
5180 case LE:
5181 fputs ("<=", file); break;
5182 case LEU:
5183 fputs ("<<=", file); break;
5184 case LTU:
5185 fputs ("<<", file); break;
5186 default:
5187 gcc_unreachable ();
5188 }
5189 return;
5190 case 'N': /* Condition, (N)egated */
5191 switch (GET_CODE (x))
5192 {
5193 case EQ:
5194 fputs ("<>", file); break;
5195 case NE:
5196 fputs ("=", file); break;
5197 case GT:
5198 fputs ("<=", file); break;
5199 case GE:
5200 fputs ("<", file); break;
5201 case GEU:
5202 fputs ("<<", file); break;
5203 case GTU:
5204 fputs ("<<=", file); break;
5205 case LT:
5206 fputs (">=", file); break;
5207 case LE:
5208 fputs (">", file); break;
5209 case LEU:
5210 fputs (">>", file); break;
5211 case LTU:
5212 fputs (">>=", file); break;
5213 default:
5214 gcc_unreachable ();
5215 }
5216 return;
5217 /* For floating point comparisons. Note that the output
5218 predicates are the complement of the desired mode. The
5219 conditions for GT, GE, LT, LE and LTGT cause an invalid
5220 operation exception if the result is unordered and this
5221 exception is enabled in the floating-point status register. */
5222 case 'Y':
5223 switch (GET_CODE (x))
5224 {
5225 case EQ:
5226 fputs ("!=", file); break;
5227 case NE:
5228 fputs ("=", file); break;
5229 case GT:
5230 fputs ("!>", file); break;
5231 case GE:
5232 fputs ("!>=", file); break;
5233 case LT:
5234 fputs ("!<", file); break;
5235 case LE:
5236 fputs ("!<=", file); break;
5237 case LTGT:
5238 fputs ("!<>", file); break;
5239 case UNLE:
5240 fputs ("!?<=", file); break;
5241 case UNLT:
5242 fputs ("!?<", file); break;
5243 case UNGE:
5244 fputs ("!?>=", file); break;
5245 case UNGT:
5246 fputs ("!?>", file); break;
5247 case UNEQ:
5248 fputs ("!?=", file); break;
5249 case UNORDERED:
5250 fputs ("!?", file); break;
5251 case ORDERED:
5252 fputs ("?", file); break;
5253 default:
5254 gcc_unreachable ();
5255 }
5256 return;
5257 case 'S': /* Condition, operands are (S)wapped. */
5258 switch (GET_CODE (x))
5259 {
5260 case EQ:
5261 fputs ("=", file); break;
5262 case NE:
5263 fputs ("<>", file); break;
5264 case GT:
5265 fputs ("<", file); break;
5266 case GE:
5267 fputs ("<=", file); break;
5268 case GEU:
5269 fputs ("<<=", file); break;
5270 case GTU:
5271 fputs ("<<", file); break;
5272 case LT:
5273 fputs (">", file); break;
5274 case LE:
5275 fputs (">=", file); break;
5276 case LEU:
5277 fputs (">>=", file); break;
5278 case LTU:
5279 fputs (">>", file); break;
5280 default:
5281 gcc_unreachable ();
5282 }
5283 return;
5284 case 'B': /* Condition, (B)oth swapped and negate. */
5285 switch (GET_CODE (x))
5286 {
5287 case EQ:
5288 fputs ("<>", file); break;
5289 case NE:
5290 fputs ("=", file); break;
5291 case GT:
5292 fputs (">=", file); break;
5293 case GE:
5294 fputs (">", file); break;
5295 case GEU:
5296 fputs (">>", file); break;
5297 case GTU:
5298 fputs (">>=", file); break;
5299 case LT:
5300 fputs ("<=", file); break;
5301 case LE:
5302 fputs ("<", file); break;
5303 case LEU:
5304 fputs ("<<", file); break;
5305 case LTU:
5306 fputs ("<<=", file); break;
5307 default:
5308 gcc_unreachable ();
5309 }
5310 return;
5311 case 'k':
5312 gcc_assert (GET_CODE (x) == CONST_INT);
5313 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5314 return;
5315 case 'Q':
5316 gcc_assert (GET_CODE (x) == CONST_INT);
5317 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5318 return;
5319 case 'L':
5320 gcc_assert (GET_CODE (x) == CONST_INT);
5321 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5322 return;
5323 case 'o':
5324 gcc_assert (GET_CODE (x) == CONST_INT
5325 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5326 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5327 return;
5328 case 'O':
5329 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5330 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5331 return;
5332 case 'p':
5333 gcc_assert (GET_CODE (x) == CONST_INT);
5334 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5335 return;
5336 case 'P':
5337 gcc_assert (GET_CODE (x) == CONST_INT);
5338 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5339 return;
5340 case 'I':
5341 if (GET_CODE (x) == CONST_INT)
5342 fputs ("i", file);
5343 return;
5344 case 'M':
5345 case 'F':
5346 switch (GET_CODE (XEXP (x, 0)))
5347 {
5348 case PRE_DEC:
5349 case PRE_INC:
5350 if (ASSEMBLER_DIALECT == 0)
5351 fputs ("s,mb", file);
5352 else
5353 fputs (",mb", file);
5354 break;
5355 case POST_DEC:
5356 case POST_INC:
5357 if (ASSEMBLER_DIALECT == 0)
5358 fputs ("s,ma", file);
5359 else
5360 fputs (",ma", file);
5361 break;
5362 case PLUS:
5363 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5364 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5365 {
5366 if (ASSEMBLER_DIALECT == 0)
5367 fputs ("x", file);
5368 }
5369 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5370 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5371 {
5372 if (ASSEMBLER_DIALECT == 0)
5373 fputs ("x,s", file);
5374 else
5375 fputs (",s", file);
5376 }
5377 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5378 fputs ("s", file);
5379 break;
5380 default:
5381 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5382 fputs ("s", file);
5383 break;
5384 }
5385 return;
5386 case 'G':
5387 pa_output_global_address (file, x, 0);
5388 return;
5389 case 'H':
5390 pa_output_global_address (file, x, 1);
5391 return;
5392 case 0: /* Don't do anything special */
5393 break;
5394 case 'Z':
5395 {
5396 unsigned op[3];
5397 compute_zdepwi_operands (INTVAL (x), op);
5398 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5399 return;
5400 }
5401 case 'z':
5402 {
5403 unsigned op[3];
5404 compute_zdepdi_operands (INTVAL (x), op);
5405 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5406 return;
5407 }
5408 case 'c':
5409 /* We can get here from a .vtable_inherit due to our
5410 CONSTANT_ADDRESS_P rejecting perfectly good constant
5411 addresses. */
5412 break;
5413 default:
5414 gcc_unreachable ();
5415 }
5416 if (GET_CODE (x) == REG)
5417 {
5418 fputs (reg_names [REGNO (x)], file);
5419 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5420 {
5421 fputs ("R", file);
5422 return;
5423 }
5424 if (FP_REG_P (x)
5425 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5426 && (REGNO (x) & 1) == 0)
5427 fputs ("L", file);
5428 }
5429 else if (GET_CODE (x) == MEM)
5430 {
5431 int size = GET_MODE_SIZE (GET_MODE (x));
5432 rtx base = NULL_RTX;
5433 switch (GET_CODE (XEXP (x, 0)))
5434 {
5435 case PRE_DEC:
5436 case POST_DEC:
5437 base = XEXP (XEXP (x, 0), 0);
5438 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5439 break;
5440 case PRE_INC:
5441 case POST_INC:
5442 base = XEXP (XEXP (x, 0), 0);
5443 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5444 break;
5445 case PLUS:
5446 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5447 fprintf (file, "%s(%s)",
5448 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5449 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5450 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5451 fprintf (file, "%s(%s)",
5452 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5453 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5454 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5455 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5456 {
5457 /* Because the REG_POINTER flag can get lost during reload,
5458 pa_legitimate_address_p canonicalizes the order of the
5459 index and base registers in the combined move patterns. */
5460 rtx base = XEXP (XEXP (x, 0), 1);
5461 rtx index = XEXP (XEXP (x, 0), 0);
5462
5463 fprintf (file, "%s(%s)",
5464 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5465 }
5466 else
5467 output_address (GET_MODE (x), XEXP (x, 0));
5468 break;
5469 default:
5470 output_address (GET_MODE (x), XEXP (x, 0));
5471 break;
5472 }
5473 }
5474 else
5475 output_addr_const (file, x);
5476 }
5477
5478 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5479
5480 void
5481 pa_output_global_address (FILE *file, rtx x, int round_constant)
5482 {
5483
5484 /* Imagine (high (const (plus ...))). */
5485 if (GET_CODE (x) == HIGH)
5486 x = XEXP (x, 0);
5487
5488 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5489 output_addr_const (file, x);
5490 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5491 {
5492 output_addr_const (file, x);
5493 fputs ("-$global$", file);
5494 }
5495 else if (GET_CODE (x) == CONST)
5496 {
5497 const char *sep = "";
5498 int offset = 0; /* assembler wants -$global$ at end */
5499 rtx base = NULL_RTX;
5500
5501 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5502 {
5503 case LABEL_REF:
5504 case SYMBOL_REF:
5505 base = XEXP (XEXP (x, 0), 0);
5506 output_addr_const (file, base);
5507 break;
5508 case CONST_INT:
5509 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5510 break;
5511 default:
5512 gcc_unreachable ();
5513 }
5514
5515 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5516 {
5517 case LABEL_REF:
5518 case SYMBOL_REF:
5519 base = XEXP (XEXP (x, 0), 1);
5520 output_addr_const (file, base);
5521 break;
5522 case CONST_INT:
5523 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5524 break;
5525 default:
5526 gcc_unreachable ();
5527 }
5528
5529 /* How bogus. The compiler is apparently responsible for
5530 rounding the constant if it uses an LR field selector.
5531
5532 The linker and/or assembler seem a better place since
5533 they have to do this kind of thing already.
5534
5535 If we fail to do this, HP's optimizing linker may eliminate
5536 an addil, but not update the ldw/stw/ldo instruction that
5537 uses the result of the addil. */
5538 if (round_constant)
5539 offset = ((offset + 0x1000) & ~0x1fff);
5540
5541 switch (GET_CODE (XEXP (x, 0)))
5542 {
5543 case PLUS:
5544 if (offset < 0)
5545 {
5546 offset = -offset;
5547 sep = "-";
5548 }
5549 else
5550 sep = "+";
5551 break;
5552
5553 case MINUS:
5554 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5555 sep = "-";
5556 break;
5557
5558 default:
5559 gcc_unreachable ();
5560 }
5561
5562 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5563 fputs ("-$global$", file);
5564 if (offset)
5565 fprintf (file, "%s%d", sep, offset);
5566 }
5567 else
5568 output_addr_const (file, x);
5569 }
5570
5571 /* Output boilerplate text to appear at the beginning of the file.
5572 There are several possible versions. */
5573 #define aputs(x) fputs(x, asm_out_file)
5574 static inline void
5575 pa_file_start_level (void)
5576 {
5577 if (TARGET_64BIT)
5578 aputs ("\t.LEVEL 2.0w\n");
5579 else if (TARGET_PA_20)
5580 aputs ("\t.LEVEL 2.0\n");
5581 else if (TARGET_PA_11)
5582 aputs ("\t.LEVEL 1.1\n");
5583 else
5584 aputs ("\t.LEVEL 1.0\n");
5585 }
5586
5587 static inline void
5588 pa_file_start_space (int sortspace)
5589 {
5590 aputs ("\t.SPACE $PRIVATE$");
5591 if (sortspace)
5592 aputs (",SORT=16");
5593 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5594 if (flag_tm)
5595 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5596 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5597 "\n\t.SPACE $TEXT$");
5598 if (sortspace)
5599 aputs (",SORT=8");
5600 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5601 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5602 }
5603
5604 static inline void
5605 pa_file_start_file (int want_version)
5606 {
5607 if (write_symbols != NO_DEBUG)
5608 {
5609 output_file_directive (asm_out_file, main_input_filename);
5610 if (want_version)
5611 aputs ("\t.version\t\"01.01\"\n");
5612 }
5613 }
5614
5615 static inline void
5616 pa_file_start_mcount (const char *aswhat)
5617 {
5618 if (profile_flag)
5619 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5620 }
5621
5622 static void
5623 pa_elf_file_start (void)
5624 {
5625 pa_file_start_level ();
5626 pa_file_start_mcount ("ENTRY");
5627 pa_file_start_file (0);
5628 }
5629
5630 static void
5631 pa_som_file_start (void)
5632 {
5633 pa_file_start_level ();
5634 pa_file_start_space (0);
5635 aputs ("\t.IMPORT $global$,DATA\n"
5636 "\t.IMPORT $$dyncall,MILLICODE\n");
5637 pa_file_start_mcount ("CODE");
5638 pa_file_start_file (0);
5639 }
5640
5641 static void
5642 pa_linux_file_start (void)
5643 {
5644 pa_file_start_file (1);
5645 pa_file_start_level ();
5646 pa_file_start_mcount ("CODE");
5647 }
5648
5649 static void
5650 pa_hpux64_gas_file_start (void)
5651 {
5652 pa_file_start_level ();
5653 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5654 if (profile_flag)
5655 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5656 #endif
5657 pa_file_start_file (1);
5658 }
5659
5660 static void
5661 pa_hpux64_hpas_file_start (void)
5662 {
5663 pa_file_start_level ();
5664 pa_file_start_space (1);
5665 pa_file_start_mcount ("CODE");
5666 pa_file_start_file (0);
5667 }
5668 #undef aputs
5669
5670 /* Search the deferred plabel list for SYMBOL and return its internal
5671 label. If an entry for SYMBOL is not found, a new entry is created. */
5672
5673 rtx
5674 pa_get_deferred_plabel (rtx symbol)
5675 {
5676 const char *fname = XSTR (symbol, 0);
5677 size_t i;
5678
5679 /* See if we have already put this function on the list of deferred
5680 plabels. This list is generally small, so a liner search is not
5681 too ugly. If it proves too slow replace it with something faster. */
5682 for (i = 0; i < n_deferred_plabels; i++)
5683 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5684 break;
5685
5686 /* If the deferred plabel list is empty, or this entry was not found
5687 on the list, create a new entry on the list. */
5688 if (deferred_plabels == NULL || i == n_deferred_plabels)
5689 {
5690 tree id;
5691
5692 if (deferred_plabels == 0)
5693 deferred_plabels = ggc_alloc<deferred_plabel> ();
5694 else
5695 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5696 deferred_plabels,
5697 n_deferred_plabels + 1);
5698
5699 i = n_deferred_plabels++;
5700 deferred_plabels[i].internal_label = gen_label_rtx ();
5701 deferred_plabels[i].symbol = symbol;
5702
5703 /* Gross. We have just implicitly taken the address of this
5704 function. Mark it in the same manner as assemble_name. */
5705 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5706 if (id)
5707 mark_referenced (id);
5708 }
5709
5710 return deferred_plabels[i].internal_label;
5711 }
5712
5713 static void
5714 output_deferred_plabels (void)
5715 {
5716 size_t i;
5717
5718 /* If we have some deferred plabels, then we need to switch into the
5719 data or readonly data section, and align it to a 4 byte boundary
5720 before outputting the deferred plabels. */
5721 if (n_deferred_plabels)
5722 {
5723 switch_to_section (flag_pic ? data_section : readonly_data_section);
5724 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5725 }
5726
5727 /* Now output the deferred plabels. */
5728 for (i = 0; i < n_deferred_plabels; i++)
5729 {
5730 targetm.asm_out.internal_label (asm_out_file, "L",
5731 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5732 assemble_integer (deferred_plabels[i].symbol,
5733 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5734 }
5735 }
5736
5737 /* Initialize optabs to point to emulation routines. */
5738
5739 static void
5740 pa_init_libfuncs (void)
5741 {
5742 if (HPUX_LONG_DOUBLE_LIBRARY)
5743 {
5744 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5745 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5746 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5747 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5748 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5749 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5750 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5751 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5752 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5753
5754 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5755 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5756 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5757 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5758 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5759 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5760 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5761
5762 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5763 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5764 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5765 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5766
5767 set_conv_libfunc (sfix_optab, SImode, TFmode,
5768 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5769 : "_U_Qfcnvfxt_quad_to_sgl");
5770 set_conv_libfunc (sfix_optab, DImode, TFmode,
5771 "_U_Qfcnvfxt_quad_to_dbl");
5772 set_conv_libfunc (ufix_optab, SImode, TFmode,
5773 "_U_Qfcnvfxt_quad_to_usgl");
5774 set_conv_libfunc (ufix_optab, DImode, TFmode,
5775 "_U_Qfcnvfxt_quad_to_udbl");
5776
5777 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5778 "_U_Qfcnvxf_sgl_to_quad");
5779 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5780 "_U_Qfcnvxf_dbl_to_quad");
5781 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5782 "_U_Qfcnvxf_usgl_to_quad");
5783 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5784 "_U_Qfcnvxf_udbl_to_quad");
5785 }
5786
5787 if (TARGET_SYNC_LIBCALL)
5788 init_sync_libfuncs (8);
5789 }
5790
5791 /* HP's millicode routines mean something special to the assembler.
5792 Keep track of which ones we have used. */
5793
5794 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5795 static void import_milli (enum millicodes);
5796 static char imported[(int) end1000];
5797 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5798 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5799 #define MILLI_START 10
5800
5801 static void
5802 import_milli (enum millicodes code)
5803 {
5804 char str[sizeof (import_string)];
5805
5806 if (!imported[(int) code])
5807 {
5808 imported[(int) code] = 1;
5809 strcpy (str, import_string);
5810 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5811 output_asm_insn (str, 0);
5812 }
5813 }
5814
5815 /* The register constraints have put the operands and return value in
5816 the proper registers. */
5817
5818 const char *
5819 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5820 {
5821 import_milli (mulI);
5822 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5823 }
5824
5825 /* Emit the rtl for doing a division by a constant. */
5826
5827 /* Do magic division millicodes exist for this value? */
5828 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5829
5830 /* We'll use an array to keep track of the magic millicodes and
5831 whether or not we've used them already. [n][0] is signed, [n][1] is
5832 unsigned. */
5833
5834 static int div_milli[16][2];
5835
5836 int
5837 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5838 {
5839 if (GET_CODE (operands[2]) == CONST_INT
5840 && INTVAL (operands[2]) > 0
5841 && INTVAL (operands[2]) < 16
5842 && pa_magic_milli[INTVAL (operands[2])])
5843 {
5844 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5845
5846 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5847 emit
5848 (gen_rtx_PARALLEL
5849 (VOIDmode,
5850 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5851 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5852 SImode,
5853 gen_rtx_REG (SImode, 26),
5854 operands[2])),
5855 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5856 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5857 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5858 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5859 gen_rtx_CLOBBER (VOIDmode, ret))));
5860 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5861 return 1;
5862 }
5863 return 0;
5864 }
5865
5866 const char *
5867 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5868 {
5869 int divisor;
5870
5871 /* If the divisor is a constant, try to use one of the special
5872 opcodes .*/
5873 if (GET_CODE (operands[0]) == CONST_INT)
5874 {
5875 static char buf[100];
5876 divisor = INTVAL (operands[0]);
5877 if (!div_milli[divisor][unsignedp])
5878 {
5879 div_milli[divisor][unsignedp] = 1;
5880 if (unsignedp)
5881 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5882 else
5883 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5884 }
5885 if (unsignedp)
5886 {
5887 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5888 INTVAL (operands[0]));
5889 return pa_output_millicode_call (insn,
5890 gen_rtx_SYMBOL_REF (SImode, buf));
5891 }
5892 else
5893 {
5894 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5895 INTVAL (operands[0]));
5896 return pa_output_millicode_call (insn,
5897 gen_rtx_SYMBOL_REF (SImode, buf));
5898 }
5899 }
5900 /* Divisor isn't a special constant. */
5901 else
5902 {
5903 if (unsignedp)
5904 {
5905 import_milli (divU);
5906 return pa_output_millicode_call (insn,
5907 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5908 }
5909 else
5910 {
5911 import_milli (divI);
5912 return pa_output_millicode_call (insn,
5913 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5914 }
5915 }
5916 }
5917
5918 /* Output a $$rem millicode to do mod. */
5919
5920 const char *
5921 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5922 {
5923 if (unsignedp)
5924 {
5925 import_milli (remU);
5926 return pa_output_millicode_call (insn,
5927 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5928 }
5929 else
5930 {
5931 import_milli (remI);
5932 return pa_output_millicode_call (insn,
5933 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5934 }
5935 }
5936
5937 void
5938 pa_output_arg_descriptor (rtx_insn *call_insn)
5939 {
5940 const char *arg_regs[4];
5941 machine_mode arg_mode;
5942 rtx link;
5943 int i, output_flag = 0;
5944 int regno;
5945
5946 /* We neither need nor want argument location descriptors for the
5947 64bit runtime environment or the ELF32 environment. */
5948 if (TARGET_64BIT || TARGET_ELF32)
5949 return;
5950
5951 for (i = 0; i < 4; i++)
5952 arg_regs[i] = 0;
5953
5954 /* Specify explicitly that no argument relocations should take place
5955 if using the portable runtime calling conventions. */
5956 if (TARGET_PORTABLE_RUNTIME)
5957 {
5958 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5959 asm_out_file);
5960 return;
5961 }
5962
5963 gcc_assert (CALL_P (call_insn));
5964 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5965 link; link = XEXP (link, 1))
5966 {
5967 rtx use = XEXP (link, 0);
5968
5969 if (! (GET_CODE (use) == USE
5970 && GET_CODE (XEXP (use, 0)) == REG
5971 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5972 continue;
5973
5974 arg_mode = GET_MODE (XEXP (use, 0));
5975 regno = REGNO (XEXP (use, 0));
5976 if (regno >= 23 && regno <= 26)
5977 {
5978 arg_regs[26 - regno] = "GR";
5979 if (arg_mode == DImode)
5980 arg_regs[25 - regno] = "GR";
5981 }
5982 else if (regno >= 32 && regno <= 39)
5983 {
5984 if (arg_mode == SFmode)
5985 arg_regs[(regno - 32) / 2] = "FR";
5986 else
5987 {
5988 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5989 arg_regs[(regno - 34) / 2] = "FR";
5990 arg_regs[(regno - 34) / 2 + 1] = "FU";
5991 #else
5992 arg_regs[(regno - 34) / 2] = "FU";
5993 arg_regs[(regno - 34) / 2 + 1] = "FR";
5994 #endif
5995 }
5996 }
5997 }
5998 fputs ("\t.CALL ", asm_out_file);
5999 for (i = 0; i < 4; i++)
6000 {
6001 if (arg_regs[i])
6002 {
6003 if (output_flag++)
6004 fputc (',', asm_out_file);
6005 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6006 }
6007 }
6008 fputc ('\n', asm_out_file);
6009 }
6010 \f
6011 /* Inform reload about cases where moving X with a mode MODE to or from
6012 a register in RCLASS requires an extra scratch or immediate register.
6013 Return the class needed for the immediate register. */
6014
6015 static reg_class_t
6016 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6017 machine_mode mode, secondary_reload_info *sri)
6018 {
6019 int regno;
6020 enum reg_class rclass = (enum reg_class) rclass_i;
6021
6022 /* Handle the easy stuff first. */
6023 if (rclass == R1_REGS)
6024 return NO_REGS;
6025
6026 if (REG_P (x))
6027 {
6028 regno = REGNO (x);
6029 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6030 return NO_REGS;
6031 }
6032 else
6033 regno = -1;
6034
6035 /* If we have something like (mem (mem (...)), we can safely assume the
6036 inner MEM will end up in a general register after reloading, so there's
6037 no need for a secondary reload. */
6038 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6039 return NO_REGS;
6040
6041 /* Trying to load a constant into a FP register during PIC code
6042 generation requires %r1 as a scratch register. For float modes,
6043 the only legitimate constant is CONST0_RTX. However, there are
6044 a few patterns that accept constant double operands. */
6045 if (flag_pic
6046 && FP_REG_CLASS_P (rclass)
6047 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6048 {
6049 switch (mode)
6050 {
6051 case SImode:
6052 sri->icode = CODE_FOR_reload_insi_r1;
6053 break;
6054
6055 case DImode:
6056 sri->icode = CODE_FOR_reload_indi_r1;
6057 break;
6058
6059 case SFmode:
6060 sri->icode = CODE_FOR_reload_insf_r1;
6061 break;
6062
6063 case DFmode:
6064 sri->icode = CODE_FOR_reload_indf_r1;
6065 break;
6066
6067 default:
6068 gcc_unreachable ();
6069 }
6070 return NO_REGS;
6071 }
6072
6073 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6074 register when we're generating PIC code or when the operand isn't
6075 readonly. */
6076 if (pa_symbolic_expression_p (x))
6077 {
6078 if (GET_CODE (x) == HIGH)
6079 x = XEXP (x, 0);
6080
6081 if (flag_pic || !read_only_operand (x, VOIDmode))
6082 {
6083 switch (mode)
6084 {
6085 case SImode:
6086 sri->icode = CODE_FOR_reload_insi_r1;
6087 break;
6088
6089 case DImode:
6090 sri->icode = CODE_FOR_reload_indi_r1;
6091 break;
6092
6093 default:
6094 gcc_unreachable ();
6095 }
6096 return NO_REGS;
6097 }
6098 }
6099
6100 /* Profiling showed the PA port spends about 1.3% of its compilation
6101 time in true_regnum from calls inside pa_secondary_reload_class. */
6102 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6103 regno = true_regnum (x);
6104
6105 /* Handle reloads for floating point loads and stores. */
6106 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6107 && FP_REG_CLASS_P (rclass))
6108 {
6109 if (MEM_P (x))
6110 {
6111 x = XEXP (x, 0);
6112
6113 /* We don't need a secondary reload for indexed memory addresses.
6114
6115 When INT14_OK_STRICT is true, it might appear that we could
6116 directly allow register indirect memory addresses. However,
6117 this doesn't work because we don't support SUBREGs in
6118 floating-point register copies and reload doesn't tell us
6119 when it's going to use a SUBREG. */
6120 if (IS_INDEX_ADDR_P (x))
6121 return NO_REGS;
6122 }
6123
6124 /* Request a secondary reload with a general scratch register
6125 for everything else. ??? Could symbolic operands be handled
6126 directly when generating non-pic PA 2.0 code? */
6127 sri->icode = (in_p
6128 ? direct_optab_handler (reload_in_optab, mode)
6129 : direct_optab_handler (reload_out_optab, mode));
6130 return NO_REGS;
6131 }
6132
6133 /* A SAR<->FP register copy requires an intermediate general register
6134 and secondary memory. We need a secondary reload with a general
6135 scratch register for spills. */
6136 if (rclass == SHIFT_REGS)
6137 {
6138 /* Handle spill. */
6139 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6140 {
6141 sri->icode = (in_p
6142 ? direct_optab_handler (reload_in_optab, mode)
6143 : direct_optab_handler (reload_out_optab, mode));
6144 return NO_REGS;
6145 }
6146
6147 /* Handle FP copy. */
6148 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6149 return GENERAL_REGS;
6150 }
6151
6152 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6153 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6154 && FP_REG_CLASS_P (rclass))
6155 return GENERAL_REGS;
6156
6157 return NO_REGS;
6158 }
6159
6160 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6161 is only marked as live on entry by df-scan when it is a fixed
6162 register. It isn't a fixed register in the 64-bit runtime,
6163 so we need to mark it here. */
6164
6165 static void
6166 pa_extra_live_on_entry (bitmap regs)
6167 {
6168 if (TARGET_64BIT)
6169 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6170 }
6171
6172 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6173 to prevent it from being deleted. */
6174
6175 rtx
6176 pa_eh_return_handler_rtx (void)
6177 {
6178 rtx tmp;
6179
6180 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6181 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6182 tmp = gen_rtx_MEM (word_mode, tmp);
6183 tmp->volatil = 1;
6184 return tmp;
6185 }
6186
6187 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6188 by invisible reference. As a GCC extension, we also pass anything
6189 with a zero or variable size by reference.
6190
6191 The 64-bit runtime does not describe passing any types by invisible
6192 reference. The internals of GCC can't currently handle passing
6193 empty structures, and zero or variable length arrays when they are
6194 not passed entirely on the stack or by reference. Thus, as a GCC
6195 extension, we pass these types by reference. The HP compiler doesn't
6196 support these types, so hopefully there shouldn't be any compatibility
6197 issues. This may have to be revisited when HP releases a C99 compiler
6198 or updates the ABI. */
6199
6200 static bool
6201 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6202 machine_mode mode, const_tree type,
6203 bool named ATTRIBUTE_UNUSED)
6204 {
6205 HOST_WIDE_INT size;
6206
6207 if (type)
6208 size = int_size_in_bytes (type);
6209 else
6210 size = GET_MODE_SIZE (mode);
6211
6212 if (TARGET_64BIT)
6213 return size <= 0;
6214 else
6215 return size <= 0 || size > 8;
6216 }
6217
6218 enum direction
6219 pa_function_arg_padding (machine_mode mode, const_tree type)
6220 {
6221 if (mode == BLKmode
6222 || (TARGET_64BIT
6223 && type
6224 && (AGGREGATE_TYPE_P (type)
6225 || TREE_CODE (type) == COMPLEX_TYPE
6226 || TREE_CODE (type) == VECTOR_TYPE)))
6227 {
6228 /* Return none if justification is not required. */
6229 if (type
6230 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6231 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6232 return none;
6233
6234 /* The directions set here are ignored when a BLKmode argument larger
6235 than a word is placed in a register. Different code is used for
6236 the stack and registers. This makes it difficult to have a
6237 consistent data representation for both the stack and registers.
6238 For both runtimes, the justification and padding for arguments on
6239 the stack and in registers should be identical. */
6240 if (TARGET_64BIT)
6241 /* The 64-bit runtime specifies left justification for aggregates. */
6242 return upward;
6243 else
6244 /* The 32-bit runtime architecture specifies right justification.
6245 When the argument is passed on the stack, the argument is padded
6246 with garbage on the left. The HP compiler pads with zeros. */
6247 return downward;
6248 }
6249
6250 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6251 return downward;
6252 else
6253 return none;
6254 }
6255
6256 \f
6257 /* Do what is necessary for `va_start'. We look at the current function
6258 to determine if stdargs or varargs is used and fill in an initial
6259 va_list. A pointer to this constructor is returned. */
6260
6261 static rtx
6262 hppa_builtin_saveregs (void)
6263 {
6264 rtx offset, dest;
6265 tree fntype = TREE_TYPE (current_function_decl);
6266 int argadj = ((!stdarg_p (fntype))
6267 ? UNITS_PER_WORD : 0);
6268
6269 if (argadj)
6270 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6271 else
6272 offset = crtl->args.arg_offset_rtx;
6273
6274 if (TARGET_64BIT)
6275 {
6276 int i, off;
6277
6278 /* Adjust for varargs/stdarg differences. */
6279 if (argadj)
6280 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6281 else
6282 offset = crtl->args.arg_offset_rtx;
6283
6284 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6285 from the incoming arg pointer and growing to larger addresses. */
6286 for (i = 26, off = -64; i >= 19; i--, off += 8)
6287 emit_move_insn (gen_rtx_MEM (word_mode,
6288 plus_constant (Pmode,
6289 arg_pointer_rtx, off)),
6290 gen_rtx_REG (word_mode, i));
6291
6292 /* The incoming args pointer points just beyond the flushback area;
6293 normally this is not a serious concern. However, when we are doing
6294 varargs/stdargs we want to make the arg pointer point to the start
6295 of the incoming argument area. */
6296 emit_move_insn (virtual_incoming_args_rtx,
6297 plus_constant (Pmode, arg_pointer_rtx, -64));
6298
6299 /* Now return a pointer to the first anonymous argument. */
6300 return copy_to_reg (expand_binop (Pmode, add_optab,
6301 virtual_incoming_args_rtx,
6302 offset, 0, 0, OPTAB_LIB_WIDEN));
6303 }
6304
6305 /* Store general registers on the stack. */
6306 dest = gen_rtx_MEM (BLKmode,
6307 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6308 -16));
6309 set_mem_alias_set (dest, get_varargs_alias_set ());
6310 set_mem_align (dest, BITS_PER_WORD);
6311 move_block_from_reg (23, dest, 4);
6312
6313 /* move_block_from_reg will emit code to store the argument registers
6314 individually as scalar stores.
6315
6316 However, other insns may later load from the same addresses for
6317 a structure load (passing a struct to a varargs routine).
6318
6319 The alias code assumes that such aliasing can never happen, so we
6320 have to keep memory referencing insns from moving up beyond the
6321 last argument register store. So we emit a blockage insn here. */
6322 emit_insn (gen_blockage ());
6323
6324 return copy_to_reg (expand_binop (Pmode, add_optab,
6325 crtl->args.internal_arg_pointer,
6326 offset, 0, 0, OPTAB_LIB_WIDEN));
6327 }
6328
6329 static void
6330 hppa_va_start (tree valist, rtx nextarg)
6331 {
6332 nextarg = expand_builtin_saveregs ();
6333 std_expand_builtin_va_start (valist, nextarg);
6334 }
6335
6336 static tree
6337 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6338 gimple_seq *post_p)
6339 {
6340 if (TARGET_64BIT)
6341 {
6342 /* Args grow upward. We can use the generic routines. */
6343 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6344 }
6345 else /* !TARGET_64BIT */
6346 {
6347 tree ptr = build_pointer_type (type);
6348 tree valist_type;
6349 tree t, u;
6350 unsigned int size, ofs;
6351 bool indirect;
6352
6353 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6354 if (indirect)
6355 {
6356 type = ptr;
6357 ptr = build_pointer_type (type);
6358 }
6359 size = int_size_in_bytes (type);
6360 valist_type = TREE_TYPE (valist);
6361
6362 /* Args grow down. Not handled by generic routines. */
6363
6364 u = fold_convert (sizetype, size_in_bytes (type));
6365 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6366 t = fold_build_pointer_plus (valist, u);
6367
6368 /* Align to 4 or 8 byte boundary depending on argument size. */
6369
6370 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6371 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6372 t = fold_convert (valist_type, t);
6373
6374 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6375
6376 ofs = (8 - size) % 4;
6377 if (ofs != 0)
6378 t = fold_build_pointer_plus_hwi (t, ofs);
6379
6380 t = fold_convert (ptr, t);
6381 t = build_va_arg_indirect_ref (t);
6382
6383 if (indirect)
6384 t = build_va_arg_indirect_ref (t);
6385
6386 return t;
6387 }
6388 }
6389
6390 /* True if MODE is valid for the target. By "valid", we mean able to
6391 be manipulated in non-trivial ways. In particular, this means all
6392 the arithmetic is supported.
6393
6394 Currently, TImode is not valid as the HP 64-bit runtime documentation
6395 doesn't document the alignment and calling conventions for this type.
6396 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6397 2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
6398
6399 static bool
6400 pa_scalar_mode_supported_p (machine_mode mode)
6401 {
6402 int precision = GET_MODE_PRECISION (mode);
6403
6404 switch (GET_MODE_CLASS (mode))
6405 {
6406 case MODE_PARTIAL_INT:
6407 case MODE_INT:
6408 if (precision == CHAR_TYPE_SIZE)
6409 return true;
6410 if (precision == SHORT_TYPE_SIZE)
6411 return true;
6412 if (precision == INT_TYPE_SIZE)
6413 return true;
6414 if (precision == LONG_TYPE_SIZE)
6415 return true;
6416 if (precision == LONG_LONG_TYPE_SIZE)
6417 return true;
6418 return false;
6419
6420 case MODE_FLOAT:
6421 if (precision == FLOAT_TYPE_SIZE)
6422 return true;
6423 if (precision == DOUBLE_TYPE_SIZE)
6424 return true;
6425 if (precision == LONG_DOUBLE_TYPE_SIZE)
6426 return true;
6427 return false;
6428
6429 case MODE_DECIMAL_FLOAT:
6430 return false;
6431
6432 default:
6433 gcc_unreachable ();
6434 }
6435 }
6436
6437 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6438 it branches into the delay slot. Otherwise, return FALSE. */
6439
6440 static bool
6441 branch_to_delay_slot_p (rtx_insn *insn)
6442 {
6443 rtx_insn *jump_insn;
6444
6445 if (dbr_sequence_length ())
6446 return FALSE;
6447
6448 jump_insn = next_active_insn (JUMP_LABEL (insn));
6449 while (insn)
6450 {
6451 insn = next_active_insn (insn);
6452 if (jump_insn == insn)
6453 return TRUE;
6454
6455 /* We can't rely on the length of asms. So, we return FALSE when
6456 the branch is followed by an asm. */
6457 if (!insn
6458 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6459 || asm_noperands (PATTERN (insn)) >= 0
6460 || get_attr_length (insn) > 0)
6461 break;
6462 }
6463
6464 return FALSE;
6465 }
6466
6467 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6468
6469 This occurs when INSN has an unfilled delay slot and is followed
6470 by an asm. Disaster can occur if the asm is empty and the jump
6471 branches into the delay slot. So, we add a nop in the delay slot
6472 when this occurs. */
6473
6474 static bool
6475 branch_needs_nop_p (rtx_insn *insn)
6476 {
6477 rtx_insn *jump_insn;
6478
6479 if (dbr_sequence_length ())
6480 return FALSE;
6481
6482 jump_insn = next_active_insn (JUMP_LABEL (insn));
6483 while (insn)
6484 {
6485 insn = next_active_insn (insn);
6486 if (!insn || jump_insn == insn)
6487 return TRUE;
6488
6489 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6490 || asm_noperands (PATTERN (insn)) >= 0)
6491 && get_attr_length (insn) > 0)
6492 break;
6493 }
6494
6495 return FALSE;
6496 }
6497
6498 /* Return TRUE if INSN, a forward jump insn, can use nullification
6499 to skip the following instruction. This avoids an extra cycle due
6500 to a mis-predicted branch when we fall through. */
6501
6502 static bool
6503 use_skip_p (rtx_insn *insn)
6504 {
6505 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL (insn));
6506
6507 while (insn)
6508 {
6509 insn = next_active_insn (insn);
6510
6511 /* We can't rely on the length of asms, so we can't skip asms. */
6512 if (!insn
6513 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6514 || asm_noperands (PATTERN (insn)) >= 0)
6515 break;
6516 if (get_attr_length (insn) == 4
6517 && jump_insn == next_active_insn (insn))
6518 return TRUE;
6519 if (get_attr_length (insn) > 0)
6520 break;
6521 }
6522
6523 return FALSE;
6524 }
6525
6526 /* This routine handles all the normal conditional branch sequences we
6527 might need to generate. It handles compare immediate vs compare
6528 register, nullification of delay slots, varying length branches,
6529 negated branches, and all combinations of the above. It returns the
6530 output appropriate to emit the branch corresponding to all given
6531 parameters. */
6532
6533 const char *
6534 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6535 {
6536 static char buf[100];
6537 bool useskip;
6538 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6539 int length = get_attr_length (insn);
6540 int xdelay;
6541
6542 /* A conditional branch to the following instruction (e.g. the delay slot)
6543 is asking for a disaster. This can happen when not optimizing and
6544 when jump optimization fails.
6545
6546 While it is usually safe to emit nothing, this can fail if the
6547 preceding instruction is a nullified branch with an empty delay
6548 slot and the same branch target as this branch. We could check
6549 for this but jump optimization should eliminate nop jumps. It
6550 is always safe to emit a nop. */
6551 if (branch_to_delay_slot_p (insn))
6552 return "nop";
6553
6554 /* The doubleword form of the cmpib instruction doesn't have the LEU
6555 and GTU conditions while the cmpb instruction does. Since we accept
6556 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6557 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6558 operands[2] = gen_rtx_REG (DImode, 0);
6559 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6560 operands[1] = gen_rtx_REG (DImode, 0);
6561
6562 /* If this is a long branch with its delay slot unfilled, set `nullify'
6563 as it can nullify the delay slot and save a nop. */
6564 if (length == 8 && dbr_sequence_length () == 0)
6565 nullify = 1;
6566
6567 /* If this is a short forward conditional branch which did not get
6568 its delay slot filled, the delay slot can still be nullified. */
6569 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6570 nullify = forward_branch_p (insn);
6571
6572 /* A forward branch over a single nullified insn can be done with a
6573 comclr instruction. This avoids a single cycle penalty due to
6574 mis-predicted branch if we fall through (branch not taken). */
6575 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6576
6577 switch (length)
6578 {
6579 /* All short conditional branches except backwards with an unfilled
6580 delay slot. */
6581 case 4:
6582 if (useskip)
6583 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6584 else
6585 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6586 if (GET_MODE (operands[1]) == DImode)
6587 strcat (buf, "*");
6588 if (negated)
6589 strcat (buf, "%B3");
6590 else
6591 strcat (buf, "%S3");
6592 if (useskip)
6593 strcat (buf, " %2,%r1,%%r0");
6594 else if (nullify)
6595 {
6596 if (branch_needs_nop_p (insn))
6597 strcat (buf, ",n %2,%r1,%0%#");
6598 else
6599 strcat (buf, ",n %2,%r1,%0");
6600 }
6601 else
6602 strcat (buf, " %2,%r1,%0");
6603 break;
6604
6605 /* All long conditionals. Note a short backward branch with an
6606 unfilled delay slot is treated just like a long backward branch
6607 with an unfilled delay slot. */
6608 case 8:
6609 /* Handle weird backwards branch with a filled delay slot
6610 which is nullified. */
6611 if (dbr_sequence_length () != 0
6612 && ! forward_branch_p (insn)
6613 && nullify)
6614 {
6615 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6616 if (GET_MODE (operands[1]) == DImode)
6617 strcat (buf, "*");
6618 if (negated)
6619 strcat (buf, "%S3");
6620 else
6621 strcat (buf, "%B3");
6622 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6623 }
6624 /* Handle short backwards branch with an unfilled delay slot.
6625 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6626 taken and untaken branches. */
6627 else if (dbr_sequence_length () == 0
6628 && ! forward_branch_p (insn)
6629 && INSN_ADDRESSES_SET_P ()
6630 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6631 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6632 {
6633 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6634 if (GET_MODE (operands[1]) == DImode)
6635 strcat (buf, "*");
6636 if (negated)
6637 strcat (buf, "%B3 %2,%r1,%0%#");
6638 else
6639 strcat (buf, "%S3 %2,%r1,%0%#");
6640 }
6641 else
6642 {
6643 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6644 if (GET_MODE (operands[1]) == DImode)
6645 strcat (buf, "*");
6646 if (negated)
6647 strcat (buf, "%S3");
6648 else
6649 strcat (buf, "%B3");
6650 if (nullify)
6651 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6652 else
6653 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6654 }
6655 break;
6656
6657 default:
6658 /* The reversed conditional branch must branch over one additional
6659 instruction if the delay slot is filled and needs to be extracted
6660 by pa_output_lbranch. If the delay slot is empty or this is a
6661 nullified forward branch, the instruction after the reversed
6662 condition branch must be nullified. */
6663 if (dbr_sequence_length () == 0
6664 || (nullify && forward_branch_p (insn)))
6665 {
6666 nullify = 1;
6667 xdelay = 0;
6668 operands[4] = GEN_INT (length);
6669 }
6670 else
6671 {
6672 xdelay = 1;
6673 operands[4] = GEN_INT (length + 4);
6674 }
6675
6676 /* Create a reversed conditional branch which branches around
6677 the following insns. */
6678 if (GET_MODE (operands[1]) != DImode)
6679 {
6680 if (nullify)
6681 {
6682 if (negated)
6683 strcpy (buf,
6684 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6685 else
6686 strcpy (buf,
6687 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6688 }
6689 else
6690 {
6691 if (negated)
6692 strcpy (buf,
6693 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6694 else
6695 strcpy (buf,
6696 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6697 }
6698 }
6699 else
6700 {
6701 if (nullify)
6702 {
6703 if (negated)
6704 strcpy (buf,
6705 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6706 else
6707 strcpy (buf,
6708 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6709 }
6710 else
6711 {
6712 if (negated)
6713 strcpy (buf,
6714 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6715 else
6716 strcpy (buf,
6717 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6718 }
6719 }
6720
6721 output_asm_insn (buf, operands);
6722 return pa_output_lbranch (operands[0], insn, xdelay);
6723 }
6724 return buf;
6725 }
6726
6727 /* Output a PIC pc-relative instruction sequence to load the address of
6728 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6729 or a code label. OPERANDS[1] specifies the register to use to load
6730 the program counter. OPERANDS[3] may be used for label generation
6731 The sequence is always three instructions in length. The program
6732 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6733 Register %r1 is clobbered. */
6734
6735 static void
6736 pa_output_pic_pcrel_sequence (rtx *operands)
6737 {
6738 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6739 if (TARGET_PA_20)
6740 {
6741 /* We can use mfia to determine the current program counter. */
6742 if (TARGET_SOM || !TARGET_GAS)
6743 {
6744 operands[3] = gen_label_rtx ();
6745 targetm.asm_out.internal_label (asm_out_file, "L",
6746 CODE_LABEL_NUMBER (operands[3]));
6747 output_asm_insn ("mfia %1", operands);
6748 output_asm_insn ("addil L'%0-%l3,%1", operands);
6749 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6750 }
6751 else
6752 {
6753 output_asm_insn ("mfia %1", operands);
6754 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6755 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6756 }
6757 }
6758 else
6759 {
6760 /* We need to use a branch to determine the current program counter. */
6761 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6762 if (TARGET_SOM || !TARGET_GAS)
6763 {
6764 operands[3] = gen_label_rtx ();
6765 output_asm_insn ("addil L'%0-%l3,%1", operands);
6766 targetm.asm_out.internal_label (asm_out_file, "L",
6767 CODE_LABEL_NUMBER (operands[3]));
6768 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6769 }
6770 else
6771 {
6772 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6773 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6774 }
6775 }
6776 }
6777
6778 /* This routine handles output of long unconditional branches that
6779 exceed the maximum range of a simple branch instruction. Since
6780 we don't have a register available for the branch, we save register
6781 %r1 in the frame marker, load the branch destination DEST into %r1,
6782 execute the branch, and restore %r1 in the delay slot of the branch.
6783
6784 Since long branches may have an insn in the delay slot and the
6785 delay slot is used to restore %r1, we in general need to extract
6786 this insn and execute it before the branch. However, to facilitate
6787 use of this function by conditional branches, we also provide an
6788 option to not extract the delay insn so that it will be emitted
6789 after the long branch. So, if there is an insn in the delay slot,
6790 it is extracted if XDELAY is nonzero.
6791
6792 The lengths of the various long-branch sequences are 20, 16 and 24
6793 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6794
6795 const char *
6796 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6797 {
6798 rtx xoperands[4];
6799
6800 xoperands[0] = dest;
6801
6802 /* First, free up the delay slot. */
6803 if (xdelay && dbr_sequence_length () != 0)
6804 {
6805 /* We can't handle a jump in the delay slot. */
6806 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6807
6808 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6809 optimize, 0, NULL);
6810
6811 /* Now delete the delay insn. */
6812 SET_INSN_DELETED (NEXT_INSN (insn));
6813 }
6814
6815 /* Output an insn to save %r1. The runtime documentation doesn't
6816 specify whether the "Clean Up" slot in the callers frame can
6817 be clobbered by the callee. It isn't copied by HP's builtin
6818 alloca, so this suggests that it can be clobbered if necessary.
6819 The "Static Link" location is copied by HP builtin alloca, so
6820 we avoid using it. Using the cleanup slot might be a problem
6821 if we have to interoperate with languages that pass cleanup
6822 information. However, it should be possible to handle these
6823 situations with GCC's asm feature.
6824
6825 The "Current RP" slot is reserved for the called procedure, so
6826 we try to use it when we don't have a frame of our own. It's
6827 rather unlikely that we won't have a frame when we need to emit
6828 a very long branch.
6829
6830 Really the way to go long term is a register scavenger; goto
6831 the target of the jump and find a register which we can use
6832 as a scratch to hold the value in %r1. Then, we wouldn't have
6833 to free up the delay slot or clobber a slot that may be needed
6834 for other purposes. */
6835 if (TARGET_64BIT)
6836 {
6837 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6838 /* Use the return pointer slot in the frame marker. */
6839 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6840 else
6841 /* Use the slot at -40 in the frame marker since HP builtin
6842 alloca doesn't copy it. */
6843 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6844 }
6845 else
6846 {
6847 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6848 /* Use the return pointer slot in the frame marker. */
6849 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6850 else
6851 /* Use the "Clean Up" slot in the frame marker. In GCC,
6852 the only other use of this location is for copying a
6853 floating point double argument from a floating-point
6854 register to two general registers. The copy is done
6855 as an "atomic" operation when outputting a call, so it
6856 won't interfere with our using the location here. */
6857 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6858 }
6859
6860 if (TARGET_PORTABLE_RUNTIME)
6861 {
6862 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6863 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6864 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6865 }
6866 else if (flag_pic)
6867 {
6868 xoperands[1] = gen_rtx_REG (Pmode, 1);
6869 xoperands[2] = xoperands[1];
6870 pa_output_pic_pcrel_sequence (xoperands);
6871 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6872 }
6873 else
6874 /* Now output a very long branch to the original target. */
6875 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6876
6877 /* Now restore the value of %r1 in the delay slot. */
6878 if (TARGET_64BIT)
6879 {
6880 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6881 return "ldd -16(%%r30),%%r1";
6882 else
6883 return "ldd -40(%%r30),%%r1";
6884 }
6885 else
6886 {
6887 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6888 return "ldw -20(%%r30),%%r1";
6889 else
6890 return "ldw -12(%%r30),%%r1";
6891 }
6892 }
6893
6894 /* This routine handles all the branch-on-bit conditional branch sequences we
6895 might need to generate. It handles nullification of delay slots,
6896 varying length branches, negated branches and all combinations of the
6897 above. it returns the appropriate output template to emit the branch. */
6898
6899 const char *
6900 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6901 {
6902 static char buf[100];
6903 bool useskip;
6904 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6905 int length = get_attr_length (insn);
6906 int xdelay;
6907
6908 /* A conditional branch to the following instruction (e.g. the delay slot) is
6909 asking for a disaster. I do not think this can happen as this pattern
6910 is only used when optimizing; jump optimization should eliminate the
6911 jump. But be prepared just in case. */
6912
6913 if (branch_to_delay_slot_p (insn))
6914 return "nop";
6915
6916 /* If this is a long branch with its delay slot unfilled, set `nullify'
6917 as it can nullify the delay slot and save a nop. */
6918 if (length == 8 && dbr_sequence_length () == 0)
6919 nullify = 1;
6920
6921 /* If this is a short forward conditional branch which did not get
6922 its delay slot filled, the delay slot can still be nullified. */
6923 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6924 nullify = forward_branch_p (insn);
6925
6926 /* A forward branch over a single nullified insn can be done with a
6927 extrs instruction. This avoids a single cycle penalty due to
6928 mis-predicted branch if we fall through (branch not taken). */
6929 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6930
6931 switch (length)
6932 {
6933
6934 /* All short conditional branches except backwards with an unfilled
6935 delay slot. */
6936 case 4:
6937 if (useskip)
6938 strcpy (buf, "{extrs,|extrw,s,}");
6939 else
6940 strcpy (buf, "bb,");
6941 if (useskip && GET_MODE (operands[0]) == DImode)
6942 strcpy (buf, "extrd,s,*");
6943 else if (GET_MODE (operands[0]) == DImode)
6944 strcpy (buf, "bb,*");
6945 if ((which == 0 && negated)
6946 || (which == 1 && ! negated))
6947 strcat (buf, ">=");
6948 else
6949 strcat (buf, "<");
6950 if (useskip)
6951 strcat (buf, " %0,%1,1,%%r0");
6952 else if (nullify && negated)
6953 {
6954 if (branch_needs_nop_p (insn))
6955 strcat (buf, ",n %0,%1,%3%#");
6956 else
6957 strcat (buf, ",n %0,%1,%3");
6958 }
6959 else if (nullify && ! negated)
6960 {
6961 if (branch_needs_nop_p (insn))
6962 strcat (buf, ",n %0,%1,%2%#");
6963 else
6964 strcat (buf, ",n %0,%1,%2");
6965 }
6966 else if (! nullify && negated)
6967 strcat (buf, " %0,%1,%3");
6968 else if (! nullify && ! negated)
6969 strcat (buf, " %0,%1,%2");
6970 break;
6971
6972 /* All long conditionals. Note a short backward branch with an
6973 unfilled delay slot is treated just like a long backward branch
6974 with an unfilled delay slot. */
6975 case 8:
6976 /* Handle weird backwards branch with a filled delay slot
6977 which is nullified. */
6978 if (dbr_sequence_length () != 0
6979 && ! forward_branch_p (insn)
6980 && nullify)
6981 {
6982 strcpy (buf, "bb,");
6983 if (GET_MODE (operands[0]) == DImode)
6984 strcat (buf, "*");
6985 if ((which == 0 && negated)
6986 || (which == 1 && ! negated))
6987 strcat (buf, "<");
6988 else
6989 strcat (buf, ">=");
6990 if (negated)
6991 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6992 else
6993 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6994 }
6995 /* Handle short backwards branch with an unfilled delay slot.
6996 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6997 taken and untaken branches. */
6998 else if (dbr_sequence_length () == 0
6999 && ! forward_branch_p (insn)
7000 && INSN_ADDRESSES_SET_P ()
7001 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7002 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7003 {
7004 strcpy (buf, "bb,");
7005 if (GET_MODE (operands[0]) == DImode)
7006 strcat (buf, "*");
7007 if ((which == 0 && negated)
7008 || (which == 1 && ! negated))
7009 strcat (buf, ">=");
7010 else
7011 strcat (buf, "<");
7012 if (negated)
7013 strcat (buf, " %0,%1,%3%#");
7014 else
7015 strcat (buf, " %0,%1,%2%#");
7016 }
7017 else
7018 {
7019 if (GET_MODE (operands[0]) == DImode)
7020 strcpy (buf, "extrd,s,*");
7021 else
7022 strcpy (buf, "{extrs,|extrw,s,}");
7023 if ((which == 0 && negated)
7024 || (which == 1 && ! negated))
7025 strcat (buf, "<");
7026 else
7027 strcat (buf, ">=");
7028 if (nullify && negated)
7029 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7030 else if (nullify && ! negated)
7031 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7032 else if (negated)
7033 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7034 else
7035 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7036 }
7037 break;
7038
7039 default:
7040 /* The reversed conditional branch must branch over one additional
7041 instruction if the delay slot is filled and needs to be extracted
7042 by pa_output_lbranch. If the delay slot is empty or this is a
7043 nullified forward branch, the instruction after the reversed
7044 condition branch must be nullified. */
7045 if (dbr_sequence_length () == 0
7046 || (nullify && forward_branch_p (insn)))
7047 {
7048 nullify = 1;
7049 xdelay = 0;
7050 operands[4] = GEN_INT (length);
7051 }
7052 else
7053 {
7054 xdelay = 1;
7055 operands[4] = GEN_INT (length + 4);
7056 }
7057
7058 if (GET_MODE (operands[0]) == DImode)
7059 strcpy (buf, "bb,*");
7060 else
7061 strcpy (buf, "bb,");
7062 if ((which == 0 && negated)
7063 || (which == 1 && !negated))
7064 strcat (buf, "<");
7065 else
7066 strcat (buf, ">=");
7067 if (nullify)
7068 strcat (buf, ",n %0,%1,.+%4");
7069 else
7070 strcat (buf, " %0,%1,.+%4");
7071 output_asm_insn (buf, operands);
7072 return pa_output_lbranch (negated ? operands[3] : operands[2],
7073 insn, xdelay);
7074 }
7075 return buf;
7076 }
7077
7078 /* This routine handles all the branch-on-variable-bit conditional branch
7079 sequences we might need to generate. It handles nullification of delay
7080 slots, varying length branches, negated branches and all combinations
7081 of the above. it returns the appropriate output template to emit the
7082 branch. */
7083
7084 const char *
7085 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7086 int which)
7087 {
7088 static char buf[100];
7089 bool useskip;
7090 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7091 int length = get_attr_length (insn);
7092 int xdelay;
7093
7094 /* A conditional branch to the following instruction (e.g. the delay slot) is
7095 asking for a disaster. I do not think this can happen as this pattern
7096 is only used when optimizing; jump optimization should eliminate the
7097 jump. But be prepared just in case. */
7098
7099 if (branch_to_delay_slot_p (insn))
7100 return "nop";
7101
7102 /* If this is a long branch with its delay slot unfilled, set `nullify'
7103 as it can nullify the delay slot and save a nop. */
7104 if (length == 8 && dbr_sequence_length () == 0)
7105 nullify = 1;
7106
7107 /* If this is a short forward conditional branch which did not get
7108 its delay slot filled, the delay slot can still be nullified. */
7109 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7110 nullify = forward_branch_p (insn);
7111
7112 /* A forward branch over a single nullified insn can be done with a
7113 extrs instruction. This avoids a single cycle penalty due to
7114 mis-predicted branch if we fall through (branch not taken). */
7115 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7116
7117 switch (length)
7118 {
7119
7120 /* All short conditional branches except backwards with an unfilled
7121 delay slot. */
7122 case 4:
7123 if (useskip)
7124 strcpy (buf, "{vextrs,|extrw,s,}");
7125 else
7126 strcpy (buf, "{bvb,|bb,}");
7127 if (useskip && GET_MODE (operands[0]) == DImode)
7128 strcpy (buf, "extrd,s,*");
7129 else if (GET_MODE (operands[0]) == DImode)
7130 strcpy (buf, "bb,*");
7131 if ((which == 0 && negated)
7132 || (which == 1 && ! negated))
7133 strcat (buf, ">=");
7134 else
7135 strcat (buf, "<");
7136 if (useskip)
7137 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7138 else if (nullify && negated)
7139 {
7140 if (branch_needs_nop_p (insn))
7141 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7142 else
7143 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7144 }
7145 else if (nullify && ! negated)
7146 {
7147 if (branch_needs_nop_p (insn))
7148 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7149 else
7150 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7151 }
7152 else if (! nullify && negated)
7153 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7154 else if (! nullify && ! negated)
7155 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7156 break;
7157
7158 /* All long conditionals. Note a short backward branch with an
7159 unfilled delay slot is treated just like a long backward branch
7160 with an unfilled delay slot. */
7161 case 8:
7162 /* Handle weird backwards branch with a filled delay slot
7163 which is nullified. */
7164 if (dbr_sequence_length () != 0
7165 && ! forward_branch_p (insn)
7166 && nullify)
7167 {
7168 strcpy (buf, "{bvb,|bb,}");
7169 if (GET_MODE (operands[0]) == DImode)
7170 strcat (buf, "*");
7171 if ((which == 0 && negated)
7172 || (which == 1 && ! negated))
7173 strcat (buf, "<");
7174 else
7175 strcat (buf, ">=");
7176 if (negated)
7177 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7178 else
7179 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7180 }
7181 /* Handle short backwards branch with an unfilled delay slot.
7182 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7183 taken and untaken branches. */
7184 else if (dbr_sequence_length () == 0
7185 && ! forward_branch_p (insn)
7186 && INSN_ADDRESSES_SET_P ()
7187 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7188 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7189 {
7190 strcpy (buf, "{bvb,|bb,}");
7191 if (GET_MODE (operands[0]) == DImode)
7192 strcat (buf, "*");
7193 if ((which == 0 && negated)
7194 || (which == 1 && ! negated))
7195 strcat (buf, ">=");
7196 else
7197 strcat (buf, "<");
7198 if (negated)
7199 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7200 else
7201 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7202 }
7203 else
7204 {
7205 strcpy (buf, "{vextrs,|extrw,s,}");
7206 if (GET_MODE (operands[0]) == DImode)
7207 strcpy (buf, "extrd,s,*");
7208 if ((which == 0 && negated)
7209 || (which == 1 && ! negated))
7210 strcat (buf, "<");
7211 else
7212 strcat (buf, ">=");
7213 if (nullify && negated)
7214 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7215 else if (nullify && ! negated)
7216 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7217 else if (negated)
7218 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7219 else
7220 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7221 }
7222 break;
7223
7224 default:
7225 /* The reversed conditional branch must branch over one additional
7226 instruction if the delay slot is filled and needs to be extracted
7227 by pa_output_lbranch. If the delay slot is empty or this is a
7228 nullified forward branch, the instruction after the reversed
7229 condition branch must be nullified. */
7230 if (dbr_sequence_length () == 0
7231 || (nullify && forward_branch_p (insn)))
7232 {
7233 nullify = 1;
7234 xdelay = 0;
7235 operands[4] = GEN_INT (length);
7236 }
7237 else
7238 {
7239 xdelay = 1;
7240 operands[4] = GEN_INT (length + 4);
7241 }
7242
7243 if (GET_MODE (operands[0]) == DImode)
7244 strcpy (buf, "bb,*");
7245 else
7246 strcpy (buf, "{bvb,|bb,}");
7247 if ((which == 0 && negated)
7248 || (which == 1 && !negated))
7249 strcat (buf, "<");
7250 else
7251 strcat (buf, ">=");
7252 if (nullify)
7253 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7254 else
7255 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7256 output_asm_insn (buf, operands);
7257 return pa_output_lbranch (negated ? operands[3] : operands[2],
7258 insn, xdelay);
7259 }
7260 return buf;
7261 }
7262
7263 /* Return the output template for emitting a dbra type insn.
7264
7265 Note it may perform some output operations on its own before
7266 returning the final output string. */
7267 const char *
7268 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7269 {
7270 int length = get_attr_length (insn);
7271
7272 /* A conditional branch to the following instruction (e.g. the delay slot) is
7273 asking for a disaster. Be prepared! */
7274
7275 if (branch_to_delay_slot_p (insn))
7276 {
7277 if (which_alternative == 0)
7278 return "ldo %1(%0),%0";
7279 else if (which_alternative == 1)
7280 {
7281 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7282 output_asm_insn ("ldw -16(%%r30),%4", operands);
7283 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7284 return "{fldws|fldw} -16(%%r30),%0";
7285 }
7286 else
7287 {
7288 output_asm_insn ("ldw %0,%4", operands);
7289 return "ldo %1(%4),%4\n\tstw %4,%0";
7290 }
7291 }
7292
7293 if (which_alternative == 0)
7294 {
7295 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7296 int xdelay;
7297
7298 /* If this is a long branch with its delay slot unfilled, set `nullify'
7299 as it can nullify the delay slot and save a nop. */
7300 if (length == 8 && dbr_sequence_length () == 0)
7301 nullify = 1;
7302
7303 /* If this is a short forward conditional branch which did not get
7304 its delay slot filled, the delay slot can still be nullified. */
7305 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7306 nullify = forward_branch_p (insn);
7307
7308 switch (length)
7309 {
7310 case 4:
7311 if (nullify)
7312 {
7313 if (branch_needs_nop_p (insn))
7314 return "addib,%C2,n %1,%0,%3%#";
7315 else
7316 return "addib,%C2,n %1,%0,%3";
7317 }
7318 else
7319 return "addib,%C2 %1,%0,%3";
7320
7321 case 8:
7322 /* Handle weird backwards branch with a fulled delay slot
7323 which is nullified. */
7324 if (dbr_sequence_length () != 0
7325 && ! forward_branch_p (insn)
7326 && nullify)
7327 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7328 /* Handle short backwards branch with an unfilled delay slot.
7329 Using a addb;nop rather than addi;bl saves 1 cycle for both
7330 taken and untaken branches. */
7331 else if (dbr_sequence_length () == 0
7332 && ! forward_branch_p (insn)
7333 && INSN_ADDRESSES_SET_P ()
7334 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7335 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7336 return "addib,%C2 %1,%0,%3%#";
7337
7338 /* Handle normal cases. */
7339 if (nullify)
7340 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7341 else
7342 return "addi,%N2 %1,%0,%0\n\tb %3";
7343
7344 default:
7345 /* The reversed conditional branch must branch over one additional
7346 instruction if the delay slot is filled and needs to be extracted
7347 by pa_output_lbranch. If the delay slot is empty or this is a
7348 nullified forward branch, the instruction after the reversed
7349 condition branch must be nullified. */
7350 if (dbr_sequence_length () == 0
7351 || (nullify && forward_branch_p (insn)))
7352 {
7353 nullify = 1;
7354 xdelay = 0;
7355 operands[4] = GEN_INT (length);
7356 }
7357 else
7358 {
7359 xdelay = 1;
7360 operands[4] = GEN_INT (length + 4);
7361 }
7362
7363 if (nullify)
7364 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7365 else
7366 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7367
7368 return pa_output_lbranch (operands[3], insn, xdelay);
7369 }
7370
7371 }
7372 /* Deal with gross reload from FP register case. */
7373 else if (which_alternative == 1)
7374 {
7375 /* Move loop counter from FP register to MEM then into a GR,
7376 increment the GR, store the GR into MEM, and finally reload
7377 the FP register from MEM from within the branch's delay slot. */
7378 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7379 operands);
7380 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7381 if (length == 24)
7382 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7383 else if (length == 28)
7384 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7385 else
7386 {
7387 operands[5] = GEN_INT (length - 16);
7388 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7389 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7390 return pa_output_lbranch (operands[3], insn, 0);
7391 }
7392 }
7393 /* Deal with gross reload from memory case. */
7394 else
7395 {
7396 /* Reload loop counter from memory, the store back to memory
7397 happens in the branch's delay slot. */
7398 output_asm_insn ("ldw %0,%4", operands);
7399 if (length == 12)
7400 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7401 else if (length == 16)
7402 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7403 else
7404 {
7405 operands[5] = GEN_INT (length - 4);
7406 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7407 return pa_output_lbranch (operands[3], insn, 0);
7408 }
7409 }
7410 }
7411
7412 /* Return the output template for emitting a movb type insn.
7413
7414 Note it may perform some output operations on its own before
7415 returning the final output string. */
7416 const char *
7417 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7418 int reverse_comparison)
7419 {
7420 int length = get_attr_length (insn);
7421
7422 /* A conditional branch to the following instruction (e.g. the delay slot) is
7423 asking for a disaster. Be prepared! */
7424
7425 if (branch_to_delay_slot_p (insn))
7426 {
7427 if (which_alternative == 0)
7428 return "copy %1,%0";
7429 else if (which_alternative == 1)
7430 {
7431 output_asm_insn ("stw %1,-16(%%r30)", operands);
7432 return "{fldws|fldw} -16(%%r30),%0";
7433 }
7434 else if (which_alternative == 2)
7435 return "stw %1,%0";
7436 else
7437 return "mtsar %r1";
7438 }
7439
7440 /* Support the second variant. */
7441 if (reverse_comparison)
7442 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7443
7444 if (which_alternative == 0)
7445 {
7446 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7447 int xdelay;
7448
7449 /* If this is a long branch with its delay slot unfilled, set `nullify'
7450 as it can nullify the delay slot and save a nop. */
7451 if (length == 8 && dbr_sequence_length () == 0)
7452 nullify = 1;
7453
7454 /* If this is a short forward conditional branch which did not get
7455 its delay slot filled, the delay slot can still be nullified. */
7456 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7457 nullify = forward_branch_p (insn);
7458
7459 switch (length)
7460 {
7461 case 4:
7462 if (nullify)
7463 {
7464 if (branch_needs_nop_p (insn))
7465 return "movb,%C2,n %1,%0,%3%#";
7466 else
7467 return "movb,%C2,n %1,%0,%3";
7468 }
7469 else
7470 return "movb,%C2 %1,%0,%3";
7471
7472 case 8:
7473 /* Handle weird backwards branch with a filled delay slot
7474 which is nullified. */
7475 if (dbr_sequence_length () != 0
7476 && ! forward_branch_p (insn)
7477 && nullify)
7478 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7479
7480 /* Handle short backwards branch with an unfilled delay slot.
7481 Using a movb;nop rather than or;bl saves 1 cycle for both
7482 taken and untaken branches. */
7483 else if (dbr_sequence_length () == 0
7484 && ! forward_branch_p (insn)
7485 && INSN_ADDRESSES_SET_P ()
7486 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7487 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7488 return "movb,%C2 %1,%0,%3%#";
7489 /* Handle normal cases. */
7490 if (nullify)
7491 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7492 else
7493 return "or,%N2 %1,%%r0,%0\n\tb %3";
7494
7495 default:
7496 /* The reversed conditional branch must branch over one additional
7497 instruction if the delay slot is filled and needs to be extracted
7498 by pa_output_lbranch. If the delay slot is empty or this is a
7499 nullified forward branch, the instruction after the reversed
7500 condition branch must be nullified. */
7501 if (dbr_sequence_length () == 0
7502 || (nullify && forward_branch_p (insn)))
7503 {
7504 nullify = 1;
7505 xdelay = 0;
7506 operands[4] = GEN_INT (length);
7507 }
7508 else
7509 {
7510 xdelay = 1;
7511 operands[4] = GEN_INT (length + 4);
7512 }
7513
7514 if (nullify)
7515 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7516 else
7517 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7518
7519 return pa_output_lbranch (operands[3], insn, xdelay);
7520 }
7521 }
7522 /* Deal with gross reload for FP destination register case. */
7523 else if (which_alternative == 1)
7524 {
7525 /* Move source register to MEM, perform the branch test, then
7526 finally load the FP register from MEM from within the branch's
7527 delay slot. */
7528 output_asm_insn ("stw %1,-16(%%r30)", operands);
7529 if (length == 12)
7530 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7531 else if (length == 16)
7532 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7533 else
7534 {
7535 operands[4] = GEN_INT (length - 4);
7536 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7537 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7538 return pa_output_lbranch (operands[3], insn, 0);
7539 }
7540 }
7541 /* Deal with gross reload from memory case. */
7542 else if (which_alternative == 2)
7543 {
7544 /* Reload loop counter from memory, the store back to memory
7545 happens in the branch's delay slot. */
7546 if (length == 8)
7547 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7548 else if (length == 12)
7549 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7550 else
7551 {
7552 operands[4] = GEN_INT (length);
7553 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7554 operands);
7555 return pa_output_lbranch (operands[3], insn, 0);
7556 }
7557 }
7558 /* Handle SAR as a destination. */
7559 else
7560 {
7561 if (length == 8)
7562 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7563 else if (length == 12)
7564 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7565 else
7566 {
7567 operands[4] = GEN_INT (length);
7568 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7569 operands);
7570 return pa_output_lbranch (operands[3], insn, 0);
7571 }
7572 }
7573 }
7574
7575 /* Copy any FP arguments in INSN into integer registers. */
7576 static void
7577 copy_fp_args (rtx_insn *insn)
7578 {
7579 rtx link;
7580 rtx xoperands[2];
7581
7582 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7583 {
7584 int arg_mode, regno;
7585 rtx use = XEXP (link, 0);
7586
7587 if (! (GET_CODE (use) == USE
7588 && GET_CODE (XEXP (use, 0)) == REG
7589 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7590 continue;
7591
7592 arg_mode = GET_MODE (XEXP (use, 0));
7593 regno = REGNO (XEXP (use, 0));
7594
7595 /* Is it a floating point register? */
7596 if (regno >= 32 && regno <= 39)
7597 {
7598 /* Copy the FP register into an integer register via memory. */
7599 if (arg_mode == SFmode)
7600 {
7601 xoperands[0] = XEXP (use, 0);
7602 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7603 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7604 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7605 }
7606 else
7607 {
7608 xoperands[0] = XEXP (use, 0);
7609 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7610 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7611 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7612 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7613 }
7614 }
7615 }
7616 }
7617
7618 /* Compute length of the FP argument copy sequence for INSN. */
7619 static int
7620 length_fp_args (rtx_insn *insn)
7621 {
7622 int length = 0;
7623 rtx link;
7624
7625 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7626 {
7627 int arg_mode, regno;
7628 rtx use = XEXP (link, 0);
7629
7630 if (! (GET_CODE (use) == USE
7631 && GET_CODE (XEXP (use, 0)) == REG
7632 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7633 continue;
7634
7635 arg_mode = GET_MODE (XEXP (use, 0));
7636 regno = REGNO (XEXP (use, 0));
7637
7638 /* Is it a floating point register? */
7639 if (regno >= 32 && regno <= 39)
7640 {
7641 if (arg_mode == SFmode)
7642 length += 8;
7643 else
7644 length += 12;
7645 }
7646 }
7647
7648 return length;
7649 }
7650
7651 /* Return the attribute length for the millicode call instruction INSN.
7652 The length must match the code generated by pa_output_millicode_call.
7653 We include the delay slot in the returned length as it is better to
7654 over estimate the length than to under estimate it. */
7655
7656 int
7657 pa_attr_length_millicode_call (rtx_insn *insn)
7658 {
7659 unsigned long distance = -1;
7660 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7661
7662 if (INSN_ADDRESSES_SET_P ())
7663 {
7664 distance = (total + insn_current_reference_address (insn));
7665 if (distance < total)
7666 distance = -1;
7667 }
7668
7669 if (TARGET_64BIT)
7670 {
7671 if (!TARGET_LONG_CALLS && distance < 7600000)
7672 return 8;
7673
7674 return 20;
7675 }
7676 else if (TARGET_PORTABLE_RUNTIME)
7677 return 24;
7678 else
7679 {
7680 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7681 return 8;
7682
7683 if (!flag_pic)
7684 return 12;
7685
7686 return 24;
7687 }
7688 }
7689
7690 /* INSN is a function call.
7691
7692 CALL_DEST is the routine we are calling. */
7693
7694 const char *
7695 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7696 {
7697 int attr_length = get_attr_length (insn);
7698 int seq_length = dbr_sequence_length ();
7699 rtx xoperands[4];
7700
7701 xoperands[0] = call_dest;
7702
7703 /* Handle the common case where we are sure that the branch will
7704 reach the beginning of the $CODE$ subspace. The within reach
7705 form of the $$sh_func_adrs call has a length of 28. Because it
7706 has an attribute type of sh_func_adrs, it never has a nonzero
7707 sequence length (i.e., the delay slot is never filled). */
7708 if (!TARGET_LONG_CALLS
7709 && (attr_length == 8
7710 || (attr_length == 28
7711 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7712 {
7713 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7714 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7715 }
7716 else
7717 {
7718 if (TARGET_64BIT)
7719 {
7720 /* It might seem that one insn could be saved by accessing
7721 the millicode function using the linkage table. However,
7722 this doesn't work in shared libraries and other dynamically
7723 loaded objects. Using a pc-relative sequence also avoids
7724 problems related to the implicit use of the gp register. */
7725 xoperands[1] = gen_rtx_REG (Pmode, 1);
7726 xoperands[2] = xoperands[1];
7727 pa_output_pic_pcrel_sequence (xoperands);
7728 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7729 }
7730 else if (TARGET_PORTABLE_RUNTIME)
7731 {
7732 /* Pure portable runtime doesn't allow be/ble; we also don't
7733 have PIC support in the assembler/linker, so this sequence
7734 is needed. */
7735
7736 /* Get the address of our target into %r1. */
7737 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7738 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7739
7740 /* Get our return address into %r31. */
7741 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7742 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7743
7744 /* Jump to our target address in %r1. */
7745 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7746 }
7747 else if (!flag_pic)
7748 {
7749 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7750 if (TARGET_PA_20)
7751 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7752 else
7753 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7754 }
7755 else
7756 {
7757 xoperands[1] = gen_rtx_REG (Pmode, 31);
7758 xoperands[2] = gen_rtx_REG (Pmode, 1);
7759 pa_output_pic_pcrel_sequence (xoperands);
7760
7761 /* Adjust return address. */
7762 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7763
7764 /* Jump to our target address in %r1. */
7765 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7766 }
7767 }
7768
7769 if (seq_length == 0)
7770 output_asm_insn ("nop", xoperands);
7771
7772 return "";
7773 }
7774
7775 /* Return the attribute length of the call instruction INSN. The SIBCALL
7776 flag indicates whether INSN is a regular call or a sibling call. The
7777 length returned must be longer than the code actually generated by
7778 pa_output_call. Since branch shortening is done before delay branch
7779 sequencing, there is no way to determine whether or not the delay
7780 slot will be filled during branch shortening. Even when the delay
7781 slot is filled, we may have to add a nop if the delay slot contains
7782 a branch that can't reach its target. Thus, we always have to include
7783 the delay slot in the length estimate. This used to be done in
7784 pa_adjust_insn_length but we do it here now as some sequences always
7785 fill the delay slot and we can save four bytes in the estimate for
7786 these sequences. */
7787
7788 int
7789 pa_attr_length_call (rtx_insn *insn, int sibcall)
7790 {
7791 int local_call;
7792 rtx call, call_dest;
7793 tree call_decl;
7794 int length = 0;
7795 rtx pat = PATTERN (insn);
7796 unsigned long distance = -1;
7797
7798 gcc_assert (CALL_P (insn));
7799
7800 if (INSN_ADDRESSES_SET_P ())
7801 {
7802 unsigned long total;
7803
7804 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7805 distance = (total + insn_current_reference_address (insn));
7806 if (distance < total)
7807 distance = -1;
7808 }
7809
7810 gcc_assert (GET_CODE (pat) == PARALLEL);
7811
7812 /* Get the call rtx. */
7813 call = XVECEXP (pat, 0, 0);
7814 if (GET_CODE (call) == SET)
7815 call = SET_SRC (call);
7816
7817 gcc_assert (GET_CODE (call) == CALL);
7818
7819 /* Determine if this is a local call. */
7820 call_dest = XEXP (XEXP (call, 0), 0);
7821 call_decl = SYMBOL_REF_DECL (call_dest);
7822 local_call = call_decl && targetm.binds_local_p (call_decl);
7823
7824 /* pc-relative branch. */
7825 if (!TARGET_LONG_CALLS
7826 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7827 || distance < MAX_PCREL17F_OFFSET))
7828 length += 8;
7829
7830 /* 64-bit plabel sequence. */
7831 else if (TARGET_64BIT && !local_call)
7832 length += sibcall ? 28 : 24;
7833
7834 /* non-pic long absolute branch sequence. */
7835 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7836 length += 12;
7837
7838 /* long pc-relative branch sequence. */
7839 else if (TARGET_LONG_PIC_SDIFF_CALL
7840 || (TARGET_GAS && !TARGET_SOM && local_call))
7841 {
7842 length += 20;
7843
7844 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7845 length += 8;
7846 }
7847
7848 /* 32-bit plabel sequence. */
7849 else
7850 {
7851 length += 32;
7852
7853 if (TARGET_SOM)
7854 length += length_fp_args (insn);
7855
7856 if (flag_pic)
7857 length += 4;
7858
7859 if (!TARGET_PA_20)
7860 {
7861 if (!sibcall)
7862 length += 8;
7863
7864 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7865 length += 8;
7866 }
7867 }
7868
7869 return length;
7870 }
7871
7872 /* INSN is a function call.
7873
7874 CALL_DEST is the routine we are calling. */
7875
7876 const char *
7877 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7878 {
7879 int seq_length = dbr_sequence_length ();
7880 tree call_decl = SYMBOL_REF_DECL (call_dest);
7881 int local_call = call_decl && targetm.binds_local_p (call_decl);
7882 rtx xoperands[4];
7883
7884 xoperands[0] = call_dest;
7885
7886 /* Handle the common case where we're sure that the branch will reach
7887 the beginning of the "$CODE$" subspace. This is the beginning of
7888 the current function if we are in a named section. */
7889 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7890 {
7891 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7892 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7893 }
7894 else
7895 {
7896 if (TARGET_64BIT && !local_call)
7897 {
7898 /* ??? As far as I can tell, the HP linker doesn't support the
7899 long pc-relative sequence described in the 64-bit runtime
7900 architecture. So, we use a slightly longer indirect call. */
7901 xoperands[0] = pa_get_deferred_plabel (call_dest);
7902 xoperands[1] = gen_label_rtx ();
7903
7904 /* If this isn't a sibcall, we put the load of %r27 into the
7905 delay slot. We can't do this in a sibcall as we don't
7906 have a second call-clobbered scratch register available.
7907 We don't need to do anything when generating fast indirect
7908 calls. */
7909 if (seq_length != 0 && !sibcall)
7910 {
7911 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7912 optimize, 0, NULL);
7913
7914 /* Now delete the delay insn. */
7915 SET_INSN_DELETED (NEXT_INSN (insn));
7916 seq_length = 0;
7917 }
7918
7919 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7920 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7921 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7922
7923 if (sibcall)
7924 {
7925 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7926 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7927 output_asm_insn ("bve (%%r1)", xoperands);
7928 }
7929 else
7930 {
7931 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7932 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7933 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7934 seq_length = 1;
7935 }
7936 }
7937 else
7938 {
7939 int indirect_call = 0;
7940
7941 /* Emit a long call. There are several different sequences
7942 of increasing length and complexity. In most cases,
7943 they don't allow an instruction in the delay slot. */
7944 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7945 && !TARGET_LONG_PIC_SDIFF_CALL
7946 && !(TARGET_GAS && !TARGET_SOM && local_call)
7947 && !TARGET_64BIT)
7948 indirect_call = 1;
7949
7950 if (seq_length != 0
7951 && !sibcall
7952 && (!TARGET_PA_20
7953 || indirect_call
7954 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7955 {
7956 /* A non-jump insn in the delay slot. By definition we can
7957 emit this insn before the call (and in fact before argument
7958 relocating. */
7959 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7960 NULL);
7961
7962 /* Now delete the delay insn. */
7963 SET_INSN_DELETED (NEXT_INSN (insn));
7964 seq_length = 0;
7965 }
7966
7967 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7968 {
7969 /* This is the best sequence for making long calls in
7970 non-pic code. Unfortunately, GNU ld doesn't provide
7971 the stub needed for external calls, and GAS's support
7972 for this with the SOM linker is buggy. It is safe
7973 to use this for local calls. */
7974 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7975 if (sibcall)
7976 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7977 else
7978 {
7979 if (TARGET_PA_20)
7980 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7981 xoperands);
7982 else
7983 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7984
7985 output_asm_insn ("copy %%r31,%%r2", xoperands);
7986 seq_length = 1;
7987 }
7988 }
7989 else
7990 {
7991 /* The HP assembler and linker can handle relocations for
7992 the difference of two symbols. The HP assembler
7993 recognizes the sequence as a pc-relative call and
7994 the linker provides stubs when needed. */
7995
7996 /* GAS currently can't generate the relocations that
7997 are needed for the SOM linker under HP-UX using this
7998 sequence. The GNU linker doesn't generate the stubs
7999 that are needed for external calls on TARGET_ELF32
8000 with this sequence. For now, we have to use a longer
8001 plabel sequence when using GAS for non local calls. */
8002 if (TARGET_LONG_PIC_SDIFF_CALL
8003 || (TARGET_GAS && !TARGET_SOM && local_call))
8004 {
8005 xoperands[1] = gen_rtx_REG (Pmode, 1);
8006 xoperands[2] = xoperands[1];
8007 pa_output_pic_pcrel_sequence (xoperands);
8008 }
8009 else
8010 {
8011 /* Emit a long plabel-based call sequence. This is
8012 essentially an inline implementation of $$dyncall.
8013 We don't actually try to call $$dyncall as this is
8014 as difficult as calling the function itself. */
8015 xoperands[0] = pa_get_deferred_plabel (call_dest);
8016 xoperands[1] = gen_label_rtx ();
8017
8018 /* Since the call is indirect, FP arguments in registers
8019 need to be copied to the general registers. Then, the
8020 argument relocation stub will copy them back. */
8021 if (TARGET_SOM)
8022 copy_fp_args (insn);
8023
8024 if (flag_pic)
8025 {
8026 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8027 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8028 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8029 }
8030 else
8031 {
8032 output_asm_insn ("addil LR'%0-$global$,%%r27",
8033 xoperands);
8034 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8035 xoperands);
8036 }
8037
8038 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8039 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8040 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8041 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8042
8043 if (!sibcall && !TARGET_PA_20)
8044 {
8045 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8046 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8047 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8048 else
8049 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8050 }
8051 }
8052
8053 if (TARGET_PA_20)
8054 {
8055 if (sibcall)
8056 output_asm_insn ("bve (%%r1)", xoperands);
8057 else
8058 {
8059 if (indirect_call)
8060 {
8061 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8062 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8063 seq_length = 1;
8064 }
8065 else
8066 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8067 }
8068 }
8069 else
8070 {
8071 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8072 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8073 xoperands);
8074
8075 if (sibcall)
8076 {
8077 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8078 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8079 else
8080 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8081 }
8082 else
8083 {
8084 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8085 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8086 else
8087 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8088
8089 if (indirect_call)
8090 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8091 else
8092 output_asm_insn ("copy %%r31,%%r2", xoperands);
8093 seq_length = 1;
8094 }
8095 }
8096 }
8097 }
8098 }
8099
8100 if (seq_length == 0)
8101 output_asm_insn ("nop", xoperands);
8102
8103 return "";
8104 }
8105
8106 /* Return the attribute length of the indirect call instruction INSN.
8107 The length must match the code generated by output_indirect call.
8108 The returned length includes the delay slot. Currently, the delay
8109 slot of an indirect call sequence is not exposed and it is used by
8110 the sequence itself. */
8111
8112 int
8113 pa_attr_length_indirect_call (rtx_insn *insn)
8114 {
8115 unsigned long distance = -1;
8116 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8117
8118 if (INSN_ADDRESSES_SET_P ())
8119 {
8120 distance = (total + insn_current_reference_address (insn));
8121 if (distance < total)
8122 distance = -1;
8123 }
8124
8125 if (TARGET_64BIT)
8126 return 12;
8127
8128 if (TARGET_FAST_INDIRECT_CALLS)
8129 return 8;
8130
8131 if (TARGET_PORTABLE_RUNTIME)
8132 return 16;
8133
8134 /* Inline version of $$dyncall. */
8135 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8136 return 20;
8137
8138 if (!TARGET_LONG_CALLS
8139 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8140 || distance < MAX_PCREL17F_OFFSET))
8141 return 8;
8142
8143 /* Out of reach, can use ble. */
8144 if (!flag_pic)
8145 return 12;
8146
8147 /* Inline version of $$dyncall. */
8148 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8149 return 20;
8150
8151 if (!optimize_size)
8152 return 36;
8153
8154 /* Long PIC pc-relative call. */
8155 return 20;
8156 }
8157
8158 const char *
8159 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8160 {
8161 rtx xoperands[4];
8162 int length;
8163
8164 if (TARGET_64BIT)
8165 {
8166 xoperands[0] = call_dest;
8167 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8168 "bve,l (%%r2),%%r2\n\t"
8169 "ldd 24(%0),%%r27", xoperands);
8170 return "";
8171 }
8172
8173 /* First the special case for kernels, level 0 systems, etc. */
8174 if (TARGET_FAST_INDIRECT_CALLS)
8175 {
8176 pa_output_arg_descriptor (insn);
8177 if (TARGET_PA_20)
8178 return "bve,l,n (%%r22),%%r2\n\tnop";
8179 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8180 }
8181
8182 if (TARGET_PORTABLE_RUNTIME)
8183 {
8184 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8185 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8186 pa_output_arg_descriptor (insn);
8187 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8188 }
8189
8190 /* Maybe emit a fast inline version of $$dyncall. */
8191 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8192 {
8193 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8194 "ldw 2(%%r22),%%r19\n\t"
8195 "ldw -2(%%r22),%%r22", xoperands);
8196 pa_output_arg_descriptor (insn);
8197 if (TARGET_NO_SPACE_REGS)
8198 {
8199 if (TARGET_PA_20)
8200 return "bve,l,n (%%r22),%%r2\n\tnop";
8201 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8202 }
8203 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8204 }
8205
8206 /* Now the normal case -- we can reach $$dyncall directly or
8207 we're sure that we can get there via a long-branch stub.
8208
8209 No need to check target flags as the length uniquely identifies
8210 the remaining cases. */
8211 length = pa_attr_length_indirect_call (insn);
8212 if (length == 8)
8213 {
8214 pa_output_arg_descriptor (insn);
8215
8216 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8217 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8218 variant of the B,L instruction can't be used on the SOM target. */
8219 if (TARGET_PA_20 && !TARGET_SOM)
8220 return "b,l,n $$dyncall,%%r2\n\tnop";
8221 else
8222 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8223 }
8224
8225 /* Long millicode call, but we are not generating PIC or portable runtime
8226 code. */
8227 if (length == 12)
8228 {
8229 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8230 pa_output_arg_descriptor (insn);
8231 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8232 }
8233
8234 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8235 pc-relative call sequence is five instructions. The inline PA 2.0
8236 version of $$dyncall is also five instructions. The PA 1.X versions
8237 are longer but still an overall win. */
8238 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8239 {
8240 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8241 "ldw 2(%%r22),%%r19\n\t"
8242 "ldw -2(%%r22),%%r22", xoperands);
8243 if (TARGET_NO_SPACE_REGS)
8244 {
8245 pa_output_arg_descriptor (insn);
8246 if (TARGET_PA_20)
8247 return "bve,l,n (%%r22),%%r2\n\tnop";
8248 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8249 }
8250 if (TARGET_PA_20)
8251 {
8252 pa_output_arg_descriptor (insn);
8253 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8254 }
8255 output_asm_insn ("bl .+8,%%r2\n\t"
8256 "ldo 16(%%r2),%%r2\n\t"
8257 "ldsid (%%r22),%%r1\n\t"
8258 "mtsp %%r1,%%sr0", xoperands);
8259 pa_output_arg_descriptor (insn);
8260 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8261 }
8262
8263 /* We need a long PIC call to $$dyncall. */
8264 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8265 xoperands[1] = gen_rtx_REG (Pmode, 2);
8266 xoperands[2] = gen_rtx_REG (Pmode, 1);
8267 pa_output_pic_pcrel_sequence (xoperands);
8268 pa_output_arg_descriptor (insn);
8269 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8270 }
8271
8272 /* In HPUX 8.0's shared library scheme, special relocations are needed
8273 for function labels if they might be passed to a function
8274 in a shared library (because shared libraries don't live in code
8275 space), and special magic is needed to construct their address. */
8276
8277 void
8278 pa_encode_label (rtx sym)
8279 {
8280 const char *str = XSTR (sym, 0);
8281 int len = strlen (str) + 1;
8282 char *newstr, *p;
8283
8284 p = newstr = XALLOCAVEC (char, len + 1);
8285 *p++ = '@';
8286 strcpy (p, str);
8287
8288 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8289 }
8290
8291 static void
8292 pa_encode_section_info (tree decl, rtx rtl, int first)
8293 {
8294 int old_referenced = 0;
8295
8296 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8297 old_referenced
8298 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8299
8300 default_encode_section_info (decl, rtl, first);
8301
8302 if (first && TEXT_SPACE_P (decl))
8303 {
8304 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8305 if (TREE_CODE (decl) == FUNCTION_DECL)
8306 pa_encode_label (XEXP (rtl, 0));
8307 }
8308 else if (old_referenced)
8309 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8310 }
8311
8312 /* This is sort of inverse to pa_encode_section_info. */
8313
8314 static const char *
8315 pa_strip_name_encoding (const char *str)
8316 {
8317 str += (*str == '@');
8318 str += (*str == '*');
8319 return str;
8320 }
8321
8322 /* Returns 1 if OP is a function label involved in a simple addition
8323 with a constant. Used to keep certain patterns from matching
8324 during instruction combination. */
8325 int
8326 pa_is_function_label_plus_const (rtx op)
8327 {
8328 /* Strip off any CONST. */
8329 if (GET_CODE (op) == CONST)
8330 op = XEXP (op, 0);
8331
8332 return (GET_CODE (op) == PLUS
8333 && function_label_operand (XEXP (op, 0), VOIDmode)
8334 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8335 }
8336
8337 /* Output assembly code for a thunk to FUNCTION. */
8338
8339 static void
8340 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8341 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8342 tree function)
8343 {
8344 static unsigned int current_thunk_number;
8345 int val_14 = VAL_14_BITS_P (delta);
8346 unsigned int old_last_address = last_address, nbytes = 0;
8347 char label[16];
8348 rtx xoperands[4];
8349
8350 xoperands[0] = XEXP (DECL_RTL (function), 0);
8351 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8352 xoperands[2] = GEN_INT (delta);
8353
8354 final_start_function (emit_barrier (), file, 1);
8355
8356 /* Output the thunk. We know that the function is in the same
8357 translation unit (i.e., the same space) as the thunk, and that
8358 thunks are output after their method. Thus, we don't need an
8359 external branch to reach the function. With SOM and GAS,
8360 functions and thunks are effectively in different sections.
8361 Thus, we can always use a IA-relative branch and the linker
8362 will add a long branch stub if necessary.
8363
8364 However, we have to be careful when generating PIC code on the
8365 SOM port to ensure that the sequence does not transfer to an
8366 import stub for the target function as this could clobber the
8367 return value saved at SP-24. This would also apply to the
8368 32-bit linux port if the multi-space model is implemented. */
8369 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8370 && !(flag_pic && TREE_PUBLIC (function))
8371 && (TARGET_GAS || last_address < 262132))
8372 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8373 && ((targetm_common.have_named_sections
8374 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8375 /* The GNU 64-bit linker has rather poor stub management.
8376 So, we use a long branch from thunks that aren't in
8377 the same section as the target function. */
8378 && ((!TARGET_64BIT
8379 && (DECL_SECTION_NAME (thunk_fndecl)
8380 != DECL_SECTION_NAME (function)))
8381 || ((DECL_SECTION_NAME (thunk_fndecl)
8382 == DECL_SECTION_NAME (function))
8383 && last_address < 262132)))
8384 /* In this case, we need to be able to reach the start of
8385 the stub table even though the function is likely closer
8386 and can be jumped to directly. */
8387 || (targetm_common.have_named_sections
8388 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8389 && DECL_SECTION_NAME (function) == NULL
8390 && total_code_bytes < MAX_PCREL17F_OFFSET)
8391 /* Likewise. */
8392 || (!targetm_common.have_named_sections
8393 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8394 {
8395 if (!val_14)
8396 output_asm_insn ("addil L'%2,%%r26", xoperands);
8397
8398 output_asm_insn ("b %0", xoperands);
8399
8400 if (val_14)
8401 {
8402 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8403 nbytes += 8;
8404 }
8405 else
8406 {
8407 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8408 nbytes += 12;
8409 }
8410 }
8411 else if (TARGET_64BIT)
8412 {
8413 rtx xop[4];
8414
8415 /* We only have one call-clobbered scratch register, so we can't
8416 make use of the delay slot if delta doesn't fit in 14 bits. */
8417 if (!val_14)
8418 {
8419 output_asm_insn ("addil L'%2,%%r26", xoperands);
8420 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8421 }
8422
8423 /* Load function address into %r1. */
8424 xop[0] = xoperands[0];
8425 xop[1] = gen_rtx_REG (Pmode, 1);
8426 xop[2] = xop[1];
8427 pa_output_pic_pcrel_sequence (xop);
8428
8429 if (val_14)
8430 {
8431 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8432 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8433 nbytes += 20;
8434 }
8435 else
8436 {
8437 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8438 nbytes += 24;
8439 }
8440 }
8441 else if (TARGET_PORTABLE_RUNTIME)
8442 {
8443 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8444 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8445
8446 if (!val_14)
8447 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8448
8449 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8450
8451 if (val_14)
8452 {
8453 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8454 nbytes += 16;
8455 }
8456 else
8457 {
8458 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8459 nbytes += 20;
8460 }
8461 }
8462 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8463 {
8464 /* The function is accessible from outside this module. The only
8465 way to avoid an import stub between the thunk and function is to
8466 call the function directly with an indirect sequence similar to
8467 that used by $$dyncall. This is possible because $$dyncall acts
8468 as the import stub in an indirect call. */
8469 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8470 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8471 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8472 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8473 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8474 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8475 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8476 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8477 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8478
8479 if (!val_14)
8480 {
8481 output_asm_insn ("addil L'%2,%%r26", xoperands);
8482 nbytes += 4;
8483 }
8484
8485 if (TARGET_PA_20)
8486 {
8487 output_asm_insn ("bve (%%r22)", xoperands);
8488 nbytes += 36;
8489 }
8490 else if (TARGET_NO_SPACE_REGS)
8491 {
8492 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8493 nbytes += 36;
8494 }
8495 else
8496 {
8497 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8498 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8499 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8500 nbytes += 44;
8501 }
8502
8503 if (val_14)
8504 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8505 else
8506 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8507 }
8508 else if (flag_pic)
8509 {
8510 rtx xop[4];
8511
8512 /* Load function address into %r22. */
8513 xop[0] = xoperands[0];
8514 xop[1] = gen_rtx_REG (Pmode, 1);
8515 xop[2] = gen_rtx_REG (Pmode, 22);
8516 pa_output_pic_pcrel_sequence (xop);
8517
8518 if (!val_14)
8519 output_asm_insn ("addil L'%2,%%r26", xoperands);
8520
8521 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8522
8523 if (val_14)
8524 {
8525 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8526 nbytes += 20;
8527 }
8528 else
8529 {
8530 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8531 nbytes += 24;
8532 }
8533 }
8534 else
8535 {
8536 if (!val_14)
8537 output_asm_insn ("addil L'%2,%%r26", xoperands);
8538
8539 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8540 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8541
8542 if (val_14)
8543 {
8544 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8545 nbytes += 12;
8546 }
8547 else
8548 {
8549 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8550 nbytes += 16;
8551 }
8552 }
8553
8554 final_end_function ();
8555
8556 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8557 {
8558 switch_to_section (data_section);
8559 output_asm_insn (".align 4", xoperands);
8560 ASM_OUTPUT_LABEL (file, label);
8561 output_asm_insn (".word P'%0", xoperands);
8562 }
8563
8564 current_thunk_number++;
8565 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8566 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8567 last_address += nbytes;
8568 if (old_last_address > last_address)
8569 last_address = UINT_MAX;
8570 update_total_code_bytes (nbytes);
8571 }
8572
8573 /* Only direct calls to static functions are allowed to be sibling (tail)
8574 call optimized.
8575
8576 This restriction is necessary because some linker generated stubs will
8577 store return pointers into rp' in some cases which might clobber a
8578 live value already in rp'.
8579
8580 In a sibcall the current function and the target function share stack
8581 space. Thus if the path to the current function and the path to the
8582 target function save a value in rp', they save the value into the
8583 same stack slot, which has undesirable consequences.
8584
8585 Because of the deferred binding nature of shared libraries any function
8586 with external scope could be in a different load module and thus require
8587 rp' to be saved when calling that function. So sibcall optimizations
8588 can only be safe for static function.
8589
8590 Note that GCC never needs return value relocations, so we don't have to
8591 worry about static calls with return value relocations (which require
8592 saving rp').
8593
8594 It is safe to perform a sibcall optimization when the target function
8595 will never return. */
8596 static bool
8597 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8598 {
8599 if (TARGET_PORTABLE_RUNTIME)
8600 return false;
8601
8602 /* Sibcalls are not ok because the arg pointer register is not a fixed
8603 register. This prevents the sibcall optimization from occurring. In
8604 addition, there are problems with stub placement using GNU ld. This
8605 is because a normal sibcall branch uses a 17-bit relocation while
8606 a regular call branch uses a 22-bit relocation. As a result, more
8607 care needs to be taken in the placement of long-branch stubs. */
8608 if (TARGET_64BIT)
8609 return false;
8610
8611 /* Sibcalls are only ok within a translation unit. */
8612 return (decl && !TREE_PUBLIC (decl));
8613 }
8614
8615 /* ??? Addition is not commutative on the PA due to the weird implicit
8616 space register selection rules for memory addresses. Therefore, we
8617 don't consider a + b == b + a, as this might be inside a MEM. */
8618 static bool
8619 pa_commutative_p (const_rtx x, int outer_code)
8620 {
8621 return (COMMUTATIVE_P (x)
8622 && (TARGET_NO_SPACE_REGS
8623 || (outer_code != UNKNOWN && outer_code != MEM)
8624 || GET_CODE (x) != PLUS));
8625 }
8626
8627 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8628 use in fmpyadd instructions. */
8629 int
8630 pa_fmpyaddoperands (rtx *operands)
8631 {
8632 machine_mode mode = GET_MODE (operands[0]);
8633
8634 /* Must be a floating point mode. */
8635 if (mode != SFmode && mode != DFmode)
8636 return 0;
8637
8638 /* All modes must be the same. */
8639 if (! (mode == GET_MODE (operands[1])
8640 && mode == GET_MODE (operands[2])
8641 && mode == GET_MODE (operands[3])
8642 && mode == GET_MODE (operands[4])
8643 && mode == GET_MODE (operands[5])))
8644 return 0;
8645
8646 /* All operands must be registers. */
8647 if (! (GET_CODE (operands[1]) == REG
8648 && GET_CODE (operands[2]) == REG
8649 && GET_CODE (operands[3]) == REG
8650 && GET_CODE (operands[4]) == REG
8651 && GET_CODE (operands[5]) == REG))
8652 return 0;
8653
8654 /* Only 2 real operands to the addition. One of the input operands must
8655 be the same as the output operand. */
8656 if (! rtx_equal_p (operands[3], operands[4])
8657 && ! rtx_equal_p (operands[3], operands[5]))
8658 return 0;
8659
8660 /* Inout operand of add cannot conflict with any operands from multiply. */
8661 if (rtx_equal_p (operands[3], operands[0])
8662 || rtx_equal_p (operands[3], operands[1])
8663 || rtx_equal_p (operands[3], operands[2]))
8664 return 0;
8665
8666 /* multiply cannot feed into addition operands. */
8667 if (rtx_equal_p (operands[4], operands[0])
8668 || rtx_equal_p (operands[5], operands[0]))
8669 return 0;
8670
8671 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8672 if (mode == SFmode
8673 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8674 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8675 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8676 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8677 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8678 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8679 return 0;
8680
8681 /* Passed. Operands are suitable for fmpyadd. */
8682 return 1;
8683 }
8684
8685 #if !defined(USE_COLLECT2)
8686 static void
8687 pa_asm_out_constructor (rtx symbol, int priority)
8688 {
8689 if (!function_label_operand (symbol, VOIDmode))
8690 pa_encode_label (symbol);
8691
8692 #ifdef CTORS_SECTION_ASM_OP
8693 default_ctor_section_asm_out_constructor (symbol, priority);
8694 #else
8695 # ifdef TARGET_ASM_NAMED_SECTION
8696 default_named_section_asm_out_constructor (symbol, priority);
8697 # else
8698 default_stabs_asm_out_constructor (symbol, priority);
8699 # endif
8700 #endif
8701 }
8702
8703 static void
8704 pa_asm_out_destructor (rtx symbol, int priority)
8705 {
8706 if (!function_label_operand (symbol, VOIDmode))
8707 pa_encode_label (symbol);
8708
8709 #ifdef DTORS_SECTION_ASM_OP
8710 default_dtor_section_asm_out_destructor (symbol, priority);
8711 #else
8712 # ifdef TARGET_ASM_NAMED_SECTION
8713 default_named_section_asm_out_destructor (symbol, priority);
8714 # else
8715 default_stabs_asm_out_destructor (symbol, priority);
8716 # endif
8717 #endif
8718 }
8719 #endif
8720
8721 /* This function places uninitialized global data in the bss section.
8722 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8723 function on the SOM port to prevent uninitialized global data from
8724 being placed in the data section. */
8725
8726 void
8727 pa_asm_output_aligned_bss (FILE *stream,
8728 const char *name,
8729 unsigned HOST_WIDE_INT size,
8730 unsigned int align)
8731 {
8732 switch_to_section (bss_section);
8733 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8734
8735 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8736 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8737 #endif
8738
8739 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8740 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8741 #endif
8742
8743 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8744 ASM_OUTPUT_LABEL (stream, name);
8745 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8746 }
8747
8748 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8749 that doesn't allow the alignment of global common storage to be directly
8750 specified. The SOM linker aligns common storage based on the rounded
8751 value of the NUM_BYTES parameter in the .comm directive. It's not
8752 possible to use the .align directive as it doesn't affect the alignment
8753 of the label associated with a .comm directive. */
8754
8755 void
8756 pa_asm_output_aligned_common (FILE *stream,
8757 const char *name,
8758 unsigned HOST_WIDE_INT size,
8759 unsigned int align)
8760 {
8761 unsigned int max_common_align;
8762
8763 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8764 if (align > max_common_align)
8765 {
8766 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8767 "for global common data. Using %u",
8768 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8769 align = max_common_align;
8770 }
8771
8772 switch_to_section (bss_section);
8773
8774 assemble_name (stream, name);
8775 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8776 MAX (size, align / BITS_PER_UNIT));
8777 }
8778
8779 /* We can't use .comm for local common storage as the SOM linker effectively
8780 treats the symbol as universal and uses the same storage for local symbols
8781 with the same name in different object files. The .block directive
8782 reserves an uninitialized block of storage. However, it's not common
8783 storage. Fortunately, GCC never requests common storage with the same
8784 name in any given translation unit. */
8785
8786 void
8787 pa_asm_output_aligned_local (FILE *stream,
8788 const char *name,
8789 unsigned HOST_WIDE_INT size,
8790 unsigned int align)
8791 {
8792 switch_to_section (bss_section);
8793 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8794
8795 #ifdef LOCAL_ASM_OP
8796 fprintf (stream, "%s", LOCAL_ASM_OP);
8797 assemble_name (stream, name);
8798 fprintf (stream, "\n");
8799 #endif
8800
8801 ASM_OUTPUT_LABEL (stream, name);
8802 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8803 }
8804
8805 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8806 use in fmpysub instructions. */
8807 int
8808 pa_fmpysuboperands (rtx *operands)
8809 {
8810 machine_mode mode = GET_MODE (operands[0]);
8811
8812 /* Must be a floating point mode. */
8813 if (mode != SFmode && mode != DFmode)
8814 return 0;
8815
8816 /* All modes must be the same. */
8817 if (! (mode == GET_MODE (operands[1])
8818 && mode == GET_MODE (operands[2])
8819 && mode == GET_MODE (operands[3])
8820 && mode == GET_MODE (operands[4])
8821 && mode == GET_MODE (operands[5])))
8822 return 0;
8823
8824 /* All operands must be registers. */
8825 if (! (GET_CODE (operands[1]) == REG
8826 && GET_CODE (operands[2]) == REG
8827 && GET_CODE (operands[3]) == REG
8828 && GET_CODE (operands[4]) == REG
8829 && GET_CODE (operands[5]) == REG))
8830 return 0;
8831
8832 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8833 operation, so operands[4] must be the same as operand[3]. */
8834 if (! rtx_equal_p (operands[3], operands[4]))
8835 return 0;
8836
8837 /* multiply cannot feed into subtraction. */
8838 if (rtx_equal_p (operands[5], operands[0]))
8839 return 0;
8840
8841 /* Inout operand of sub cannot conflict with any operands from multiply. */
8842 if (rtx_equal_p (operands[3], operands[0])
8843 || rtx_equal_p (operands[3], operands[1])
8844 || rtx_equal_p (operands[3], operands[2]))
8845 return 0;
8846
8847 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8848 if (mode == SFmode
8849 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8850 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8851 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8852 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8853 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8854 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8855 return 0;
8856
8857 /* Passed. Operands are suitable for fmpysub. */
8858 return 1;
8859 }
8860
8861 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8862 constants for a MULT embedded inside a memory address. */
8863 int
8864 pa_mem_shadd_constant_p (int val)
8865 {
8866 if (val == 2 || val == 4 || val == 8)
8867 return 1;
8868 else
8869 return 0;
8870 }
8871
8872 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8873 constants for shadd instructions. */
8874 int
8875 pa_shadd_constant_p (int val)
8876 {
8877 if (val == 1 || val == 2 || val == 3)
8878 return 1;
8879 else
8880 return 0;
8881 }
8882
8883 /* Return TRUE if INSN branches forward. */
8884
8885 static bool
8886 forward_branch_p (rtx_insn *insn)
8887 {
8888 rtx lab = JUMP_LABEL (insn);
8889
8890 /* The INSN must have a jump label. */
8891 gcc_assert (lab != NULL_RTX);
8892
8893 if (INSN_ADDRESSES_SET_P ())
8894 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8895
8896 while (insn)
8897 {
8898 if (insn == lab)
8899 return true;
8900 else
8901 insn = NEXT_INSN (insn);
8902 }
8903
8904 return false;
8905 }
8906
8907 /* Output an unconditional move and branch insn. */
8908
8909 const char *
8910 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8911 {
8912 int length = get_attr_length (insn);
8913
8914 /* These are the cases in which we win. */
8915 if (length == 4)
8916 return "mov%I1b,tr %1,%0,%2";
8917
8918 /* None of the following cases win, but they don't lose either. */
8919 if (length == 8)
8920 {
8921 if (dbr_sequence_length () == 0)
8922 {
8923 /* Nothing in the delay slot, fake it by putting the combined
8924 insn (the copy or add) in the delay slot of a bl. */
8925 if (GET_CODE (operands[1]) == CONST_INT)
8926 return "b %2\n\tldi %1,%0";
8927 else
8928 return "b %2\n\tcopy %1,%0";
8929 }
8930 else
8931 {
8932 /* Something in the delay slot, but we've got a long branch. */
8933 if (GET_CODE (operands[1]) == CONST_INT)
8934 return "ldi %1,%0\n\tb %2";
8935 else
8936 return "copy %1,%0\n\tb %2";
8937 }
8938 }
8939
8940 if (GET_CODE (operands[1]) == CONST_INT)
8941 output_asm_insn ("ldi %1,%0", operands);
8942 else
8943 output_asm_insn ("copy %1,%0", operands);
8944 return pa_output_lbranch (operands[2], insn, 1);
8945 }
8946
8947 /* Output an unconditional add and branch insn. */
8948
8949 const char *
8950 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8951 {
8952 int length = get_attr_length (insn);
8953
8954 /* To make life easy we want operand0 to be the shared input/output
8955 operand and operand1 to be the readonly operand. */
8956 if (operands[0] == operands[1])
8957 operands[1] = operands[2];
8958
8959 /* These are the cases in which we win. */
8960 if (length == 4)
8961 return "add%I1b,tr %1,%0,%3";
8962
8963 /* None of the following cases win, but they don't lose either. */
8964 if (length == 8)
8965 {
8966 if (dbr_sequence_length () == 0)
8967 /* Nothing in the delay slot, fake it by putting the combined
8968 insn (the copy or add) in the delay slot of a bl. */
8969 return "b %3\n\tadd%I1 %1,%0,%0";
8970 else
8971 /* Something in the delay slot, but we've got a long branch. */
8972 return "add%I1 %1,%0,%0\n\tb %3";
8973 }
8974
8975 output_asm_insn ("add%I1 %1,%0,%0", operands);
8976 return pa_output_lbranch (operands[3], insn, 1);
8977 }
8978
8979 /* We use this hook to perform a PA specific optimization which is difficult
8980 to do in earlier passes. */
8981
8982 static void
8983 pa_reorg (void)
8984 {
8985 remove_useless_addtr_insns (1);
8986
8987 if (pa_cpu < PROCESSOR_8000)
8988 pa_combine_instructions ();
8989 }
8990
8991 /* The PA has a number of odd instructions which can perform multiple
8992 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8993 it may be profitable to combine two instructions into one instruction
8994 with two outputs. It's not profitable PA2.0 machines because the
8995 two outputs would take two slots in the reorder buffers.
8996
8997 This routine finds instructions which can be combined and combines
8998 them. We only support some of the potential combinations, and we
8999 only try common ways to find suitable instructions.
9000
9001 * addb can add two registers or a register and a small integer
9002 and jump to a nearby (+-8k) location. Normally the jump to the
9003 nearby location is conditional on the result of the add, but by
9004 using the "true" condition we can make the jump unconditional.
9005 Thus addb can perform two independent operations in one insn.
9006
9007 * movb is similar to addb in that it can perform a reg->reg
9008 or small immediate->reg copy and jump to a nearby (+-8k location).
9009
9010 * fmpyadd and fmpysub can perform a FP multiply and either an
9011 FP add or FP sub if the operands of the multiply and add/sub are
9012 independent (there are other minor restrictions). Note both
9013 the fmpy and fadd/fsub can in theory move to better spots according
9014 to data dependencies, but for now we require the fmpy stay at a
9015 fixed location.
9016
9017 * Many of the memory operations can perform pre & post updates
9018 of index registers. GCC's pre/post increment/decrement addressing
9019 is far too simple to take advantage of all the possibilities. This
9020 pass may not be suitable since those insns may not be independent.
9021
9022 * comclr can compare two ints or an int and a register, nullify
9023 the following instruction and zero some other register. This
9024 is more difficult to use as it's harder to find an insn which
9025 will generate a comclr than finding something like an unconditional
9026 branch. (conditional moves & long branches create comclr insns).
9027
9028 * Most arithmetic operations can conditionally skip the next
9029 instruction. They can be viewed as "perform this operation
9030 and conditionally jump to this nearby location" (where nearby
9031 is an insns away). These are difficult to use due to the
9032 branch length restrictions. */
9033
9034 static void
9035 pa_combine_instructions (void)
9036 {
9037 rtx_insn *anchor;
9038
9039 /* This can get expensive since the basic algorithm is on the
9040 order of O(n^2) (or worse). Only do it for -O2 or higher
9041 levels of optimization. */
9042 if (optimize < 2)
9043 return;
9044
9045 /* Walk down the list of insns looking for "anchor" insns which
9046 may be combined with "floating" insns. As the name implies,
9047 "anchor" instructions don't move, while "floating" insns may
9048 move around. */
9049 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9050 rtx_insn *new_rtx = make_insn_raw (par);
9051
9052 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9053 {
9054 enum attr_pa_combine_type anchor_attr;
9055 enum attr_pa_combine_type floater_attr;
9056
9057 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9058 Also ignore any special USE insns. */
9059 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9060 || GET_CODE (PATTERN (anchor)) == USE
9061 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9062 continue;
9063
9064 anchor_attr = get_attr_pa_combine_type (anchor);
9065 /* See if anchor is an insn suitable for combination. */
9066 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9067 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9068 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9069 && ! forward_branch_p (anchor)))
9070 {
9071 rtx_insn *floater;
9072
9073 for (floater = PREV_INSN (anchor);
9074 floater;
9075 floater = PREV_INSN (floater))
9076 {
9077 if (NOTE_P (floater)
9078 || (NONJUMP_INSN_P (floater)
9079 && (GET_CODE (PATTERN (floater)) == USE
9080 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9081 continue;
9082
9083 /* Anything except a regular INSN will stop our search. */
9084 if (! NONJUMP_INSN_P (floater))
9085 {
9086 floater = NULL;
9087 break;
9088 }
9089
9090 /* See if FLOATER is suitable for combination with the
9091 anchor. */
9092 floater_attr = get_attr_pa_combine_type (floater);
9093 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9094 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9095 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9096 && floater_attr == PA_COMBINE_TYPE_FMPY))
9097 {
9098 /* If ANCHOR and FLOATER can be combined, then we're
9099 done with this pass. */
9100 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9101 SET_DEST (PATTERN (floater)),
9102 XEXP (SET_SRC (PATTERN (floater)), 0),
9103 XEXP (SET_SRC (PATTERN (floater)), 1)))
9104 break;
9105 }
9106
9107 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9108 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9109 {
9110 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9111 {
9112 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9113 SET_DEST (PATTERN (floater)),
9114 XEXP (SET_SRC (PATTERN (floater)), 0),
9115 XEXP (SET_SRC (PATTERN (floater)), 1)))
9116 break;
9117 }
9118 else
9119 {
9120 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9121 SET_DEST (PATTERN (floater)),
9122 SET_SRC (PATTERN (floater)),
9123 SET_SRC (PATTERN (floater))))
9124 break;
9125 }
9126 }
9127 }
9128
9129 /* If we didn't find anything on the backwards scan try forwards. */
9130 if (!floater
9131 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9132 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9133 {
9134 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9135 {
9136 if (NOTE_P (floater)
9137 || (NONJUMP_INSN_P (floater)
9138 && (GET_CODE (PATTERN (floater)) == USE
9139 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9140
9141 continue;
9142
9143 /* Anything except a regular INSN will stop our search. */
9144 if (! NONJUMP_INSN_P (floater))
9145 {
9146 floater = NULL;
9147 break;
9148 }
9149
9150 /* See if FLOATER is suitable for combination with the
9151 anchor. */
9152 floater_attr = get_attr_pa_combine_type (floater);
9153 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9154 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9155 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9156 && floater_attr == PA_COMBINE_TYPE_FMPY))
9157 {
9158 /* If ANCHOR and FLOATER can be combined, then we're
9159 done with this pass. */
9160 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9161 SET_DEST (PATTERN (floater)),
9162 XEXP (SET_SRC (PATTERN (floater)),
9163 0),
9164 XEXP (SET_SRC (PATTERN (floater)),
9165 1)))
9166 break;
9167 }
9168 }
9169 }
9170
9171 /* FLOATER will be nonzero if we found a suitable floating
9172 insn for combination with ANCHOR. */
9173 if (floater
9174 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9175 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9176 {
9177 /* Emit the new instruction and delete the old anchor. */
9178 emit_insn_before (gen_rtx_PARALLEL
9179 (VOIDmode,
9180 gen_rtvec (2, PATTERN (anchor),
9181 PATTERN (floater))),
9182 anchor);
9183
9184 SET_INSN_DELETED (anchor);
9185
9186 /* Emit a special USE insn for FLOATER, then delete
9187 the floating insn. */
9188 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9189 delete_insn (floater);
9190
9191 continue;
9192 }
9193 else if (floater
9194 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9195 {
9196 rtx temp;
9197 /* Emit the new_jump instruction and delete the old anchor. */
9198 temp
9199 = emit_jump_insn_before (gen_rtx_PARALLEL
9200 (VOIDmode,
9201 gen_rtvec (2, PATTERN (anchor),
9202 PATTERN (floater))),
9203 anchor);
9204
9205 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9206 SET_INSN_DELETED (anchor);
9207
9208 /* Emit a special USE insn for FLOATER, then delete
9209 the floating insn. */
9210 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9211 delete_insn (floater);
9212 continue;
9213 }
9214 }
9215 }
9216 }
9217
9218 static int
9219 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9220 int reversed, rtx dest,
9221 rtx src1, rtx src2)
9222 {
9223 int insn_code_number;
9224 rtx_insn *start, *end;
9225
9226 /* Create a PARALLEL with the patterns of ANCHOR and
9227 FLOATER, try to recognize it, then test constraints
9228 for the resulting pattern.
9229
9230 If the pattern doesn't match or the constraints
9231 aren't met keep searching for a suitable floater
9232 insn. */
9233 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9234 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9235 INSN_CODE (new_rtx) = -1;
9236 insn_code_number = recog_memoized (new_rtx);
9237 basic_block bb = BLOCK_FOR_INSN (anchor);
9238 if (insn_code_number < 0
9239 || (extract_insn (new_rtx),
9240 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9241 return 0;
9242
9243 if (reversed)
9244 {
9245 start = anchor;
9246 end = floater;
9247 }
9248 else
9249 {
9250 start = floater;
9251 end = anchor;
9252 }
9253
9254 /* There's up to three operands to consider. One
9255 output and two inputs.
9256
9257 The output must not be used between FLOATER & ANCHOR
9258 exclusive. The inputs must not be set between
9259 FLOATER and ANCHOR exclusive. */
9260
9261 if (reg_used_between_p (dest, start, end))
9262 return 0;
9263
9264 if (reg_set_between_p (src1, start, end))
9265 return 0;
9266
9267 if (reg_set_between_p (src2, start, end))
9268 return 0;
9269
9270 /* If we get here, then everything is good. */
9271 return 1;
9272 }
9273
9274 /* Return nonzero if references for INSN are delayed.
9275
9276 Millicode insns are actually function calls with some special
9277 constraints on arguments and register usage.
9278
9279 Millicode calls always expect their arguments in the integer argument
9280 registers, and always return their result in %r29 (ret1). They
9281 are expected to clobber their arguments, %r1, %r29, and the return
9282 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9283
9284 This function tells reorg that the references to arguments and
9285 millicode calls do not appear to happen until after the millicode call.
9286 This allows reorg to put insns which set the argument registers into the
9287 delay slot of the millicode call -- thus they act more like traditional
9288 CALL_INSNs.
9289
9290 Note we cannot consider side effects of the insn to be delayed because
9291 the branch and link insn will clobber the return pointer. If we happened
9292 to use the return pointer in the delay slot of the call, then we lose.
9293
9294 get_attr_type will try to recognize the given insn, so make sure to
9295 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9296 in particular. */
9297 int
9298 pa_insn_refs_are_delayed (rtx_insn *insn)
9299 {
9300 return ((NONJUMP_INSN_P (insn)
9301 && GET_CODE (PATTERN (insn)) != SEQUENCE
9302 && GET_CODE (PATTERN (insn)) != USE
9303 && GET_CODE (PATTERN (insn)) != CLOBBER
9304 && get_attr_type (insn) == TYPE_MILLI));
9305 }
9306
9307 /* Promote the return value, but not the arguments. */
9308
9309 static machine_mode
9310 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9311 machine_mode mode,
9312 int *punsignedp ATTRIBUTE_UNUSED,
9313 const_tree fntype ATTRIBUTE_UNUSED,
9314 int for_return)
9315 {
9316 if (for_return == 0)
9317 return mode;
9318 return promote_mode (type, mode, punsignedp);
9319 }
9320
9321 /* On the HP-PA the value is found in register(s) 28(-29), unless
9322 the mode is SF or DF. Then the value is returned in fr4 (32).
9323
9324 This must perform the same promotions as PROMOTE_MODE, else promoting
9325 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9326
9327 Small structures must be returned in a PARALLEL on PA64 in order
9328 to match the HP Compiler ABI. */
9329
9330 static rtx
9331 pa_function_value (const_tree valtype,
9332 const_tree func ATTRIBUTE_UNUSED,
9333 bool outgoing ATTRIBUTE_UNUSED)
9334 {
9335 machine_mode valmode;
9336
9337 if (AGGREGATE_TYPE_P (valtype)
9338 || TREE_CODE (valtype) == COMPLEX_TYPE
9339 || TREE_CODE (valtype) == VECTOR_TYPE)
9340 {
9341 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9342
9343 /* Handle aggregates that fit exactly in a word or double word. */
9344 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9345 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9346
9347 if (TARGET_64BIT)
9348 {
9349 /* Aggregates with a size less than or equal to 128 bits are
9350 returned in GR 28(-29). They are left justified. The pad
9351 bits are undefined. Larger aggregates are returned in
9352 memory. */
9353 rtx loc[2];
9354 int i, offset = 0;
9355 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9356
9357 for (i = 0; i < ub; i++)
9358 {
9359 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9360 gen_rtx_REG (DImode, 28 + i),
9361 GEN_INT (offset));
9362 offset += 8;
9363 }
9364
9365 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9366 }
9367 else if (valsize > UNITS_PER_WORD)
9368 {
9369 /* Aggregates 5 to 8 bytes in size are returned in general
9370 registers r28-r29 in the same manner as other non
9371 floating-point objects. The data is right-justified and
9372 zero-extended to 64 bits. This is opposite to the normal
9373 justification used on big endian targets and requires
9374 special treatment. */
9375 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9376 gen_rtx_REG (DImode, 28), const0_rtx);
9377 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9378 }
9379 }
9380
9381 if ((INTEGRAL_TYPE_P (valtype)
9382 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9383 || POINTER_TYPE_P (valtype))
9384 valmode = word_mode;
9385 else
9386 valmode = TYPE_MODE (valtype);
9387
9388 if (TREE_CODE (valtype) == REAL_TYPE
9389 && !AGGREGATE_TYPE_P (valtype)
9390 && TYPE_MODE (valtype) != TFmode
9391 && !TARGET_SOFT_FLOAT)
9392 return gen_rtx_REG (valmode, 32);
9393
9394 return gen_rtx_REG (valmode, 28);
9395 }
9396
9397 /* Implement the TARGET_LIBCALL_VALUE hook. */
9398
9399 static rtx
9400 pa_libcall_value (machine_mode mode,
9401 const_rtx fun ATTRIBUTE_UNUSED)
9402 {
9403 if (! TARGET_SOFT_FLOAT
9404 && (mode == SFmode || mode == DFmode))
9405 return gen_rtx_REG (mode, 32);
9406 else
9407 return gen_rtx_REG (mode, 28);
9408 }
9409
9410 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9411
9412 static bool
9413 pa_function_value_regno_p (const unsigned int regno)
9414 {
9415 if (regno == 28
9416 || (! TARGET_SOFT_FLOAT && regno == 32))
9417 return true;
9418
9419 return false;
9420 }
9421
9422 /* Update the data in CUM to advance over an argument
9423 of mode MODE and data type TYPE.
9424 (TYPE is null for libcalls where that information may not be available.) */
9425
9426 static void
9427 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9428 const_tree type, bool named ATTRIBUTE_UNUSED)
9429 {
9430 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9431 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9432
9433 cum->nargs_prototype--;
9434 cum->words += (arg_size
9435 + ((cum->words & 01)
9436 && type != NULL_TREE
9437 && arg_size > 1));
9438 }
9439
9440 /* Return the location of a parameter that is passed in a register or NULL
9441 if the parameter has any component that is passed in memory.
9442
9443 This is new code and will be pushed to into the net sources after
9444 further testing.
9445
9446 ??? We might want to restructure this so that it looks more like other
9447 ports. */
9448 static rtx
9449 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9450 const_tree type, bool named ATTRIBUTE_UNUSED)
9451 {
9452 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9453 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9454 int alignment = 0;
9455 int arg_size;
9456 int fpr_reg_base;
9457 int gpr_reg_base;
9458 rtx retval;
9459
9460 if (mode == VOIDmode)
9461 return NULL_RTX;
9462
9463 arg_size = FUNCTION_ARG_SIZE (mode, type);
9464
9465 /* If this arg would be passed partially or totally on the stack, then
9466 this routine should return zero. pa_arg_partial_bytes will
9467 handle arguments which are split between regs and stack slots if
9468 the ABI mandates split arguments. */
9469 if (!TARGET_64BIT)
9470 {
9471 /* The 32-bit ABI does not split arguments. */
9472 if (cum->words + arg_size > max_arg_words)
9473 return NULL_RTX;
9474 }
9475 else
9476 {
9477 if (arg_size > 1)
9478 alignment = cum->words & 1;
9479 if (cum->words + alignment >= max_arg_words)
9480 return NULL_RTX;
9481 }
9482
9483 /* The 32bit ABIs and the 64bit ABIs are rather different,
9484 particularly in their handling of FP registers. We might
9485 be able to cleverly share code between them, but I'm not
9486 going to bother in the hope that splitting them up results
9487 in code that is more easily understood. */
9488
9489 if (TARGET_64BIT)
9490 {
9491 /* Advance the base registers to their current locations.
9492
9493 Remember, gprs grow towards smaller register numbers while
9494 fprs grow to higher register numbers. Also remember that
9495 although FP regs are 32-bit addressable, we pretend that
9496 the registers are 64-bits wide. */
9497 gpr_reg_base = 26 - cum->words;
9498 fpr_reg_base = 32 + cum->words;
9499
9500 /* Arguments wider than one word and small aggregates need special
9501 treatment. */
9502 if (arg_size > 1
9503 || mode == BLKmode
9504 || (type && (AGGREGATE_TYPE_P (type)
9505 || TREE_CODE (type) == COMPLEX_TYPE
9506 || TREE_CODE (type) == VECTOR_TYPE)))
9507 {
9508 /* Double-extended precision (80-bit), quad-precision (128-bit)
9509 and aggregates including complex numbers are aligned on
9510 128-bit boundaries. The first eight 64-bit argument slots
9511 are associated one-to-one, with general registers r26
9512 through r19, and also with floating-point registers fr4
9513 through fr11. Arguments larger than one word are always
9514 passed in general registers.
9515
9516 Using a PARALLEL with a word mode register results in left
9517 justified data on a big-endian target. */
9518
9519 rtx loc[8];
9520 int i, offset = 0, ub = arg_size;
9521
9522 /* Align the base register. */
9523 gpr_reg_base -= alignment;
9524
9525 ub = MIN (ub, max_arg_words - cum->words - alignment);
9526 for (i = 0; i < ub; i++)
9527 {
9528 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9529 gen_rtx_REG (DImode, gpr_reg_base),
9530 GEN_INT (offset));
9531 gpr_reg_base -= 1;
9532 offset += 8;
9533 }
9534
9535 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9536 }
9537 }
9538 else
9539 {
9540 /* If the argument is larger than a word, then we know precisely
9541 which registers we must use. */
9542 if (arg_size > 1)
9543 {
9544 if (cum->words)
9545 {
9546 gpr_reg_base = 23;
9547 fpr_reg_base = 38;
9548 }
9549 else
9550 {
9551 gpr_reg_base = 25;
9552 fpr_reg_base = 34;
9553 }
9554
9555 /* Structures 5 to 8 bytes in size are passed in the general
9556 registers in the same manner as other non floating-point
9557 objects. The data is right-justified and zero-extended
9558 to 64 bits. This is opposite to the normal justification
9559 used on big endian targets and requires special treatment.
9560 We now define BLOCK_REG_PADDING to pad these objects.
9561 Aggregates, complex and vector types are passed in the same
9562 manner as structures. */
9563 if (mode == BLKmode
9564 || (type && (AGGREGATE_TYPE_P (type)
9565 || TREE_CODE (type) == COMPLEX_TYPE
9566 || TREE_CODE (type) == VECTOR_TYPE)))
9567 {
9568 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9569 gen_rtx_REG (DImode, gpr_reg_base),
9570 const0_rtx);
9571 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9572 }
9573 }
9574 else
9575 {
9576 /* We have a single word (32 bits). A simple computation
9577 will get us the register #s we need. */
9578 gpr_reg_base = 26 - cum->words;
9579 fpr_reg_base = 32 + 2 * cum->words;
9580 }
9581 }
9582
9583 /* Determine if the argument needs to be passed in both general and
9584 floating point registers. */
9585 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9586 /* If we are doing soft-float with portable runtime, then there
9587 is no need to worry about FP regs. */
9588 && !TARGET_SOFT_FLOAT
9589 /* The parameter must be some kind of scalar float, else we just
9590 pass it in integer registers. */
9591 && GET_MODE_CLASS (mode) == MODE_FLOAT
9592 /* The target function must not have a prototype. */
9593 && cum->nargs_prototype <= 0
9594 /* libcalls do not need to pass items in both FP and general
9595 registers. */
9596 && type != NULL_TREE
9597 /* All this hair applies to "outgoing" args only. This includes
9598 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9599 && !cum->incoming)
9600 /* Also pass outgoing floating arguments in both registers in indirect
9601 calls with the 32 bit ABI and the HP assembler since there is no
9602 way to the specify argument locations in static functions. */
9603 || (!TARGET_64BIT
9604 && !TARGET_GAS
9605 && !cum->incoming
9606 && cum->indirect
9607 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9608 {
9609 retval
9610 = gen_rtx_PARALLEL
9611 (mode,
9612 gen_rtvec (2,
9613 gen_rtx_EXPR_LIST (VOIDmode,
9614 gen_rtx_REG (mode, fpr_reg_base),
9615 const0_rtx),
9616 gen_rtx_EXPR_LIST (VOIDmode,
9617 gen_rtx_REG (mode, gpr_reg_base),
9618 const0_rtx)));
9619 }
9620 else
9621 {
9622 /* See if we should pass this parameter in a general register. */
9623 if (TARGET_SOFT_FLOAT
9624 /* Indirect calls in the normal 32bit ABI require all arguments
9625 to be passed in general registers. */
9626 || (!TARGET_PORTABLE_RUNTIME
9627 && !TARGET_64BIT
9628 && !TARGET_ELF32
9629 && cum->indirect)
9630 /* If the parameter is not a scalar floating-point parameter,
9631 then it belongs in GPRs. */
9632 || GET_MODE_CLASS (mode) != MODE_FLOAT
9633 /* Structure with single SFmode field belongs in GPR. */
9634 || (type && AGGREGATE_TYPE_P (type)))
9635 retval = gen_rtx_REG (mode, gpr_reg_base);
9636 else
9637 retval = gen_rtx_REG (mode, fpr_reg_base);
9638 }
9639 return retval;
9640 }
9641
9642 /* Arguments larger than one word are double word aligned. */
9643
9644 static unsigned int
9645 pa_function_arg_boundary (machine_mode mode, const_tree type)
9646 {
9647 bool singleword = (type
9648 ? (integer_zerop (TYPE_SIZE (type))
9649 || !TREE_CONSTANT (TYPE_SIZE (type))
9650 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9651 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9652
9653 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9654 }
9655
9656 /* If this arg would be passed totally in registers or totally on the stack,
9657 then this routine should return zero. */
9658
9659 static int
9660 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9661 tree type, bool named ATTRIBUTE_UNUSED)
9662 {
9663 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9664 unsigned int max_arg_words = 8;
9665 unsigned int offset = 0;
9666
9667 if (!TARGET_64BIT)
9668 return 0;
9669
9670 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9671 offset = 1;
9672
9673 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9674 /* Arg fits fully into registers. */
9675 return 0;
9676 else if (cum->words + offset >= max_arg_words)
9677 /* Arg fully on the stack. */
9678 return 0;
9679 else
9680 /* Arg is split. */
9681 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9682 }
9683
9684
9685 /* A get_unnamed_section callback for switching to the text section.
9686
9687 This function is only used with SOM. Because we don't support
9688 named subspaces, we can only create a new subspace or switch back
9689 to the default text subspace. */
9690
9691 static void
9692 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9693 {
9694 gcc_assert (TARGET_SOM);
9695 if (TARGET_GAS)
9696 {
9697 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9698 {
9699 /* We only want to emit a .nsubspa directive once at the
9700 start of the function. */
9701 cfun->machine->in_nsubspa = 1;
9702
9703 /* Create a new subspace for the text. This provides
9704 better stub placement and one-only functions. */
9705 if (cfun->decl
9706 && DECL_ONE_ONLY (cfun->decl)
9707 && !DECL_WEAK (cfun->decl))
9708 {
9709 output_section_asm_op ("\t.SPACE $TEXT$\n"
9710 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9711 "ACCESS=44,SORT=24,COMDAT");
9712 return;
9713 }
9714 }
9715 else
9716 {
9717 /* There isn't a current function or the body of the current
9718 function has been completed. So, we are changing to the
9719 text section to output debugging information. Thus, we
9720 need to forget that we are in the text section so that
9721 varasm.c will call us when text_section is selected again. */
9722 gcc_assert (!cfun || !cfun->machine
9723 || cfun->machine->in_nsubspa == 2);
9724 in_section = NULL;
9725 }
9726 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9727 return;
9728 }
9729 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9730 }
9731
9732 /* A get_unnamed_section callback for switching to comdat data
9733 sections. This function is only used with SOM. */
9734
9735 static void
9736 som_output_comdat_data_section_asm_op (const void *data)
9737 {
9738 in_section = NULL;
9739 output_section_asm_op (data);
9740 }
9741
9742 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9743
9744 static void
9745 pa_som_asm_init_sections (void)
9746 {
9747 text_section
9748 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9749
9750 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9751 is not being generated. */
9752 som_readonly_data_section
9753 = get_unnamed_section (0, output_section_asm_op,
9754 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9755
9756 /* When secondary definitions are not supported, SOM makes readonly
9757 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9758 the comdat flag. */
9759 som_one_only_readonly_data_section
9760 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9761 "\t.SPACE $TEXT$\n"
9762 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9763 "ACCESS=0x2c,SORT=16,COMDAT");
9764
9765
9766 /* When secondary definitions are not supported, SOM makes data one-only
9767 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9768 som_one_only_data_section
9769 = get_unnamed_section (SECTION_WRITE,
9770 som_output_comdat_data_section_asm_op,
9771 "\t.SPACE $PRIVATE$\n"
9772 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9773 "ACCESS=31,SORT=24,COMDAT");
9774
9775 if (flag_tm)
9776 som_tm_clone_table_section
9777 = get_unnamed_section (0, output_section_asm_op,
9778 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9779
9780 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9781 which reference data within the $TEXT$ space (for example constant
9782 strings in the $LIT$ subspace).
9783
9784 The assemblers (GAS and HP as) both have problems with handling
9785 the difference of two symbols which is the other correct way to
9786 reference constant data during PIC code generation.
9787
9788 So, there's no way to reference constant data which is in the
9789 $TEXT$ space during PIC generation. Instead place all constant
9790 data into the $PRIVATE$ subspace (this reduces sharing, but it
9791 works correctly). */
9792 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9793
9794 /* We must not have a reference to an external symbol defined in a
9795 shared library in a readonly section, else the SOM linker will
9796 complain.
9797
9798 So, we force exception information into the data section. */
9799 exception_section = data_section;
9800 }
9801
9802 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9803
9804 static section *
9805 pa_som_tm_clone_table_section (void)
9806 {
9807 return som_tm_clone_table_section;
9808 }
9809
9810 /* On hpux10, the linker will give an error if we have a reference
9811 in the read-only data section to a symbol defined in a shared
9812 library. Therefore, expressions that might require a reloc can
9813 not be placed in the read-only data section. */
9814
9815 static section *
9816 pa_select_section (tree exp, int reloc,
9817 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9818 {
9819 if (TREE_CODE (exp) == VAR_DECL
9820 && TREE_READONLY (exp)
9821 && !TREE_THIS_VOLATILE (exp)
9822 && DECL_INITIAL (exp)
9823 && (DECL_INITIAL (exp) == error_mark_node
9824 || TREE_CONSTANT (DECL_INITIAL (exp)))
9825 && !reloc)
9826 {
9827 if (TARGET_SOM
9828 && DECL_ONE_ONLY (exp)
9829 && !DECL_WEAK (exp))
9830 return som_one_only_readonly_data_section;
9831 else
9832 return readonly_data_section;
9833 }
9834 else if (CONSTANT_CLASS_P (exp) && !reloc)
9835 return readonly_data_section;
9836 else if (TARGET_SOM
9837 && TREE_CODE (exp) == VAR_DECL
9838 && DECL_ONE_ONLY (exp)
9839 && !DECL_WEAK (exp))
9840 return som_one_only_data_section;
9841 else
9842 return data_section;
9843 }
9844
9845 /* Implement pa_reloc_rw_mask. */
9846
9847 static int
9848 pa_reloc_rw_mask (void)
9849 {
9850 /* We force (const (plus (symbol) (const_int))) to memory when the
9851 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9852 handle this construct in read-only memory and we want to avoid
9853 this for ELF. So, we always force an RTX needing relocation to
9854 the data section. */
9855 return 3;
9856 }
9857
9858 static void
9859 pa_globalize_label (FILE *stream, const char *name)
9860 {
9861 /* We only handle DATA objects here, functions are globalized in
9862 ASM_DECLARE_FUNCTION_NAME. */
9863 if (! FUNCTION_NAME_P (name))
9864 {
9865 fputs ("\t.EXPORT ", stream);
9866 assemble_name (stream, name);
9867 fputs (",DATA\n", stream);
9868 }
9869 }
9870
9871 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9872
9873 static rtx
9874 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9875 int incoming ATTRIBUTE_UNUSED)
9876 {
9877 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9878 }
9879
9880 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9881
9882 bool
9883 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9884 {
9885 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9886 PA64 ABI says that objects larger than 128 bits are returned in memory.
9887 Note, int_size_in_bytes can return -1 if the size of the object is
9888 variable or larger than the maximum value that can be expressed as
9889 a HOST_WIDE_INT. It can also return zero for an empty type. The
9890 simplest way to handle variable and empty types is to pass them in
9891 memory. This avoids problems in defining the boundaries of argument
9892 slots, allocating registers, etc. */
9893 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9894 || int_size_in_bytes (type) <= 0);
9895 }
9896
9897 /* Structure to hold declaration and name of external symbols that are
9898 emitted by GCC. We generate a vector of these symbols and output them
9899 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9900 This avoids putting out names that are never really used. */
9901
9902 typedef struct GTY(()) extern_symbol
9903 {
9904 tree decl;
9905 const char *name;
9906 } extern_symbol;
9907
9908 /* Define gc'd vector type for extern_symbol. */
9909
9910 /* Vector of extern_symbol pointers. */
9911 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9912
9913 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9914 /* Mark DECL (name NAME) as an external reference (assembler output
9915 file FILE). This saves the names to output at the end of the file
9916 if actually referenced. */
9917
9918 void
9919 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9920 {
9921 gcc_assert (file == asm_out_file);
9922 extern_symbol p = {decl, name};
9923 vec_safe_push (extern_symbols, p);
9924 }
9925
9926 /* Output text required at the end of an assembler file.
9927 This includes deferred plabels and .import directives for
9928 all external symbols that were actually referenced. */
9929
9930 static void
9931 pa_hpux_file_end (void)
9932 {
9933 unsigned int i;
9934 extern_symbol *p;
9935
9936 if (!NO_DEFERRED_PROFILE_COUNTERS)
9937 output_deferred_profile_counters ();
9938
9939 output_deferred_plabels ();
9940
9941 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9942 {
9943 tree decl = p->decl;
9944
9945 if (!TREE_ASM_WRITTEN (decl)
9946 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9947 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9948 }
9949
9950 vec_free (extern_symbols);
9951 }
9952 #endif
9953
9954 /* Return true if a change from mode FROM to mode TO for a register
9955 in register class RCLASS is invalid. */
9956
9957 bool
9958 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9959 enum reg_class rclass)
9960 {
9961 if (from == to)
9962 return false;
9963
9964 /* Reject changes to/from complex and vector modes. */
9965 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9966 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9967 return true;
9968
9969 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9970 return false;
9971
9972 /* There is no way to load QImode or HImode values directly from
9973 memory. SImode loads to the FP registers are not zero extended.
9974 On the 64-bit target, this conflicts with the definition of
9975 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9976 with different sizes in the floating-point registers. */
9977 if (MAYBE_FP_REG_CLASS_P (rclass))
9978 return true;
9979
9980 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9981 in specific sets of registers. Thus, we cannot allow changing
9982 to a larger mode when it's larger than a word. */
9983 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9984 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9985 return true;
9986
9987 return false;
9988 }
9989
9990 /* Returns TRUE if it is a good idea to tie two pseudo registers
9991 when one has mode MODE1 and one has mode MODE2.
9992 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9993 for any hard reg, then this must be FALSE for correct output.
9994
9995 We should return FALSE for QImode and HImode because these modes
9996 are not ok in the floating-point registers. However, this prevents
9997 tieing these modes to SImode and DImode in the general registers.
9998 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9999 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10000 in the floating-point registers. */
10001
10002 bool
10003 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10004 {
10005 /* Don't tie modes in different classes. */
10006 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10007 return false;
10008
10009 return true;
10010 }
10011
10012 \f
10013 /* Length in units of the trampoline instruction code. */
10014
10015 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10016
10017
10018 /* Output assembler code for a block containing the constant parts
10019 of a trampoline, leaving space for the variable parts.\
10020
10021 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10022 and then branches to the specified routine.
10023
10024 This code template is copied from text segment to stack location
10025 and then patched with pa_trampoline_init to contain valid values,
10026 and then entered as a subroutine.
10027
10028 It is best to keep this as small as possible to avoid having to
10029 flush multiple lines in the cache. */
10030
10031 static void
10032 pa_asm_trampoline_template (FILE *f)
10033 {
10034 if (!TARGET_64BIT)
10035 {
10036 fputs ("\tldw 36(%r22),%r21\n", f);
10037 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10038 if (ASSEMBLER_DIALECT == 0)
10039 fputs ("\tdepi 0,31,2,%r21\n", f);
10040 else
10041 fputs ("\tdepwi 0,31,2,%r21\n", f);
10042 fputs ("\tldw 4(%r21),%r19\n", f);
10043 fputs ("\tldw 0(%r21),%r21\n", f);
10044 if (TARGET_PA_20)
10045 {
10046 fputs ("\tbve (%r21)\n", f);
10047 fputs ("\tldw 40(%r22),%r29\n", f);
10048 fputs ("\t.word 0\n", f);
10049 fputs ("\t.word 0\n", f);
10050 }
10051 else
10052 {
10053 fputs ("\tldsid (%r21),%r1\n", f);
10054 fputs ("\tmtsp %r1,%sr0\n", f);
10055 fputs ("\tbe 0(%sr0,%r21)\n", f);
10056 fputs ("\tldw 40(%r22),%r29\n", f);
10057 }
10058 fputs ("\t.word 0\n", f);
10059 fputs ("\t.word 0\n", f);
10060 fputs ("\t.word 0\n", f);
10061 fputs ("\t.word 0\n", f);
10062 }
10063 else
10064 {
10065 fputs ("\t.dword 0\n", f);
10066 fputs ("\t.dword 0\n", f);
10067 fputs ("\t.dword 0\n", f);
10068 fputs ("\t.dword 0\n", f);
10069 fputs ("\tmfia %r31\n", f);
10070 fputs ("\tldd 24(%r31),%r1\n", f);
10071 fputs ("\tldd 24(%r1),%r27\n", f);
10072 fputs ("\tldd 16(%r1),%r1\n", f);
10073 fputs ("\tbve (%r1)\n", f);
10074 fputs ("\tldd 32(%r31),%r31\n", f);
10075 fputs ("\t.dword 0 ; fptr\n", f);
10076 fputs ("\t.dword 0 ; static link\n", f);
10077 }
10078 }
10079
10080 /* Emit RTL insns to initialize the variable parts of a trampoline.
10081 FNADDR is an RTX for the address of the function's pure code.
10082 CXT is an RTX for the static chain value for the function.
10083
10084 Move the function address to the trampoline template at offset 36.
10085 Move the static chain value to trampoline template at offset 40.
10086 Move the trampoline address to trampoline template at offset 44.
10087 Move r19 to trampoline template at offset 48. The latter two
10088 words create a plabel for the indirect call to the trampoline.
10089
10090 A similar sequence is used for the 64-bit port but the plabel is
10091 at the beginning of the trampoline.
10092
10093 Finally, the cache entries for the trampoline code are flushed.
10094 This is necessary to ensure that the trampoline instruction sequence
10095 is written to memory prior to any attempts at prefetching the code
10096 sequence. */
10097
10098 static void
10099 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10100 {
10101 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10102 rtx start_addr = gen_reg_rtx (Pmode);
10103 rtx end_addr = gen_reg_rtx (Pmode);
10104 rtx line_length = gen_reg_rtx (Pmode);
10105 rtx r_tramp, tmp;
10106
10107 emit_block_move (m_tramp, assemble_trampoline_template (),
10108 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10109 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10110
10111 if (!TARGET_64BIT)
10112 {
10113 tmp = adjust_address (m_tramp, Pmode, 36);
10114 emit_move_insn (tmp, fnaddr);
10115 tmp = adjust_address (m_tramp, Pmode, 40);
10116 emit_move_insn (tmp, chain_value);
10117
10118 /* Create a fat pointer for the trampoline. */
10119 tmp = adjust_address (m_tramp, Pmode, 44);
10120 emit_move_insn (tmp, r_tramp);
10121 tmp = adjust_address (m_tramp, Pmode, 48);
10122 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10123
10124 /* fdc and fic only use registers for the address to flush,
10125 they do not accept integer displacements. We align the
10126 start and end addresses to the beginning of their respective
10127 cache lines to minimize the number of lines flushed. */
10128 emit_insn (gen_andsi3 (start_addr, r_tramp,
10129 GEN_INT (-MIN_CACHELINE_SIZE)));
10130 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10131 TRAMPOLINE_CODE_SIZE-1));
10132 emit_insn (gen_andsi3 (end_addr, tmp,
10133 GEN_INT (-MIN_CACHELINE_SIZE)));
10134 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10135 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10136 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10137 gen_reg_rtx (Pmode),
10138 gen_reg_rtx (Pmode)));
10139 }
10140 else
10141 {
10142 tmp = adjust_address (m_tramp, Pmode, 56);
10143 emit_move_insn (tmp, fnaddr);
10144 tmp = adjust_address (m_tramp, Pmode, 64);
10145 emit_move_insn (tmp, chain_value);
10146
10147 /* Create a fat pointer for the trampoline. */
10148 tmp = adjust_address (m_tramp, Pmode, 16);
10149 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10150 r_tramp, 32)));
10151 tmp = adjust_address (m_tramp, Pmode, 24);
10152 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10153
10154 /* fdc and fic only use registers for the address to flush,
10155 they do not accept integer displacements. We align the
10156 start and end addresses to the beginning of their respective
10157 cache lines to minimize the number of lines flushed. */
10158 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10159 emit_insn (gen_anddi3 (start_addr, tmp,
10160 GEN_INT (-MIN_CACHELINE_SIZE)));
10161 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10162 TRAMPOLINE_CODE_SIZE - 1));
10163 emit_insn (gen_anddi3 (end_addr, tmp,
10164 GEN_INT (-MIN_CACHELINE_SIZE)));
10165 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10166 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10167 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10168 gen_reg_rtx (Pmode),
10169 gen_reg_rtx (Pmode)));
10170 }
10171
10172 #ifdef HAVE_ENABLE_EXECUTE_STACK
10173  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10174      LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10175 #endif
10176 }
10177
10178 /* Perform any machine-specific adjustment in the address of the trampoline.
10179 ADDR contains the address that was passed to pa_trampoline_init.
10180 Adjust the trampoline address to point to the plabel at offset 44. */
10181
10182 static rtx
10183 pa_trampoline_adjust_address (rtx addr)
10184 {
10185 if (!TARGET_64BIT)
10186 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10187 return addr;
10188 }
10189
10190 static rtx
10191 pa_delegitimize_address (rtx orig_x)
10192 {
10193 rtx x = delegitimize_mem_from_attrs (orig_x);
10194
10195 if (GET_CODE (x) == LO_SUM
10196 && GET_CODE (XEXP (x, 1)) == UNSPEC
10197 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10198 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10199 return x;
10200 }
10201 \f
10202 static rtx
10203 pa_internal_arg_pointer (void)
10204 {
10205 /* The argument pointer and the hard frame pointer are the same in
10206 the 32-bit runtime, so we don't need a copy. */
10207 if (TARGET_64BIT)
10208 return copy_to_reg (virtual_incoming_args_rtx);
10209 else
10210 return virtual_incoming_args_rtx;
10211 }
10212
10213 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10214 Frame pointer elimination is automatically handled. */
10215
10216 static bool
10217 pa_can_eliminate (const int from, const int to)
10218 {
10219 /* The argument cannot be eliminated in the 64-bit runtime. */
10220 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10221 return false;
10222
10223 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10224 ? ! frame_pointer_needed
10225 : true);
10226 }
10227
10228 /* Define the offset between two registers, FROM to be eliminated and its
10229 replacement TO, at the start of a routine. */
10230 HOST_WIDE_INT
10231 pa_initial_elimination_offset (int from, int to)
10232 {
10233 HOST_WIDE_INT offset;
10234
10235 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10236 && to == STACK_POINTER_REGNUM)
10237 offset = -pa_compute_frame_size (get_frame_size (), 0);
10238 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10239 offset = 0;
10240 else
10241 gcc_unreachable ();
10242
10243 return offset;
10244 }
10245
10246 static void
10247 pa_conditional_register_usage (void)
10248 {
10249 int i;
10250
10251 if (!TARGET_64BIT && !TARGET_PA_11)
10252 {
10253 for (i = 56; i <= FP_REG_LAST; i++)
10254 fixed_regs[i] = call_used_regs[i] = 1;
10255 for (i = 33; i < 56; i += 2)
10256 fixed_regs[i] = call_used_regs[i] = 1;
10257 }
10258 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10259 {
10260 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10261 fixed_regs[i] = call_used_regs[i] = 1;
10262 }
10263 if (flag_pic)
10264 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10265 }
10266
10267 /* Target hook for c_mode_for_suffix. */
10268
10269 static machine_mode
10270 pa_c_mode_for_suffix (char suffix)
10271 {
10272 if (HPUX_LONG_DOUBLE_LIBRARY)
10273 {
10274 if (suffix == 'q')
10275 return TFmode;
10276 }
10277
10278 return VOIDmode;
10279 }
10280
10281 /* Target hook for function_section. */
10282
10283 static section *
10284 pa_function_section (tree decl, enum node_frequency freq,
10285 bool startup, bool exit)
10286 {
10287 /* Put functions in text section if target doesn't have named sections. */
10288 if (!targetm_common.have_named_sections)
10289 return text_section;
10290
10291 /* Force nested functions into the same section as the containing
10292 function. */
10293 if (decl
10294 && DECL_SECTION_NAME (decl) == NULL
10295 && DECL_CONTEXT (decl) != NULL_TREE
10296 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10297 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10298 return function_section (DECL_CONTEXT (decl));
10299
10300 /* Otherwise, use the default function section. */
10301 return default_function_section (decl, freq, startup, exit);
10302 }
10303
10304 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10305
10306 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10307 that need more than three instructions to load prior to reload. This
10308 limit is somewhat arbitrary. It takes three instructions to load a
10309 CONST_INT from memory but two are memory accesses. It may be better
10310 to increase the allowed range for CONST_INTS. We may also be able
10311 to handle CONST_DOUBLES. */
10312
10313 static bool
10314 pa_legitimate_constant_p (machine_mode mode, rtx x)
10315 {
10316 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10317 return false;
10318
10319 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10320 return false;
10321
10322 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10323 legitimate constants. The other variants can't be handled by
10324 the move patterns after reload starts. */
10325 if (tls_referenced_p (x))
10326 return false;
10327
10328 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10329 return false;
10330
10331 if (TARGET_64BIT
10332 && HOST_BITS_PER_WIDE_INT > 32
10333 && GET_CODE (x) == CONST_INT
10334 && !reload_in_progress
10335 && !reload_completed
10336 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10337 && !pa_cint_ok_for_move (UINTVAL (x)))
10338 return false;
10339
10340 if (function_label_operand (x, mode))
10341 return false;
10342
10343 return true;
10344 }
10345
10346 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10347
10348 static unsigned int
10349 pa_section_type_flags (tree decl, const char *name, int reloc)
10350 {
10351 unsigned int flags;
10352
10353 flags = default_section_type_flags (decl, name, reloc);
10354
10355 /* Function labels are placed in the constant pool. This can
10356 cause a section conflict if decls are put in ".data.rel.ro"
10357 or ".data.rel.ro.local" using the __attribute__ construct. */
10358 if (strcmp (name, ".data.rel.ro") == 0
10359 || strcmp (name, ".data.rel.ro.local") == 0)
10360 flags |= SECTION_WRITE | SECTION_RELRO;
10361
10362 return flags;
10363 }
10364
10365 /* pa_legitimate_address_p recognizes an RTL expression that is a
10366 valid memory address for an instruction. The MODE argument is the
10367 machine mode for the MEM expression that wants to use this address.
10368
10369 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10370 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10371 available with floating point loads and stores, and integer loads.
10372 We get better code by allowing indexed addresses in the initial
10373 RTL generation.
10374
10375 The acceptance of indexed addresses as legitimate implies that we
10376 must provide patterns for doing indexed integer stores, or the move
10377 expanders must force the address of an indexed store to a register.
10378 We have adopted the latter approach.
10379
10380 Another function of pa_legitimate_address_p is to ensure that
10381 the base register is a valid pointer for indexed instructions.
10382 On targets that have non-equivalent space registers, we have to
10383 know at the time of assembler output which register in a REG+REG
10384 pair is the base register. The REG_POINTER flag is sometimes lost
10385 in reload and the following passes, so it can't be relied on during
10386 code generation. Thus, we either have to canonicalize the order
10387 of the registers in REG+REG indexed addresses, or treat REG+REG
10388 addresses separately and provide patterns for both permutations.
10389
10390 The latter approach requires several hundred additional lines of
10391 code in pa.md. The downside to canonicalizing is that a PLUS
10392 in the wrong order can't combine to form to make a scaled indexed
10393 memory operand. As we won't need to canonicalize the operands if
10394 the REG_POINTER lossage can be fixed, it seems better canonicalize.
10395
10396 We initially break out scaled indexed addresses in canonical order
10397 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10398 scaled indexed addresses during RTL generation. However, fold_rtx
10399 has its own opinion on how the operands of a PLUS should be ordered.
10400 If one of the operands is equivalent to a constant, it will make
10401 that operand the second operand. As the base register is likely to
10402 be equivalent to a SYMBOL_REF, we have made it the second operand.
10403
10404 pa_legitimate_address_p accepts REG+REG as legitimate when the
10405 operands are in the order INDEX+BASE on targets with non-equivalent
10406 space registers, and in any order on targets with equivalent space
10407 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10408
10409 We treat a SYMBOL_REF as legitimate if it is part of the current
10410 function's constant-pool, because such addresses can actually be
10411 output as REG+SMALLINT. */
10412
10413 static bool
10414 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10415 {
10416 if ((REG_P (x)
10417 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10418 : REG_OK_FOR_BASE_P (x)))
10419 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10420 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10421 && REG_P (XEXP (x, 0))
10422 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10423 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10424 return true;
10425
10426 if (GET_CODE (x) == PLUS)
10427 {
10428 rtx base, index;
10429
10430 /* For REG+REG, the base register should be in XEXP (x, 1),
10431 so check it first. */
10432 if (REG_P (XEXP (x, 1))
10433 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10434 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10435 base = XEXP (x, 1), index = XEXP (x, 0);
10436 else if (REG_P (XEXP (x, 0))
10437 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10438 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10439 base = XEXP (x, 0), index = XEXP (x, 1);
10440 else
10441 return false;
10442
10443 if (GET_CODE (index) == CONST_INT)
10444 {
10445 if (INT_5_BITS (index))
10446 return true;
10447
10448 /* When INT14_OK_STRICT is false, a secondary reload is needed
10449 to adjust the displacement of SImode and DImode floating point
10450 instructions but this may fail when the register also needs
10451 reloading. So, we return false when STRICT is true. We
10452 also reject long displacements for float mode addresses since
10453 the majority of accesses will use floating point instructions
10454 that don't support 14-bit offsets. */
10455 if (!INT14_OK_STRICT
10456 && (strict || !(reload_in_progress || reload_completed))
10457 && mode != QImode
10458 && mode != HImode)
10459 return false;
10460
10461 return base14_operand (index, mode);
10462 }
10463
10464 if (!TARGET_DISABLE_INDEXING
10465 /* Only accept the "canonical" INDEX+BASE operand order
10466 on targets with non-equivalent space registers. */
10467 && (TARGET_NO_SPACE_REGS
10468 ? REG_P (index)
10469 : (base == XEXP (x, 1) && REG_P (index)
10470 && (reload_completed
10471 || (reload_in_progress && HARD_REGISTER_P (base))
10472 || REG_POINTER (base))
10473 && (reload_completed
10474 || (reload_in_progress && HARD_REGISTER_P (index))
10475 || !REG_POINTER (index))))
10476 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10477 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10478 : REG_OK_FOR_INDEX_P (index))
10479 && borx_reg_operand (base, Pmode)
10480 && borx_reg_operand (index, Pmode))
10481 return true;
10482
10483 if (!TARGET_DISABLE_INDEXING
10484 && GET_CODE (index) == MULT
10485 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10486 && REG_P (XEXP (index, 0))
10487 && GET_MODE (XEXP (index, 0)) == Pmode
10488 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10489 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10490 && GET_CODE (XEXP (index, 1)) == CONST_INT
10491 && INTVAL (XEXP (index, 1))
10492 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10493 && borx_reg_operand (base, Pmode))
10494 return true;
10495
10496 return false;
10497 }
10498
10499 if (GET_CODE (x) == LO_SUM)
10500 {
10501 rtx y = XEXP (x, 0);
10502
10503 if (GET_CODE (y) == SUBREG)
10504 y = SUBREG_REG (y);
10505
10506 if (REG_P (y)
10507 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10508 : REG_OK_FOR_BASE_P (y)))
10509 {
10510 /* Needed for -fPIC */
10511 if (mode == Pmode
10512 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10513 return true;
10514
10515 if (!INT14_OK_STRICT
10516 && (strict || !(reload_in_progress || reload_completed))
10517 && mode != QImode
10518 && mode != HImode)
10519 return false;
10520
10521 if (CONSTANT_P (XEXP (x, 1)))
10522 return true;
10523 }
10524 return false;
10525 }
10526
10527 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10528 return true;
10529
10530 return false;
10531 }
10532
10533 /* Look for machine dependent ways to make the invalid address AD a
10534 valid address.
10535
10536 For the PA, transform:
10537
10538 memory(X + <large int>)
10539
10540 into:
10541
10542 if (<large int> & mask) >= 16
10543 Y = (<large int> & ~mask) + mask + 1 Round up.
10544 else
10545 Y = (<large int> & ~mask) Round down.
10546 Z = X + Y
10547 memory (Z + (<large int> - Y));
10548
10549 This makes reload inheritance and reload_cse work better since Z
10550 can be reused.
10551
10552 There may be more opportunities to improve code with this hook. */
10553
10554 rtx
10555 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10556 int opnum, int type,
10557 int ind_levels ATTRIBUTE_UNUSED)
10558 {
10559 long offset, newoffset, mask;
10560 rtx new_rtx, temp = NULL_RTX;
10561
10562 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10563 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10564
10565 if (optimize && GET_CODE (ad) == PLUS)
10566 temp = simplify_binary_operation (PLUS, Pmode,
10567 XEXP (ad, 0), XEXP (ad, 1));
10568
10569 new_rtx = temp ? temp : ad;
10570
10571 if (optimize
10572 && GET_CODE (new_rtx) == PLUS
10573 && GET_CODE (XEXP (new_rtx, 0)) == REG
10574 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10575 {
10576 offset = INTVAL (XEXP ((new_rtx), 1));
10577
10578 /* Choose rounding direction. Round up if we are >= halfway. */
10579 if ((offset & mask) >= ((mask + 1) / 2))
10580 newoffset = (offset & ~mask) + mask + 1;
10581 else
10582 newoffset = offset & ~mask;
10583
10584 /* Ensure that long displacements are aligned. */
10585 if (mask == 0x3fff
10586 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10587 || (TARGET_64BIT && (mode) == DImode)))
10588 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10589
10590 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10591 {
10592 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10593 GEN_INT (newoffset));
10594 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10595 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10596 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10597 opnum, (enum reload_type) type);
10598 return ad;
10599 }
10600 }
10601
10602 return NULL_RTX;
10603 }
10604
10605 /* Output address vector. */
10606
10607 void
10608 pa_output_addr_vec (rtx lab, rtx body)
10609 {
10610 int idx, vlen = XVECLEN (body, 0);
10611
10612 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10613 if (TARGET_GAS)
10614 fputs ("\t.begin_brtab\n", asm_out_file);
10615 for (idx = 0; idx < vlen; idx++)
10616 {
10617 ASM_OUTPUT_ADDR_VEC_ELT
10618 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10619 }
10620 if (TARGET_GAS)
10621 fputs ("\t.end_brtab\n", asm_out_file);
10622 }
10623
10624 /* Output address difference vector. */
10625
10626 void
10627 pa_output_addr_diff_vec (rtx lab, rtx body)
10628 {
10629 rtx base = XEXP (XEXP (body, 0), 0);
10630 int idx, vlen = XVECLEN (body, 1);
10631
10632 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10633 if (TARGET_GAS)
10634 fputs ("\t.begin_brtab\n", asm_out_file);
10635 for (idx = 0; idx < vlen; idx++)
10636 {
10637 ASM_OUTPUT_ADDR_DIFF_ELT
10638 (asm_out_file,
10639 body,
10640 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10641 CODE_LABEL_NUMBER (base));
10642 }
10643 if (TARGET_GAS)
10644 fputs ("\t.end_brtab\n", asm_out_file);
10645 }
10646
10647 /* This is a helper function for the other atomic operations. This function
10648 emits a loop that contains SEQ that iterates until a compare-and-swap
10649 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10650 a set of instructions that takes a value from OLD_REG as an input and
10651 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10652 set to the current contents of MEM. After SEQ, a compare-and-swap will
10653 attempt to update MEM with NEW_REG. The function returns true when the
10654 loop was generated successfully. */
10655
10656 static bool
10657 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10658 {
10659 machine_mode mode = GET_MODE (mem);
10660 rtx_code_label *label;
10661 rtx cmp_reg, success, oldval;
10662
10663 /* The loop we want to generate looks like
10664
10665 cmp_reg = mem;
10666 label:
10667 old_reg = cmp_reg;
10668 seq;
10669 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10670 if (success)
10671 goto label;
10672
10673 Note that we only do the plain load from memory once. Subsequent
10674 iterations use the value loaded by the compare-and-swap pattern. */
10675
10676 label = gen_label_rtx ();
10677 cmp_reg = gen_reg_rtx (mode);
10678
10679 emit_move_insn (cmp_reg, mem);
10680 emit_label (label);
10681 emit_move_insn (old_reg, cmp_reg);
10682 if (seq)
10683 emit_insn (seq);
10684
10685 success = NULL_RTX;
10686 oldval = cmp_reg;
10687 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10688 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10689 MEMMODEL_RELAXED))
10690 return false;
10691
10692 if (oldval != cmp_reg)
10693 emit_move_insn (cmp_reg, oldval);
10694
10695 /* Mark this jump predicted not taken. */
10696 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10697 GET_MODE (success), 1, label, 0);
10698 return true;
10699 }
10700
10701 /* This function tries to implement an atomic exchange operation using a
10702 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10703 *MEM are returned, using TARGET if possible. No memory model is required
10704 since a compare_and_swap loop is seq-cst. */
10705
10706 rtx
10707 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10708 {
10709 machine_mode mode = GET_MODE (mem);
10710
10711 if (can_compare_and_swap_p (mode, true))
10712 {
10713 if (!target || !register_operand (target, mode))
10714 target = gen_reg_rtx (mode);
10715 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10716 return target;
10717 }
10718
10719 return NULL_RTX;
10720 }
10721
10722 #include "gt-pa.h"