/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx_insn *, rtx_insn *, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
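
/* Illustration (not part of the original source): given
   -mfixed-range=fr4-fr31, the loop above marks fr4 through fr31 in
   fixed_regs and call_used_regs so the register allocator never uses
   them; if that range happens to cover every FP register the compiler
   can allocate, the final scan also sets MASK_DISABLE_FPREGS.  */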

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
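
/* Worked illustration (not from the original source): 8191 fits in a
   signed 14-bit immediate (ldo); 0x12345000 has its low 11 bits clear
   and does not change sign when extended to 64 bits (ldil); 0x3e00 is
   a contiguous run of five one-bits (zdepi).  Each is therefore a
   one-instruction constant.  */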
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
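
/* Example (illustrative, not in the original source): ival =
   0x12345000 gives x == 0 (low 11 bits clear, bit 31 and above
   clear), so ldil works; ival = 0x12345678 leaves 0x678 in x, so the
   test fails.  */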

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
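
/* Walk-through of the trick above for x = 0x3e00, five ones at bit 9
   (illustrative): lsb_mask = 0x200, (x >> 4) + lsb_mask = 0x3e0 +
   0x200 = 0x5e0, and masking off the bits below lsb_mask leaves
   t = 0x400, a power of two, so the test succeeds.  For x = 0x3e01,
   t = 0x3e1, which is not a power of two, so zdepi cannot load it.  */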

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
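
/* Illustrative values (keeping in mind that CONST_INTs are
   sign-extended to HOST_WIDE_INT): 0x0000ffff matches 0....01....1,
   0xffff0000 matches 1....10....0, and 0xff0000ff matches
   1..10..01..1, so all pass; 0x00ff00ff fails because ~mask then
   holds two separate runs of ones.  */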

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
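
/* Illustrative: mask = 0xf0 is one contiguous run of ones, so adding
   its least significant bit (0x10) collapses it to the single bit
   0x100 and the test passes; mask = 0xf1 leaves two runs and
   fails.  */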
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have a label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
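
/* For orientation (a sketch, not part of the original source; exact
   relocations differ between SOM and ELF): on 32-bit targets the
   SYMBOL_REF case above typically assembles to a DLT-indirect
   sequence along the lines of

        addil LT'sym,%r19
        ldw RT'sym(%r1),%reg

   i.e. the HIGH of the DLT slot added to the PIC register, followed
   by the LO_SUM load that UNSPEC_DLTIND14R annotates.  */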

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
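
/* Summary of the cases above (descriptive note, not original text):
   the two dynamic models resolve addresses through __tls_get_addr,
   with local-dynamic sharing a single call per module via the
   UNSPEC_TLSLDBASE libcall block; the two exec models instead read
   the thread pointer with gen_tp_load and add an offset that is
   either loaded from the GOT (initial-exec) or computed directly
   (local-exec).  */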

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= (mask + 1) / 2
          Y = (<large int> & ~mask) + mask + 1        Round up.
        else
          Y = (<large int> & ~mask)                   Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

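/* Worked example for the transformation above (illustrative): for a
   MODE_INT reference to X + 70000, mask is 0x3fff; 70000 & 0x3fff is
   4464, below the 8192 midpoint, so we round down to Y = 65536.
   Z = X + 65536 is computed once, and the access becomes
   memory (Z + 4464), whose 14-bit displacement lets nearby
   references such as X + 70004 and X + 70008 share the same Z.  */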
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
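
/* Cost illustration (not in the original source): a DImode multiply
   on a 32-bit target has factor = 8 / 4 = 2, so with FP registers
   available it is costed at 2 * 2 * COSTS_N_INSNS (8), four times
   the SImode cost, matching the O(N*N) note above.  */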

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
          || (GET_CODE (operand1) == SUBREG
              && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && (MEM_P (operand0)
               || (GET_CODE (operand0) == SUBREG
                   && MEM_P (XEXP (operand0, 0))))
           && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                             0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
        {
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0),
                                                             0),
                                                       scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
         that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
        operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && pa_cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
1821 also improve the opportunities for creating insns that use
1822 unscaled indexing. */
1823 if (REG_P (operand0) && REG_P (operand1))
1824 {
1825 if (REG_POINTER (operand1)
1826 && !REG_POINTER (operand0)
1827 && !HARD_REGISTER_P (operand0))
1828 copy_reg_pointer (operand0, operand1);
1829 }
1830
1831 /* When MEMs are broken out, the REG_POINTER flag doesn't
1832 get set. In some cases, we can set the REG_POINTER flag
1833 from the declaration for the MEM. */
1834 if (REG_P (operand0)
1835 && GET_CODE (operand1) == MEM
1836 && !REG_POINTER (operand0))
1837 {
1838 tree decl = MEM_EXPR (operand1);
1839
1840 /* Set the register pointer flag and register alignment
1841 if the declaration for this memory reference is a
1842 pointer type. */
1843 if (decl)
1844 {
1845 tree type;
1846
1847 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1848 tree operand 1. */
1849 if (TREE_CODE (decl) == COMPONENT_REF)
1850 decl = TREE_OPERAND (decl, 1);
1851
1852 type = TREE_TYPE (decl);
1853 type = strip_array_types (type);
1854
1855 if (POINTER_TYPE_P (type))
1856 {
1857 int align;
1858
1859 type = TREE_TYPE (type);
1860 /* Using TYPE_ALIGN_OK is rather conservative as
1861 only the Ada front end actually sets it. */
1862 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1863 : BITS_PER_UNIT);
1864 mark_reg_pointer (operand0, align);
1865 }
1866 }
1867 }
1868
1869 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1870 return 1;
1871 }
1872 }
1873 else if (GET_CODE (operand0) == MEM)
1874 {
1875 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1876 && !(reload_in_progress || reload_completed))
1877 {
1878 rtx temp = gen_reg_rtx (DFmode);
1879
1880 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1881 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1882 return 1;
1883 }
1884 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1885 {
1886 /* Run this case quickly. */
1887 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1888 return 1;
1889 }
1890 if (! (reload_in_progress || reload_completed))
1891 {
1892 operands[0] = validize_mem (operand0);
1893 operands[1] = operand1 = force_reg (mode, operand1);
1894 }
1895 }
1896
1897 /* Simplify the source if we need to.
1898 Note we do have to handle function labels here, even though we do
1899 not consider them legitimate constants. Loop optimizations can
1900 call the emit_move expanders with one as a source. */
1901 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1902 || (GET_CODE (operand1) == HIGH
1903 && symbolic_operand (XEXP (operand1, 0), mode))
1904 || function_label_operand (operand1, VOIDmode)
1905 || tls_referenced_p (operand1))
1906 {
1907 int ishighonly = 0;
1908
1909 if (GET_CODE (operand1) == HIGH)
1910 {
1911 ishighonly = 1;
1912 operand1 = XEXP (operand1, 0);
1913 }
1914 if (symbolic_operand (operand1, mode))
1915 {
1916 /* Argh. The assembler and linker can't handle arithmetic
1917 involving plabels.
1918
1919 So we force the plabel into memory, load operand0 from
1920 the memory location, then add in the constant part. */
1921 if ((GET_CODE (operand1) == CONST
1922 && GET_CODE (XEXP (operand1, 0)) == PLUS
1923 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1924 VOIDmode))
1925 || function_label_operand (operand1, VOIDmode))
1926 {
1927 rtx temp, const_part;
1928
1929 /* Figure out what (if any) scratch register to use. */
1930 if (reload_in_progress || reload_completed)
1931 {
1932 scratch_reg = scratch_reg ? scratch_reg : operand0;
1933 /* SCRATCH_REG will hold an address and maybe the actual
1934 data. We want it in WORD_MODE regardless of what mode it
1935 was originally given to us. */
1936 scratch_reg = force_mode (word_mode, scratch_reg);
1937 }
1938 else if (flag_pic)
1939 scratch_reg = gen_reg_rtx (Pmode);
1940
1941 if (GET_CODE (operand1) == CONST)
1942 {
1943 /* Save away the constant part of the expression. */
1944 const_part = XEXP (XEXP (operand1, 0), 1);
1945 gcc_assert (GET_CODE (const_part) == CONST_INT);
1946
1947 /* Force the function label into memory. */
1948 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1949 }
1950 else
1951 {
1952 /* No constant part. */
1953 const_part = NULL_RTX;
1954
1955 /* Force the function label into memory. */
1956 temp = force_const_mem (mode, operand1);
1957 }
1958
1959
1960 /* Get the address of the memory location. PIC-ify it if
1961 necessary. */
1962 temp = XEXP (temp, 0);
1963 if (flag_pic)
1964 temp = legitimize_pic_address (temp, mode, scratch_reg);
1965
1966 /* Put the address of the memory location into our destination
1967 register. */
1968 operands[1] = temp;
1969 pa_emit_move_sequence (operands, mode, scratch_reg);
1970
1971 /* Now load from the memory location into our destination
1972 register. */
1973 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1974 pa_emit_move_sequence (operands, mode, scratch_reg);
1975
1976 /* And add back in the constant part. */
1977 if (const_part != NULL_RTX)
1978 expand_inc (operand0, const_part);
1979
1980 return 1;
1981 }
1982
1983 if (flag_pic)
1984 {
1985 rtx temp;
1986
1987 if (reload_in_progress || reload_completed)
1988 {
1989 temp = scratch_reg ? scratch_reg : operand0;
1990 /* TEMP will hold an address and maybe the actual
1991 data. We want it in WORD_MODE regardless of what mode it
1992 was originally given to us. */
1993 temp = force_mode (word_mode, temp);
1994 }
1995 else
1996 temp = gen_reg_rtx (Pmode);
1997
1998 /* (const (plus (symbol) (const_int))) must be forced to
1999 memory during/after reload if the const_int will not fit
2000 in 14 bits. */
2001 if (GET_CODE (operand1) == CONST
2002 && GET_CODE (XEXP (operand1, 0)) == PLUS
2003 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2004 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2005 && (reload_completed || reload_in_progress)
2006 && flag_pic)
2007 {
2008 rtx const_mem = force_const_mem (mode, operand1);
2009 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2010 mode, temp);
2011 operands[1] = replace_equiv_address (const_mem, operands[1]);
2012 pa_emit_move_sequence (operands, mode, temp);
2013 }
2014 else
2015 {
2016 operands[1] = legitimize_pic_address (operand1, mode, temp);
2017 if (REG_P (operand0) && REG_P (operands[1]))
2018 copy_reg_pointer (operand0, operands[1]);
2019 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2020 }
2021 }
2022 /* On the HPPA, references to data space are supposed to use dp,
2023 register 27, but showing it in the RTL inhibits various cse
2024 and loop optimizations. */
2025 else
2026 {
2027 rtx temp, set;
2028
2029 if (reload_in_progress || reload_completed)
2030 {
2031 temp = scratch_reg ? scratch_reg : operand0;
2032 /* TEMP will hold an address and maybe the actual
2033 data. We want it in WORD_MODE regardless of what mode it
2034 was originally given to us. */
2035 temp = force_mode (word_mode, temp);
2036 }
2037 else
2038 temp = gen_reg_rtx (mode);
2039
2040 /* Loading a SYMBOL_REF into a register makes that register
2041 safe to be used as the base in an indexed address.
2042
2043 Don't mark hard registers though. That loses. */
2044 if (GET_CODE (operand0) == REG
2045 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2046 mark_reg_pointer (operand0, BITS_PER_UNIT);
2047 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2048 mark_reg_pointer (temp, BITS_PER_UNIT);
2049
2050 if (ishighonly)
2051 set = gen_rtx_SET (mode, operand0, temp);
2052 else
2053 set = gen_rtx_SET (VOIDmode,
2054 operand0,
2055 gen_rtx_LO_SUM (mode, temp, operand1));
2056
2057 emit_insn (gen_rtx_SET (VOIDmode,
2058 temp,
2059 gen_rtx_HIGH (mode, operand1)));
2060 emit_insn (set);
2061
2062 }
2063 return 1;
2064 }
2065 else if (tls_referenced_p (operand1))
2066 {
2067 rtx tmp = operand1;
2068 rtx addend = NULL;
2069
2070 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2071 {
2072 addend = XEXP (XEXP (tmp, 0), 1);
2073 tmp = XEXP (XEXP (tmp, 0), 0);
2074 }
2075
2076 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2077 tmp = legitimize_tls_address (tmp);
2078 if (addend)
2079 {
2080 tmp = gen_rtx_PLUS (mode, tmp, addend);
2081 tmp = force_operand (tmp, operands[0]);
2082 }
2083 operands[1] = tmp;
2084 }
2085 else if (GET_CODE (operand1) != CONST_INT
2086 || !pa_cint_ok_for_move (INTVAL (operand1)))
2087 {
2088 rtx insn, temp;
2089 rtx op1 = operand1;
2090 HOST_WIDE_INT value = 0;
2091 HOST_WIDE_INT insv = 0;
2092 int insert = 0;
2093
2094 if (GET_CODE (operand1) == CONST_INT)
2095 value = INTVAL (operand1);
2096
2097 if (TARGET_64BIT
2098 && GET_CODE (operand1) == CONST_INT
2099 && HOST_BITS_PER_WIDE_INT > 32
2100 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2101 {
2102 HOST_WIDE_INT nval;
2103
2104 /* Extract the low order 32 bits of the value and sign extend.
2105 If the new value is the same as the original value, we can
2106 use the original value as-is. If the new value is
2107 different, we use it and insert the most-significant 32-bits
2108 of the original value into the final result. */
2109 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2110 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2111 if (value != nval)
2112 {
2113 #if HOST_BITS_PER_WIDE_INT > 32
2114 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2115 #endif
2116 insert = 1;
2117 value = nval;
2118 operand1 = GEN_INT (nval);
2119 }
2120 }
2121
2122 if (reload_in_progress || reload_completed)
2123 temp = scratch_reg ? scratch_reg : operand0;
2124 else
2125 temp = gen_reg_rtx (mode);
2126
2127 /* We don't directly split DImode constants on 32-bit targets
2128 because PLUS uses an 11-bit immediate and the insn sequence
2129 generated is not as efficient as the one using HIGH/LO_SUM. */
2130 if (GET_CODE (operand1) == CONST_INT
2131 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2132 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2133 && !insert)
2134 {
2135 /* Directly break constant into high and low parts. This
2136 provides better optimization opportunities because various
2137 passes recognize constants split with PLUS but not LO_SUM.
2138 We use a 14-bit signed low part except when the addition
2139 of 0x4000 to the high part might change the sign of the
2140 high part. */
2141 HOST_WIDE_INT low = value & 0x3fff;
2142 HOST_WIDE_INT high = value & ~ 0x3fff;
2143
2144 if (low >= 0x2000)
2145 {
2146 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2147 high += 0x2000;
2148 else
2149 high += 0x4000;
2150 }
2151
2152 low = value - high;
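/* Worked example (illustrative): for value 0x12346abc, the initial
   low part 0x2abc is >= 0x2000, so the high part becomes
   0x12344000 + 0x4000 = 0x12348000 and the final low part is
   0x12346abc - 0x12348000 = -0x1544, which fits in the signed
   14-bit immediate of the PLUS emitted below. */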
2153
2154 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2155 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2156 }
2157 else
2158 {
2159 emit_insn (gen_rtx_SET (VOIDmode, temp,
2160 gen_rtx_HIGH (mode, operand1)));
2161 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2162 }
2163
2164 insn = emit_move_insn (operands[0], operands[1]);
2165
2166 /* Now insert the most significant 32 bits of the value
2167 into the register. When we don't have a second register
2168 available, it could take up to nine instructions to load
2169 a 64-bit integer constant. Prior to reload, we force
2170 constants that would take more than three instructions
2171 to load to the constant pool. During and after reload,
2172 we have to handle all possible values. */
2173 if (insert)
2174 {
2175 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2176 register and the value to be inserted is outside the
2177 range that can be loaded with three depdi instructions. */
2178 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2179 {
2180 operand1 = GEN_INT (insv);
2181
2182 emit_insn (gen_rtx_SET (VOIDmode, temp,
2183 gen_rtx_HIGH (mode, operand1)));
2184 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2185 if (mode == DImode)
2186 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2187 const0_rtx, temp));
2188 else
2189 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2190 const0_rtx, temp));
2191 }
2192 else
2193 {
2194 int len = 5, pos = 27;
2195
2196 /* Insert the bits using the depdi instruction. */
2197 while (pos >= 0)
2198 {
2199 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2200 HOST_WIDE_INT sign = v5 < 0;
2201
2202 /* Left extend the insertion. */
2203 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2204 while (pos > 0 && (insv & 1) == sign)
2205 {
2206 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2207 len += 1;
2208 pos -= 1;
2209 }
2210
2211 if (mode == DImode)
2212 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2213 GEN_INT (pos), GEN_INT (v5)));
2214 else
2215 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2216 GEN_INT (pos), GEN_INT (v5)));
2217
2218 len = pos > 0 && pos < 5 ? pos : 5;
2219 pos -= len;
2220 }
2221 }
2222 }
2223
2224 set_unique_reg_note (insn, REG_EQUAL, op1);
2225
2226 return 1;
2227 }
2228 }
2229 /* Now have insn-emit do whatever it normally does. */
2230 return 0;
2231 }
2232
2233 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2234 it will need a link/runtime reloc). */
2235
2236 int
2237 pa_reloc_needed (tree exp)
2238 {
2239 int reloc = 0;
2240
2241 switch (TREE_CODE (exp))
2242 {
2243 case ADDR_EXPR:
2244 return 1;
2245
2246 case POINTER_PLUS_EXPR:
2247 case PLUS_EXPR:
2248 case MINUS_EXPR:
2249 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2250 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2251 break;
2252
2253 CASE_CONVERT:
2254 case NON_LVALUE_EXPR:
2255 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2256 break;
2257
2258 case CONSTRUCTOR:
2259 {
2260 tree value;
2261 unsigned HOST_WIDE_INT ix;
2262
2263 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2264 if (value)
2265 reloc |= pa_reloc_needed (value);
2266 }
2267 break;
2268
2269 case ERROR_MARK:
2270 break;
2271
2272 default:
2273 break;
2274 }
2275 return reloc;
2276 }
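/* For example, a static initializer such as `int *p = &x + 1;'
   reaches here as a POINTER_PLUS_EXPR over an ADDR_EXPR and
   returns 1, since the address of `x' is not known until
   link/run time. */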
2277
2278 \f
2279 /* Return the best assembler insn template
2280 for moving operands[1] into operands[0] as a fullword. */
2281 const char *
2282 pa_singlemove_string (rtx *operands)
2283 {
2284 HOST_WIDE_INT intval;
2285
2286 if (GET_CODE (operands[0]) == MEM)
2287 return "stw %r1,%0";
2288 if (GET_CODE (operands[1]) == MEM)
2289 return "ldw %1,%0";
2290 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2291 {
2292 long i;
2293 REAL_VALUE_TYPE d;
2294
2295 gcc_assert (GET_MODE (operands[1]) == SFmode);
2296
2297 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2298 bit pattern. */
2299 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2300 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2301
2302 operands[1] = GEN_INT (i);
2303 /* Fall through to CONST_INT case. */
2304 }
2305 if (GET_CODE (operands[1]) == CONST_INT)
2306 {
2307 intval = INTVAL (operands[1]);
2308
2309 if (VAL_14_BITS_P (intval))
2310 return "ldi %1,%0";
2311 else if ((intval & 0x7ff) == 0)
2312 return "ldil L'%1,%0";
2313 else if (pa_zdepi_cint_p (intval))
2314 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2315 else
2316 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2317 }
2318 return "copy %1,%0";
2319 }
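/* Illustrative examples of the constant cases above (32-bit target,
   GAS syntax, assembler output approximate):
     7           ->  ldi 7,%0                   (fits in 14 bits)
     0x12345000  ->  ldil L'0x12345000,%0       (low 11 bits zero)
     0x1fe00     ->  depwi,z -1,22,8,%0         (zdepi bit string)
     0x12345678  ->  ldil L'0x12345678,%0
                     ldo R'0x12345678(%0),%0    (general case)  */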
2320 \f
2321
2322 /* Compute position (in OP[1]) and width (in OP[2])
2323 useful for copying IMM to a register using the zdepi
2324 instruction. Store the immediate value to insert in OP[0]. */
2325 static void
2326 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2327 {
2328 int lsb, len;
2329
2330 /* Find the least significant set bit in IMM. */
2331 for (lsb = 0; lsb < 32; lsb++)
2332 {
2333 if ((imm & 1) != 0)
2334 break;
2335 imm >>= 1;
2336 }
2337
2338 /* Choose variants based on *sign* of the 5-bit field. */
2339 if ((imm & 0x10) == 0)
2340 len = (lsb <= 28) ? 4 : 32 - lsb;
2341 else
2342 {
2343 /* Find the width of the bitstring in IMM. */
2344 for (len = 5; len < 32 - lsb; len++)
2345 {
2346 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2347 break;
2348 }
2349
2350 /* Sign extend IMM as a 5-bit value. */
2351 imm = (imm & 0xf) - 0x10;
2352 }
2353
2354 op[0] = imm;
2355 op[1] = 31 - lsb;
2356 op[2] = len;
2357 }
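/* Worked example (illustrative): IMM = 0x1fe00, a string of eight
   ones in bits 9..16. The first loop finds lsb = 9 and shifts IMM
   down to 0xff. Bit 4 of the shifted value is set, so the width
   loop runs until bit 8 is found clear, giving len = 8, and the low
   five bits sign extend to -1. Result: OP = {-1, 22, 8}, i.e.
   deposit an 8-bit field of ones ending at bit position 22
   (MSB = bit 0 in PA bit numbering). */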
2358
2359 /* Compute position (in OP[1]) and width (in OP[2])
2360 useful for copying IMM to a register using the depdi,z
2361 instruction. Store the immediate value to insert in OP[0]. */
2362
2363 static void
2364 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2365 {
2366 int lsb, len, maxlen;
2367
2368 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2369
2370 /* Find the least significant set bit in IMM. */
2371 for (lsb = 0; lsb < maxlen; lsb++)
2372 {
2373 if ((imm & 1) != 0)
2374 break;
2375 imm >>= 1;
2376 }
2377
2378 /* Choose variants based on *sign* of the 5-bit field. */
2379 if ((imm & 0x10) == 0)
2380 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2381 else
2382 {
2383 /* Find the width of the bitstring in IMM. */
2384 for (len = 5; len < maxlen - lsb; len++)
2385 {
2386 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2387 break;
2388 }
2389
2390 /* Extend length if host is narrow and IMM is negative. */
2391 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2392 len += 32;
2393
2394 /* Sign extend IMM as a 5-bit value. */
2395 imm = (imm & 0xf) - 0x10;
2396 }
2397
2398 op[0] = imm;
2399 op[1] = 63 - lsb;
2400 op[2] = len;
2401 }
2402
2403 /* Output assembler code to perform a doubleword move insn
2404 with operands OPERANDS. */
2405
2406 const char *
2407 pa_output_move_double (rtx *operands)
2408 {
2409 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2410 rtx latehalf[2];
2411 rtx addreg0 = 0, addreg1 = 0;
2412
2413 /* First classify both operands. */
2414
2415 if (REG_P (operands[0]))
2416 optype0 = REGOP;
2417 else if (offsettable_memref_p (operands[0]))
2418 optype0 = OFFSOP;
2419 else if (GET_CODE (operands[0]) == MEM)
2420 optype0 = MEMOP;
2421 else
2422 optype0 = RNDOP;
2423
2424 if (REG_P (operands[1]))
2425 optype1 = REGOP;
2426 else if (CONSTANT_P (operands[1]))
2427 optype1 = CNSTOP;
2428 else if (offsettable_memref_p (operands[1]))
2429 optype1 = OFFSOP;
2430 else if (GET_CODE (operands[1]) == MEM)
2431 optype1 = MEMOP;
2432 else
2433 optype1 = RNDOP;
2434
2435 /* Check for cases that the operand constraints are not
2436 supposed to allow. */
2437 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2438
2439 /* Handle copies between general and floating registers. */
2440
2441 if (optype0 == REGOP && optype1 == REGOP
2442 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2443 {
2444 if (FP_REG_P (operands[0]))
2445 {
2446 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2447 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2448 return "{fldds|fldd} -16(%%sp),%0";
2449 }
2450 else
2451 {
2452 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2453 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2454 return "{ldws|ldw} -12(%%sp),%R0";
2455 }
2456 }
2457
2458 /* Handle auto decrementing and incrementing loads and stores
2459 specifically, since the structure of the function doesn't work
2460 for them without major modification. Do this better once this
2461 port is taught about the general inc/dec addressing of PA.
2462 (This was written by tege. Chide him if it doesn't work.) */
2463
2464 if (optype0 == MEMOP)
2465 {
2466 /* We have to output the address syntax ourselves, since print_operand
2467 doesn't deal with the addresses we want to use. Fix this later. */
2468
2469 rtx addr = XEXP (operands[0], 0);
2470 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2471 {
2472 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2473
2474 operands[0] = XEXP (addr, 0);
2475 gcc_assert (GET_CODE (operands[1]) == REG
2476 && GET_CODE (operands[0]) == REG);
2477
2478 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2479
2480 /* No overlap between high target register and address
2481 register. (We do this in a non-obvious way to
2482 save a register file writeback) */
2483 if (GET_CODE (addr) == POST_INC)
2484 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2485 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2486 }
2487 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2488 {
2489 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2490
2491 operands[0] = XEXP (addr, 0);
2492 gcc_assert (GET_CODE (operands[1]) == REG
2493 && GET_CODE (operands[0]) == REG);
2494
2495 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2496 /* No overlap between high target register and address
2497 register. (We do this in a non-obvious way to save a
2498 register file writeback) */
2499 if (GET_CODE (addr) == PRE_INC)
2500 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2501 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2502 }
2503 }
2504 if (optype1 == MEMOP)
2505 {
2506 /* We have to output the address syntax ourselves, since print_operand
2507 doesn't deal with the addresses we want to use. Fix this later. */
2508
2509 rtx addr = XEXP (operands[1], 0);
2510 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2511 {
2512 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2513
2514 operands[1] = XEXP (addr, 0);
2515 gcc_assert (GET_CODE (operands[0]) == REG
2516 && GET_CODE (operands[1]) == REG);
2517
2518 if (!reg_overlap_mentioned_p (high_reg, addr))
2519 {
2520 /* No overlap between high target register and address
2521 register. (We do this in a non-obvious way to
2522 save a register file writeback) */
2523 if (GET_CODE (addr) == POST_INC)
2524 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2525 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2526 }
2527 else
2528 {
2529 /* This is an undefined situation. We should load into the
2530 address register *and* update that register. Probably
2531 we don't need to handle this at all. */
2532 if (GET_CODE (addr) == POST_INC)
2533 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2534 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2535 }
2536 }
2537 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2538 {
2539 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2540
2541 operands[1] = XEXP (addr, 0);
2542 gcc_assert (GET_CODE (operands[0]) == REG
2543 && GET_CODE (operands[1]) == REG);
2544
2545 if (!reg_overlap_mentioned_p (high_reg, addr))
2546 {
2547 /* No overlap between high target register and address
2548 register. (We do this in a non-obvious way to
2549 save a register file writeback) */
2550 if (GET_CODE (addr) == PRE_INC)
2551 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2552 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2553 }
2554 else
2555 {
2556 /* This is an undefined situation. We should load into the
2557 address register *and* update that register. Probably
2558 we don't need to handle this at all. */
2559 if (GET_CODE (addr) == PRE_INC)
2560 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2561 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2562 }
2563 }
2564 else if (GET_CODE (addr) == PLUS
2565 && GET_CODE (XEXP (addr, 0)) == MULT)
2566 {
2567 rtx xoperands[4];
2568 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2569
2570 if (!reg_overlap_mentioned_p (high_reg, addr))
2571 {
2572 xoperands[0] = high_reg;
2573 xoperands[1] = XEXP (addr, 1);
2574 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2575 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2576 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2577 xoperands);
2578 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2579 }
2580 else
2581 {
2582 xoperands[0] = high_reg;
2583 xoperands[1] = XEXP (addr, 1);
2584 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2585 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2586 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2587 xoperands);
2588 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2589 }
2590 }
2591 }
2592
2593 /* If an operand is an unoffsettable memory ref, find a register
2594 we can increment temporarily to make it refer to the second word. */
2595
2596 if (optype0 == MEMOP)
2597 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2598
2599 if (optype1 == MEMOP)
2600 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2601
2602 /* Ok, we can do one word at a time.
2603 Normally we do the low-numbered word first.
2604
2605 In either case, set up in LATEHALF the operands to use
2606 for the high-numbered word and in some cases alter the
2607 operands in OPERANDS to be suitable for the low-numbered word. */
2608
2609 if (optype0 == REGOP)
2610 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2611 else if (optype0 == OFFSOP)
2612 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2613 else
2614 latehalf[0] = operands[0];
2615
2616 if (optype1 == REGOP)
2617 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2618 else if (optype1 == OFFSOP)
2619 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2620 else if (optype1 == CNSTOP)
2621 split_double (operands[1], &operands[1], &latehalf[1]);
2622 else
2623 latehalf[1] = operands[1];
2624
2625 /* If the first move would clobber the source of the second one,
2626 do them in the other order.
2627
2628 This can happen in two cases:
2629
2630 mem -> register where the first half of the destination register
2631 is the same register used in the memory's address. Reload
2632 can create such insns.
2633
2634 mem in this case will be either register indirect or register
2635 indirect plus a valid offset.
2636
2637 register -> register move where REGNO(dst) == REGNO(src) + 1;
2638 someone (Tim/Tege?) claimed this can happen for parameter loads.
2639
2640 Handle mem -> register case first. */
2641 if (optype0 == REGOP
2642 && (optype1 == MEMOP || optype1 == OFFSOP)
2643 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2644 operands[1], 0))
2645 {
2646 /* Do the late half first. */
2647 if (addreg1)
2648 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2649 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2650
2651 /* Then clobber. */
2652 if (addreg1)
2653 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2654 return pa_singlemove_string (operands);
2655 }
2656
2657 /* Now handle register -> register case. */
2658 if (optype0 == REGOP && optype1 == REGOP
2659 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2660 {
2661 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2662 return pa_singlemove_string (operands);
2663 }
2664
2665 /* Normal case: do the two words, low-numbered first. */
2666
2667 output_asm_insn (pa_singlemove_string (operands), operands);
2668
2669 /* Make any unoffsettable addresses point at high-numbered word. */
2670 if (addreg0)
2671 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2672 if (addreg1)
2673 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2674
2675 /* Do that word. */
2676 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2677
2678 /* Undo the adds we just did. */
2679 if (addreg0)
2680 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2681 if (addreg1)
2682 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2683
2684 return "";
2685 }
2686 \f
2687 const char *
2688 pa_output_fp_move_double (rtx *operands)
2689 {
2690 if (FP_REG_P (operands[0]))
2691 {
2692 if (FP_REG_P (operands[1])
2693 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2694 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2695 else
2696 output_asm_insn ("fldd%F1 %1,%0", operands);
2697 }
2698 else if (FP_REG_P (operands[1]))
2699 {
2700 output_asm_insn ("fstd%F0 %1,%0", operands);
2701 }
2702 else
2703 {
2704 rtx xoperands[2];
2705
2706 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2707
2708 /* This is a pain. You have to be prepared to deal with an
2709 arbitrary address here including pre/post increment/decrement.
2710
2711 So avoid this in the MD. */
2712 gcc_assert (GET_CODE (operands[0]) == REG);
2713
2714 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2715 xoperands[0] = operands[0];
2716 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2717 }
2718 return "";
2719 }
2720 \f
2721 /* Return a REG that occurs in ADDR with coefficient 1.
2722 ADDR can be effectively incremented by incrementing REG. */
2723
2724 static rtx
2725 find_addr_reg (rtx addr)
2726 {
2727 while (GET_CODE (addr) == PLUS)
2728 {
2729 if (GET_CODE (XEXP (addr, 0)) == REG)
2730 addr = XEXP (addr, 0);
2731 else if (GET_CODE (XEXP (addr, 1)) == REG)
2732 addr = XEXP (addr, 1);
2733 else if (CONSTANT_P (XEXP (addr, 0)))
2734 addr = XEXP (addr, 1);
2735 else if (CONSTANT_P (XEXP (addr, 1)))
2736 addr = XEXP (addr, 0);
2737 else
2738 gcc_unreachable ();
2739 }
2740 gcc_assert (GET_CODE (addr) == REG);
2741 return addr;
2742 }
2743
2744 /* Emit code to perform a block move.
2745
2746 OPERANDS[0] is the destination pointer as a REG, clobbered.
2747 OPERANDS[1] is the source pointer as a REG, clobbered.
2748 OPERANDS[2] is a register for temporary storage.
2749 OPERANDS[3] is a register for temporary storage.
2750 OPERANDS[4] is the size as a CONST_INT
2751 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2752 OPERANDS[6] is another temporary register. */
2753
2754 const char *
2755 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2756 {
2757 int align = INTVAL (operands[5]);
2758 unsigned long n_bytes = INTVAL (operands[4]);
2759
2760 /* We can't move more than a word at a time because the PA
2761 has no integer move insns longer than a word. (Could use fp mem ops?) */
2762 if (align > (TARGET_64BIT ? 8 : 4))
2763 align = (TARGET_64BIT ? 8 : 4);
2764
2765 /* Note that we know each loop below will execute at least twice
2766 (else we would have open-coded the copy). */
2767 switch (align)
2768 {
2769 case 8:
2770 /* Pre-adjust the loop counter. */
2771 operands[4] = GEN_INT (n_bytes - 16);
2772 output_asm_insn ("ldi %4,%2", operands);
2773
2774 /* Copying loop. */
2775 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2776 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2777 output_asm_insn ("std,ma %3,8(%0)", operands);
2778 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2779 output_asm_insn ("std,ma %6,8(%0)", operands);
2780
2781 /* Handle the residual. There could be up to 15 bytes of
2782 residual to copy! */
2783 if (n_bytes % 16 != 0)
2784 {
2785 operands[4] = GEN_INT (n_bytes % 8);
2786 if (n_bytes % 16 >= 8)
2787 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2788 if (n_bytes % 8 != 0)
2789 output_asm_insn ("ldd 0(%1),%6", operands);
2790 if (n_bytes % 16 >= 8)
2791 output_asm_insn ("std,ma %3,8(%0)", operands);
2792 if (n_bytes % 8 != 0)
2793 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2794 }
2795 return "";
2796
2797 case 4:
2798 /* Pre-adjust the loop counter. */
2799 operands[4] = GEN_INT (n_bytes - 8);
2800 output_asm_insn ("ldi %4,%2", operands);
2801
2802 /* Copying loop. */
2803 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2804 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2805 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2806 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2807 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2808
2809 /* Handle the residual. There could be up to 7 bytes of
2810 residual to copy! */
2811 if (n_bytes % 8 != 0)
2812 {
2813 operands[4] = GEN_INT (n_bytes % 4);
2814 if (n_bytes % 8 >= 4)
2815 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2816 if (n_bytes % 4 != 0)
2817 output_asm_insn ("ldw 0(%1),%6", operands);
2818 if (n_bytes % 8 >= 4)
2819 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2820 if (n_bytes % 4 != 0)
2821 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2822 }
2823 return "";
2824
2825 case 2:
2826 /* Pre-adjust the loop counter. */
2827 operands[4] = GEN_INT (n_bytes - 4);
2828 output_asm_insn ("ldi %4,%2", operands);
2829
2830 /* Copying loop. */
2831 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2832 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2833 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2834 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2835 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2836
2837 /* Handle the residual. */
2838 if (n_bytes % 4 != 0)
2839 {
2840 if (n_bytes % 4 >= 2)
2841 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2842 if (n_bytes % 2 != 0)
2843 output_asm_insn ("ldb 0(%1),%6", operands);
2844 if (n_bytes % 4 >= 2)
2845 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2846 if (n_bytes % 2 != 0)
2847 output_asm_insn ("stb %6,0(%0)", operands);
2848 }
2849 return "";
2850
2851 case 1:
2852 /* Pre-adjust the loop counter. */
2853 operands[4] = GEN_INT (n_bytes - 2);
2854 output_asm_insn ("ldi %4,%2", operands);
2855
2856 /* Copying loop. */
2857 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2858 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2859 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2860 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2861 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2862
2863 /* Handle the residual. */
2864 if (n_bytes % 2 != 0)
2865 {
2866 output_asm_insn ("ldb 0(%1),%3", operands);
2867 output_asm_insn ("stb %3,0(%0)", operands);
2868 }
2869 return "";
2870
2871 default:
2872 gcc_unreachable ();
2873 }
2874 }
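/* Illustrative expansion (HP assembler syntax): with align == 4 and
   a constant size of 24 bytes, the counter is preset to 16 and the
   loop

       ldws,ma 4(%1),%3
       ldws,ma 4(%1),%6
       stws,ma %3,4(%0)
       addib,>= -8,%2,.-12
       stws,ma %6,4(%0)

   iterates three times (counter 16 -> 8 -> 0 -> -8), copying all
   24 bytes with no residual code. */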
2875
2876 /* Count the number of insns necessary to handle this block move.
2877
2878 Basic structure is the same as pa_output_block_move, except that we
2879 count insns rather than emit them. */
2880
2881 static int
2882 compute_movmem_length (rtx insn)
2883 {
2884 rtx pat = PATTERN (insn);
2885 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2886 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2887 unsigned int n_insns = 0;
2888
2889 /* We can't move more than a word at a time because the PA
2890 has no integer move insns longer than a word. (Could use fp mem ops?) */
2891 if (align > (TARGET_64BIT ? 8 : 4))
2892 align = (TARGET_64BIT ? 8 : 4);
2893
2894 /* The basic copying loop. */
2895 n_insns = 6;
2896
2897 /* Residuals. */
2898 if (n_bytes % (2 * align) != 0)
2899 {
2900 if ((n_bytes % (2 * align)) >= align)
2901 n_insns += 2;
2902
2903 if ((n_bytes % align) != 0)
2904 n_insns += 2;
2905 }
2906
2907 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2908 return n_insns * 4;
2909 }
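/* Illustrative check: align == 4 and n_bytes == 14 gives the basic
   6-insn loop, plus 2 insns because 14 % 8 >= 4 and 2 more because
   14 % 4 != 0, for 10 insns or 40 bytes of code. */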
2910
2911 /* Emit code to perform a block clear.
2912
2913 OPERANDS[0] is the destination pointer as a REG, clobbered.
2914 OPERANDS[1] is a register for temporary storage.
2915 OPERANDS[2] is the size as a CONST_INT
2916 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2917
2918 const char *
2919 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2920 {
2921 int align = INTVAL (operands[3]);
2922 unsigned long n_bytes = INTVAL (operands[2]);
2923
2924 /* We can't clear more than a word at a time because the PA
2925 has no integer move insns longer than a word. */
2926 if (align > (TARGET_64BIT ? 8 : 4))
2927 align = (TARGET_64BIT ? 8 : 4);
2928
2929 /* Note that we know each loop below will execute at least twice
2930 (else we would have open-coded the copy). */
2931 switch (align)
2932 {
2933 case 8:
2934 /* Pre-adjust the loop counter. */
2935 operands[2] = GEN_INT (n_bytes - 16);
2936 output_asm_insn ("ldi %2,%1", operands);
2937
2938 /* Loop. */
2939 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2940 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2941 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2942
2943 /* Handle the residual. There could be up to 15 bytes of
2944 residual to clear! */
2945 if (n_bytes % 16 != 0)
2946 {
2947 operands[2] = GEN_INT (n_bytes % 8);
2948 if (n_bytes % 16 >= 8)
2949 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2950 if (n_bytes % 8 != 0)
2951 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2952 }
2953 return "";
2954
2955 case 4:
2956 /* Pre-adjust the loop counter. */
2957 operands[2] = GEN_INT (n_bytes - 8);
2958 output_asm_insn ("ldi %2,%1", operands);
2959
2960 /* Loop. */
2961 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2962 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2963 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2964
2965 /* Handle the residual. There could be up to 7 bytes of
2966 residual to clear! */
2967 if (n_bytes % 8 != 0)
2968 {
2969 operands[2] = GEN_INT (n_bytes % 4);
2970 if (n_bytes % 8 >= 4)
2971 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2972 if (n_bytes % 4 != 0)
2973 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2974 }
2975 return "";
2976
2977 case 2:
2978 /* Pre-adjust the loop counter. */
2979 operands[2] = GEN_INT (n_bytes - 4);
2980 output_asm_insn ("ldi %2,%1", operands);
2981
2982 /* Loop. */
2983 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2984 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2985 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2986
2987 /* Handle the residual. */
2988 if (n_bytes % 4 != 0)
2989 {
2990 if (n_bytes % 4 >= 2)
2991 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2992 if (n_bytes % 2 != 0)
2993 output_asm_insn ("stb %%r0,0(%0)", operands);
2994 }
2995 return "";
2996
2997 case 1:
2998 /* Pre-adjust the loop counter. */
2999 operands[2] = GEN_INT (n_bytes - 2);
3000 output_asm_insn ("ldi %2,%1", operands);
3001
3002 /* Loop. */
3003 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3004 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3005 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3006
3007 /* Handle the residual. */
3008 if (n_bytes % 2 != 0)
3009 output_asm_insn ("stb %%r0,0(%0)", operands);
3010
3011 return "";
3012
3013 default:
3014 gcc_unreachable ();
3015 }
3016 }
3017
3018 /* Count the number of insns necessary to handle this block clear.
3019
3020 Basic structure is the same as pa_output_block_clear, except that
3021 we count insns rather than emit them. */
3022
3023 static int
3024 compute_clrmem_length (rtx insn)
3025 {
3026 rtx pat = PATTERN (insn);
3027 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3028 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3029 unsigned int n_insns = 0;
3030
3031 /* We can't clear more than a word at a time because the PA
3032 has no integer move insns longer than a word. */
3033 if (align > (TARGET_64BIT ? 8 : 4))
3034 align = (TARGET_64BIT ? 8 : 4);
3035
3036 /* The basic loop. */
3037 n_insns = 4;
3038
3039 /* Residuals. */
3040 if (n_bytes % (2 * align) != 0)
3041 {
3042 if ((n_bytes % (2 * align)) >= align)
3043 n_insns++;
3044
3045 if ((n_bytes % align) != 0)
3046 n_insns++;
3047 }
3048
3049 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3050 return n_insns * 4;
3051 }
3052 \f
3053
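/* Return a string to perform a bitwise-and of operands[1] with
   operands[2], storing the result in operands[0]. */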
3054 const char *
3055 pa_output_and (rtx *operands)
3056 {
3057 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3058 {
3059 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3060 int ls0, ls1, ms0, p, len;
3061
3062 for (ls0 = 0; ls0 < 32; ls0++)
3063 if ((mask & (1 << ls0)) == 0)
3064 break;
3065
3066 for (ls1 = ls0; ls1 < 32; ls1++)
3067 if ((mask & (1 << ls1)) != 0)
3068 break;
3069
3070 for (ms0 = ls1; ms0 < 32; ms0++)
3071 if ((mask & (1 << ms0)) == 0)
3072 break;
3073
3074 gcc_assert (ms0 == 32);
3075
3076 if (ls1 == 32)
3077 {
3078 len = ls0;
3079
3080 gcc_assert (len);
3081
3082 operands[2] = GEN_INT (len);
3083 return "{extru|extrw,u} %1,31,%2,%0";
3084 }
3085 else
3086 {
3087 /* We could use `depi' for the case above as well, but `depi'
3088 requires one more register file access than an `extru'. */
3089
3090 p = 31 - ls0;
3091 len = ls1 - ls0;
3092
3093 operands[2] = GEN_INT (p);
3094 operands[3] = GEN_INT (len);
3095 return "{depi|depwi} 0,%2,%3,%0";
3096 }
3097 }
3098 else
3099 return "and %1,%2,%0";
3100 }
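/* Illustrative cases for pa_output_and (32-bit operands, GAS syntax):
     mask 0x0000ffff  ->  extrw,u %1,31,16,%0   (low-order bit string)
     mask 0xfffffff0  ->  depwi 0,31,4,%0       (clear an inner field)
   The operand constraints guarantee that the zero bits of a constant
   mask reaching this code form a single contiguous field. */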
3101
3102 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3103 storing the result in operands[0]. */
3104 const char *
3105 pa_output_64bit_and (rtx *operands)
3106 {
3107 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3108 {
3109 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3110 int ls0, ls1, ms0, p, len;
3111
3112 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3113 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3114 break;
3115
3116 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3117 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3118 break;
3119
3120 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3121 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3122 break;
3123
3124 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3125
3126 if (ls1 == HOST_BITS_PER_WIDE_INT)
3127 {
3128 len = ls0;
3129
3130 gcc_assert (len);
3131
3132 operands[2] = GEN_INT (len);
3133 return "extrd,u %1,63,%2,%0";
3134 }
3135 else
3136 {
3137 /* We could use `depdi' for the case above as well, but `depdi'
3138 requires one more register file access than an `extrd,u'. */
3139
3140 p = 63 - ls0;
3141 len = ls1 - ls0;
3142
3143 operands[2] = GEN_INT (p);
3144 operands[3] = GEN_INT (len);
3145 return "depdi 0,%2,%3,%0";
3146 }
3147 }
3148 else
3149 return "and %1,%2,%0";
3150 }
3151
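/* Return a string to perform a bitwise-or of operands[1] with
   operands[2], storing the result in operands[0]. */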
3152 const char *
3153 pa_output_ior (rtx *operands)
3154 {
3155 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3156 int bs0, bs1, p, len;
3157
3158 if (INTVAL (operands[2]) == 0)
3159 return "copy %1,%0";
3160
3161 for (bs0 = 0; bs0 < 32; bs0++)
3162 if ((mask & (1 << bs0)) != 0)
3163 break;
3164
3165 for (bs1 = bs0; bs1 < 32; bs1++)
3166 if ((mask & (1 << bs1)) == 0)
3167 break;
3168
3169 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3170
3171 p = 31 - bs0;
3172 len = bs1 - bs0;
3173
3174 operands[2] = GEN_INT (p);
3175 operands[3] = GEN_INT (len);
3176 return "{depi|depwi} -1,%2,%3,%0";
3177 }
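/* Illustrative case (32-bit operands, GAS syntax): mask 0x00000ff0
   has its ones in bits 4..11, so bs0 = 4, bs1 = 12, and the result
   is "depwi -1,27,8,%0". The operand constraints guarantee that
   the ones form a single contiguous bit string. */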
3178
3179 /* Return a string to perform a bitwise-or of operands[1] with operands[2],
3180 storing the result in operands[0]. */
3181 const char *
3182 pa_output_64bit_ior (rtx *operands)
3183 {
3184 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3185 int bs0, bs1, p, len;
3186
3187 if (INTVAL (operands[2]) == 0)
3188 return "copy %1,%0";
3189
3190 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3191 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3192 break;
3193
3194 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3195 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3196 break;
3197
3198 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3199 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3200
3201 p = 63 - bs0;
3202 len = bs1 - bs0;
3203
3204 operands[2] = GEN_INT (p);
3205 operands[3] = GEN_INT (len);
3206 return "depdi -1,%2,%3,%0";
3207 }
3208 \f
3209 /* Target hook for assembling integer objects. This code handles
3210 aligned SI and DI integers specially since function references
3211 must be preceded by P%. */
3212
3213 static bool
3214 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3215 {
3216 if (size == UNITS_PER_WORD
3217 && aligned_p
3218 && function_label_operand (x, VOIDmode))
3219 {
3220 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3221 output_addr_const (asm_out_file, x);
3222 fputc ('\n', asm_out_file);
3223 return true;
3224 }
3225 return default_assemble_integer (x, size, aligned_p);
3226 }
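/* For example, an aligned word-sized reference to function foo is
   emitted as ".word P%foo" (".dword" when the word size is eight
   bytes) so that a function pointer (plabel) is generated rather
   than a plain code address. */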
3227 \f
3228 /* Output an ascii string. */
3229 void
3230 pa_output_ascii (FILE *file, const char *p, int size)
3231 {
3232 int i;
3233 int chars_output;
3234 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3235
3236 /* The HP assembler can only take strings of 256 characters at one
3237 time. This is a limitation on input line length, *not* the
3238 length of the string. Sigh. Even worse, it seems that the
3239 restriction is in number of input characters (see \xnn &
3240 \whatever). So we have to do this very carefully. */
3241
3242 fputs ("\t.STRING \"", file);
3243
3244 chars_output = 0;
3245 for (i = 0; i < size; i += 4)
3246 {
3247 int co = 0;
3248 int io = 0;
3249 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3250 {
3251 register unsigned int c = (unsigned char) p[i + io];
3252
3253 if (c == '\"' || c == '\\')
3254 partial_output[co++] = '\\';
3255 if (c >= ' ' && c < 0177)
3256 partial_output[co++] = c;
3257 else
3258 {
3259 unsigned int hexd;
3260 partial_output[co++] = '\\';
3261 partial_output[co++] = 'x';
3262 hexd = c / 16 - 0 + '0';
3263 if (hexd > '9')
3264 hexd -= '9' - 'a' + 1;
3265 partial_output[co++] = hexd;
3266 hexd = c % 16 - 0 + '0';
3267 if (hexd > '9')
3268 hexd -= '9' - 'a' + 1;
3269 partial_output[co++] = hexd;
3270 }
3271 }
3272 if (chars_output + co > 243)
3273 {
3274 fputs ("\"\n\t.STRING \"", file);
3275 chars_output = 0;
3276 }
3277 fwrite (partial_output, 1, (size_t) co, file);
3278 chars_output += co;
3279 co = 0;
3280 }
3281 fputs ("\"\n", file);
3282 }
3283
3284 /* Try to rewrite floating point comparisons & branches to avoid
3285 useless add,tr insns.
3286
3287 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3288 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3289 first attempt to remove useless add,tr insns. It is zero
3290 for the second pass as reorg sometimes leaves bogus REG_DEAD
3291 notes lying around.
3292
3293 When CHECK_NOTES is zero we can only eliminate add,tr insns
3294 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3295 instructions. */
3296 static void
3297 remove_useless_addtr_insns (int check_notes)
3298 {
3299 rtx insn;
3300 static int pass = 0;
3301
3302 /* This is fairly cheap, so always run it when optimizing. */
3303 if (optimize > 0)
3304 {
3305 int fcmp_count = 0;
3306 int fbranch_count = 0;
3307
3308 /* Walk all the insns in this function looking for fcmp & fbranch
3309 instructions. Keep track of how many of each we find. */
3310 for (insn = get_insns (); insn; insn = next_insn (insn))
3311 {
3312 rtx tmp;
3313
3314 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3315 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3316 continue;
3317
3318 tmp = PATTERN (insn);
3319
3320 /* It must be a set. */
3321 if (GET_CODE (tmp) != SET)
3322 continue;
3323
3324 /* If the destination is CCFP, then we've found an fcmp insn. */
3325 tmp = SET_DEST (tmp);
3326 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3327 {
3328 fcmp_count++;
3329 continue;
3330 }
3331
3332 tmp = PATTERN (insn);
3333 /* If this is an fbranch instruction, bump the fbranch counter. */
3334 if (GET_CODE (tmp) == SET
3335 && SET_DEST (tmp) == pc_rtx
3336 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3337 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3338 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3339 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3340 {
3341 fbranch_count++;
3342 continue;
3343 }
3344 }
3345
3346
3347 /* Find all floating point compare + branch insns. If possible,
3348 reverse the comparison & the branch to avoid add,tr insns. */
3349 for (insn = get_insns (); insn; insn = next_insn (insn))
3350 {
3351 rtx tmp, next;
3352
3353 /* Ignore anything that isn't an INSN. */
3354 if (! NONJUMP_INSN_P (insn))
3355 continue;
3356
3357 tmp = PATTERN (insn);
3358
3359 /* It must be a set. */
3360 if (GET_CODE (tmp) != SET)
3361 continue;
3362
3363 /* The destination must be CCFP, which is register zero. */
3364 tmp = SET_DEST (tmp);
3365 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3366 continue;
3367
3368 /* INSN should be a set of CCFP.
3369
3370 See if the result of this insn is used in a reversed FP
3371 conditional branch. If so, reverse our condition and
3372 the branch. Doing so avoids useless add,tr insns. */
3373 next = next_insn (insn);
3374 while (next)
3375 {
3376 /* Jumps, calls and labels stop our search. */
3377 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3378 break;
3379
3380 /* As does another fcmp insn. */
3381 if (NONJUMP_INSN_P (next)
3382 && GET_CODE (PATTERN (next)) == SET
3383 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3384 && REGNO (SET_DEST (PATTERN (next))) == 0)
3385 break;
3386
3387 next = next_insn (next);
3388 }
3389
3390 /* Is NEXT_INSN a branch? */
3391 if (next && JUMP_P (next))
3392 {
3393 rtx pattern = PATTERN (next);
3394
3395 /* If it is a reversed fp conditional branch (e.g. one using add,tr)
3396 and CCFP dies, then reverse our conditional and the branch
3397 to avoid the add,tr. */
3398 if (GET_CODE (pattern) == SET
3399 && SET_DEST (pattern) == pc_rtx
3400 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3401 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3402 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3403 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3404 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3405 && (fcmp_count == fbranch_count
3406 || (check_notes
3407 && find_regno_note (next, REG_DEAD, 0))))
3408 {
3409 /* Reverse the branch. */
3410 tmp = XEXP (SET_SRC (pattern), 1);
3411 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3412 XEXP (SET_SRC (pattern), 2) = tmp;
3413 INSN_CODE (next) = -1;
3414
3415 /* Reverse our condition. */
3416 tmp = PATTERN (insn);
3417 PUT_CODE (XEXP (tmp, 1),
3418 (reverse_condition_maybe_unordered
3419 (GET_CODE (XEXP (tmp, 1)))));
3420 }
3421 }
3422 }
3423 }
3424
3425 pass = !pass;
3426
3427 }
3428 \f
3429 /* You may have trouble believing this, but this is the 32-bit HP-PA
3430 stack layout. Wow.
3431
3432 Offset Contents
3433
3434 Variable arguments (optional; any number may be allocated)
3435
3436 SP-(4*(N+9)) arg word N
3437 : :
3438 SP-56 arg word 5
3439 SP-52 arg word 4
3440
3441 Fixed arguments (must be allocated; may remain unused)
3442
3443 SP-48 arg word 3
3444 SP-44 arg word 2
3445 SP-40 arg word 1
3446 SP-36 arg word 0
3447
3448 Frame Marker
3449
3450 SP-32 External Data Pointer (DP)
3451 SP-28 External sr4
3452 SP-24 External/stub RP (RP')
3453 SP-20 Current RP
3454 SP-16 Static Link
3455 SP-12 Clean up
3456 SP-8 Calling Stub RP (RP'')
3457 SP-4 Previous SP
3458
3459 Top of Frame
3460
3461 SP-0 Stack Pointer (points to next available address)
3462
3463 */
3464
3465 /* This function saves registers as follows. Registers marked with ' are
3466 this function's registers (as opposed to the previous function's).
3467 If a frame_pointer isn't needed, r4 is saved as a general register;
3468 the space for the frame pointer is still allocated, though, to keep
3469 things simple.
3470
3471
3472 Top of Frame
3473
3474 SP (FP') Previous FP
3475 SP + 4 Alignment filler (sigh)
3476 SP + 8 Space for locals reserved here.
3477 .
3478 .
3479 .
3480 SP + n All call saved registers used.
3481 .
3482 .
3483 .
3484 SP + o All call saved fp registers used.
3485 .
3486 .
3487 .
3488 SP + p (SP') points to next available address.
3489
3490 */
3491
3492 /* Global variables set by output_function_prologue(). */
3493 /* Size of frame. Need to know this to emit return insns from
3494 leaf procedures. */
3495 static HOST_WIDE_INT actual_fsize, local_fsize;
3496 static int save_fregs;
3497
3498 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3499 Handle case where DISP > 8k by using the add_high_const patterns.
3500
3501 Note that in the DISP > 8k case, we will leave the high part of the
3502 address in %r1. There is code in pa_expand_{prologue,epilogue} that knows this. */
3503
3504 static void
3505 store_reg (int reg, HOST_WIDE_INT disp, int base)
3506 {
3507 rtx insn, dest, src, basereg;
3508
3509 src = gen_rtx_REG (word_mode, reg);
3510 basereg = gen_rtx_REG (Pmode, base);
3511 if (VAL_14_BITS_P (disp))
3512 {
3513 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3514 insn = emit_move_insn (dest, src);
3515 }
3516 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3517 {
3518 rtx delta = GEN_INT (disp);
3519 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3520
3521 emit_move_insn (tmpreg, delta);
3522 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3523 if (DO_FRAME_NOTES)
3524 {
3525 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3526 gen_rtx_SET (VOIDmode, tmpreg,
3527 gen_rtx_PLUS (Pmode, basereg, delta)));
3528 RTX_FRAME_RELATED_P (insn) = 1;
3529 }
3530 dest = gen_rtx_MEM (word_mode, tmpreg);
3531 insn = emit_move_insn (dest, src);
3532 }
3533 else
3534 {
3535 rtx delta = GEN_INT (disp);
3536 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3537 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3538
3539 emit_move_insn (tmpreg, high);
3540 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3541 insn = emit_move_insn (dest, src);
3542 if (DO_FRAME_NOTES)
3543 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3544 gen_rtx_SET (VOIDmode,
3545 gen_rtx_MEM (word_mode,
3546 gen_rtx_PLUS (word_mode,
3547 basereg,
3548 delta)),
3549 src));
3550 }
3551
3552 if (DO_FRAME_NOTES)
3553 RTX_FRAME_RELATED_P (insn) = 1;
3554 }
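/* Rough sketch of the sequences emitted above (register numbers
   hypothetical):
     DISP fits in 14 bits:   stw %rN,disp(%rB)
     otherwise (32-bit):     addil L'disp,%rB
                             stw %rN,R'disp(%r1)
   leaving the high part of the address in %r1, as noted. */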
3555
3556 /* Emit RTL to store REG at the memory location specified by BASE and then
3557 add MOD to BASE. MOD must be <= 8k. */
3558
3559 static void
3560 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3561 {
3562 rtx insn, basereg, srcreg, delta;
3563
3564 gcc_assert (VAL_14_BITS_P (mod));
3565
3566 basereg = gen_rtx_REG (Pmode, base);
3567 srcreg = gen_rtx_REG (word_mode, reg);
3568 delta = GEN_INT (mod);
3569
3570 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3571 if (DO_FRAME_NOTES)
3572 {
3573 RTX_FRAME_RELATED_P (insn) = 1;
3574
3575 /* RTX_FRAME_RELATED_P must be set on each frame related set
3576 in a parallel with more than one element. */
3577 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3578 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3579 }
3580 }
3581
3582 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3583 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3584 whether to add a frame note or not.
3585
3586 In the DISP > 8k case, we leave the high part of the address in %r1.
3587 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3588
3589 static void
3590 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3591 {
3592 rtx insn;
3593
3594 if (VAL_14_BITS_P (disp))
3595 {
3596 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3597 plus_constant (Pmode,
3598 gen_rtx_REG (Pmode, base), disp));
3599 }
3600 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3601 {
3602 rtx basereg = gen_rtx_REG (Pmode, base);
3603 rtx delta = GEN_INT (disp);
3604 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3605
3606 emit_move_insn (tmpreg, delta);
3607 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3608 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3609 if (DO_FRAME_NOTES)
3610 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3611 gen_rtx_SET (VOIDmode, tmpreg,
3612 gen_rtx_PLUS (Pmode, basereg, delta)));
3613 }
3614 else
3615 {
3616 rtx basereg = gen_rtx_REG (Pmode, base);
3617 rtx delta = GEN_INT (disp);
3618 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3619
3620 emit_move_insn (tmpreg,
3621 gen_rtx_PLUS (Pmode, basereg,
3622 gen_rtx_HIGH (Pmode, delta)));
3623 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3624 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3625 }
3626
3627 if (DO_FRAME_NOTES && note)
3628 RTX_FRAME_RELATED_P (insn) = 1;
3629 }
3630
3631 HOST_WIDE_INT
3632 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3633 {
3634 int freg_saved = 0;
3635 int i, j;
3636
3637 /* The code in pa_expand_prologue and pa_expand_epilogue must
3638 be consistent with the rounding and size calculation done here.
3639 Change them at the same time. */
3640
3641 /* We do our own stack alignment. First, round the size of the
3642 stack locals up to a word boundary. */
3643 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
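/* For instance, assuming UNITS_PER_WORD == 4, a 13-byte request
   rounds to (13 + 3) & ~3 == 16. */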
3644
3645 /* Space for previous frame pointer + filler. If any frame is
3646 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3647 waste some space here for the sake of HP compatibility. The
3648 first slot is only used when the frame pointer is needed. */
3649 if (size || frame_pointer_needed)
3650 size += STARTING_FRAME_OFFSET;
3651
3652 /* If the current function calls __builtin_eh_return, then we need
3653 to allocate stack space for registers that will hold data for
3654 the exception handler. */
3655 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3656 {
3657 unsigned int i;
3658
3659 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3660 continue;
3661 size += i * UNITS_PER_WORD;
3662 }
3663
3664 /* Account for space used by the callee general register saves. */
3665 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3666 if (df_regs_ever_live_p (i))
3667 size += UNITS_PER_WORD;
3668
3669 /* Account for space used by the callee floating point register saves. */
3670 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3671 if (df_regs_ever_live_p (i)
3672 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3673 {
3674 freg_saved = 1;
3675
3676 /* We always save both halves of the FP register, so always
3677 increment the frame size by 8 bytes. */
3678 size += 8;
3679 }
3680
3681 /* If any of the floating registers are saved, account for the
3682 alignment needed for the floating point register save block. */
3683 if (freg_saved)
3684 {
3685 size = (size + 7) & ~7;
3686 if (fregs_live)
3687 *fregs_live = 1;
3688 }
3689
3690 /* The various ABIs include space for the outgoing parameters in the
3691 size of the current function's stack frame. We don't need to align
3692 for the outgoing arguments as their alignment is set by the final
3693 rounding for the frame as a whole. */
3694 size += crtl->outgoing_args_size;
3695
3696 /* Allocate space for the fixed frame marker. This space must be
3697 allocated for any function that makes calls or allocates
3698 stack space. */
3699 if (!crtl->is_leaf || size)
3700 size += TARGET_64BIT ? 48 : 32;
3701
3702 /* Finally, round to the preferred stack boundary. */
3703 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3704 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3705 }
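/* A worked example under assumed parameters (32-bit target,
   UNITS_PER_WORD == 4, STARTING_FRAME_OFFSET == 8, no outgoing
   argument space, 64-byte PREFERRED_STACK_BOUNDARY): a non-leaf
   function with 40 bytes of locals and two callee GR saves needs
   40 + 8 + 8 + 32 == 88 bytes, which the final rounding raises
   to 128. */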
3706
3707 /* Generate the assembly code for function entry. FILE is a stdio
3708 stream to output the code to. SIZE is an int: how many units of
3709 temporary storage to allocate.
3710
3711 Refer to the array `regs_ever_live' to determine which registers to
3712 save; `regs_ever_live[I]' is nonzero if register number I is ever
3713 used in the function. This function is responsible for knowing
3714 which registers should not be saved even if used. */
3715
3716 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3717 of memory. If any fpu reg is used in the function, we allocate
3718 such a block here, at the bottom of the frame, just in case it's needed.
3719
3720 If this function is a leaf procedure, then we may choose not
3721 to do a "save" insn. The decision about whether or not
3722 to do this is made in regclass.c. */
3723
3724 static void
3725 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3726 {
3727 /* The function's label and associated .PROC must never be
3728 separated and must be output *after* any profiling declarations
3729 to avoid changing spaces/subspaces within a procedure. */
3730 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3731 fputs ("\t.PROC\n", file);
3732
3733 /* pa_expand_prologue does the dirty work now. We just need
3734 to output the assembler directives which denote the start
3735 of a function. */
3736 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3737 if (crtl->is_leaf)
3738 fputs (",NO_CALLS", file);
3739 else
3740 fputs (",CALLS", file);
3741 if (rp_saved)
3742 fputs (",SAVE_RP", file);
3743
3744 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3745 at the beginning of the frame and that it is used as the frame
3746 pointer for the frame. We do this because our current frame
3747 layout doesn't conform to that specified in the HP runtime
3748 documentation and we need a way to indicate to programs such as
3749 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3750 isn't used by HP compilers but is supported by the assembler.
3751 However, SAVE_SP is supposed to indicate that the previous stack
3752 pointer has been saved in the frame marker. */
3753 if (frame_pointer_needed)
3754 fputs (",SAVE_SP", file);
3755
3756 /* Pass on information about the number of callee register saves
3757 performed in the prologue.
3758
3759 The compiler is supposed to pass the highest register number
3760 saved, the assembler then has to adjust that number before
3761 entering it into the unwind descriptor (to account for any
3762 caller saved registers with lower register numbers than the
3763 first callee saved register). */
3764 if (gr_saved)
3765 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3766
3767 if (fr_saved)
3768 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3769
3770 fputs ("\n\t.ENTRY\n", file);
3771
3772 remove_useless_addtr_insns (0);
3773 }
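/* For a hypothetical non-leaf function with a frame pointer, a
   128-byte frame, four GR saves and no FP saves, the directives
   emitted above (following the function's label) would read
   approximately:

   .PROC
   .CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
   .ENTRY
   */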
3774
3775 void
3776 pa_expand_prologue (void)
3777 {
3778 int merge_sp_adjust_with_store = 0;
3779 HOST_WIDE_INT size = get_frame_size ();
3780 HOST_WIDE_INT offset;
3781 int i;
3782 rtx insn, tmpreg;
3783
3784 gr_saved = 0;
3785 fr_saved = 0;
3786 save_fregs = 0;
3787
3788 /* Compute total size for frame pointer, filler, locals and rounding to
3789 the next word boundary. Similar code appears in pa_compute_frame_size
3790 and must be changed in tandem with this code. */
3791 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3792 if (local_fsize || frame_pointer_needed)
3793 local_fsize += STARTING_FRAME_OFFSET;
3794
3795 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3796 if (flag_stack_usage_info)
3797 current_function_static_stack_size = actual_fsize;
3798
3799 /* Compute a few things we will use often. */
3800 tmpreg = gen_rtx_REG (word_mode, 1);
3801
3802 /* Save RP first. The calling conventions manual states RP will
3803 always be stored into the caller's frame at sp - 20 or sp - 16
3804 depending on which ABI is in use. */
3805 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3806 {
3807 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3808 rp_saved = true;
3809 }
3810 else
3811 rp_saved = false;
3812
3813 /* Allocate the local frame and set up the frame pointer if needed. */
3814 if (actual_fsize != 0)
3815 {
3816 if (frame_pointer_needed)
3817 {
3818 /* Copy the old frame pointer temporarily into %r1. Set up the
3819 new stack pointer, then store away the saved old frame pointer
3820 into the stack at sp and at the same time update the stack
3821 pointer by actual_fsize bytes. Two versions, first
3822 handles small (<8k) frames. The second handles large (>=8k)
3823 frames. */
3824 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3825 if (DO_FRAME_NOTES)
3826 RTX_FRAME_RELATED_P (insn) = 1;
3827
3828 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3829 if (DO_FRAME_NOTES)
3830 RTX_FRAME_RELATED_P (insn) = 1;
3831
3832 if (VAL_14_BITS_P (actual_fsize))
3833 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3834 else
3835 {
3836 /* It is incorrect to store the saved frame pointer at *sp,
3837 then increment sp (writes beyond the current stack boundary).
3838
3839 So instead use stwm to store at *sp and post-increment the
3840 stack pointer as an atomic operation. Then increment sp to
3841 finish allocating the new frame. */
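/* E.g., for a hypothetical actual_fsize of 20000, adjust1 would be
   8192 - 64 == 8128 and adjust2 would be 20000 - 8128 == 11872. */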
3842 HOST_WIDE_INT adjust1 = 8192 - 64;
3843 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3844
3845 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3846 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3847 adjust2, 1);
3848 }
3849
3850 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3851 we need to store the previous stack pointer (frame pointer)
3852 into the frame marker on targets that use the HP unwind
3853 library. This allows the HP unwind library to be used to
3854 unwind GCC frames. However, we are not fully compatible
3855 with the HP library because our frame layout differs from
3856 that specified in the HP runtime specification.
3857
3858 We don't want a frame note on this instruction as the frame
3859 marker moves during dynamic stack allocation.
3860
3861 This instruction also serves as a blockage to prevent
3862 register spills from being scheduled before the stack
3863 pointer is raised. This is necessary as we store
3864 registers using the frame pointer as a base register,
3865 and the frame pointer is set before sp is raised. */
3866 if (TARGET_HPUX_UNWIND_LIBRARY)
3867 {
3868 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3869 GEN_INT (TARGET_64BIT ? -8 : -4));
3870
3871 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3872 hard_frame_pointer_rtx);
3873 }
3874 else
3875 emit_insn (gen_blockage ());
3876 }
3877 /* no frame pointer needed. */
3878 else
3879 {
3880 /* In some cases we can perform the first callee register save
3881 and allocate the stack frame at the same time. If so, just
3882 make a note of it and defer allocating the frame until saving
3883 the callee registers. */
3884 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3885 merge_sp_adjust_with_store = 1;
3886 /* Cannot optimize. Adjust the stack frame by actual_fsize
3887 bytes. */
3888 else
3889 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3890 actual_fsize, 1);
3891 }
3892 }
3893
3894 /* Normal register save.
3895
3896 Do not save the frame pointer in the frame_pointer_needed case. It
3897 was done earlier. */
3898 if (frame_pointer_needed)
3899 {
3900 offset = local_fsize;
3901
3902 /* Saving the EH return data registers in the frame is the simplest
3903 way to get the frame unwind information emitted. We put them
3904 just before the general registers. */
3905 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3906 {
3907 unsigned int i, regno;
3908
3909 for (i = 0; ; ++i)
3910 {
3911 regno = EH_RETURN_DATA_REGNO (i);
3912 if (regno == INVALID_REGNUM)
3913 break;
3914
3915 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3916 offset += UNITS_PER_WORD;
3917 }
3918 }
3919
3920 for (i = 18; i >= 4; i--)
3921 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3922 {
3923 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3924 offset += UNITS_PER_WORD;
3925 gr_saved++;
3926 }
3927 /* Account for %r3 which is saved in a special place. */
3928 gr_saved++;
3929 }
3930 /* No frame pointer needed. */
3931 else
3932 {
3933 offset = local_fsize - actual_fsize;
3934
3935 /* Saving the EH return data registers in the frame is the simplest
3936 way to get the frame unwind information emitted. */
3937 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3938 {
3939 unsigned int i, regno;
3940
3941 for (i = 0; ; ++i)
3942 {
3943 regno = EH_RETURN_DATA_REGNO (i);
3944 if (regno == INVALID_REGNUM)
3945 break;
3946
3947 /* If merge_sp_adjust_with_store is nonzero, then we can
3948 optimize the first save. */
3949 if (merge_sp_adjust_with_store)
3950 {
3951 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3952 merge_sp_adjust_with_store = 0;
3953 }
3954 else
3955 store_reg (regno, offset, STACK_POINTER_REGNUM);
3956 offset += UNITS_PER_WORD;
3957 }
3958 }
3959
3960 for (i = 18; i >= 3; i--)
3961 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3962 {
3963 /* If merge_sp_adjust_with_store is nonzero, then we can
3964 optimize the first GR save. */
3965 if (merge_sp_adjust_with_store)
3966 {
3967 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3968 merge_sp_adjust_with_store = 0;
3969 }
3970 else
3971 store_reg (i, offset, STACK_POINTER_REGNUM);
3972 offset += UNITS_PER_WORD;
3973 gr_saved++;
3974 }
3975
3976 /* If we wanted to merge the SP adjustment with a GR save, but we never
3977 did any GR saves, then just emit the adjustment here. */
3978 if (merge_sp_adjust_with_store)
3979 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3980 actual_fsize, 1);
3981 }
3982
3983 /* The hppa calling conventions say that %r19, the pic offset
3984 register, is saved at sp - 32 (in this function's frame)
3985 when generating PIC code. FIXME: What is the correct thing
3986 to do for functions which make no calls and allocate no
3987 frame? Do we need to allocate a frame, or can we just omit
3988 the save? For now we'll just omit the save.
3989
3990 We don't want a note on this insn as the frame marker can
3991 move if there is a dynamic stack allocation. */
3992 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3993 {
3994 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3995
3996 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3997
3998 }
3999
4000 /* Align pointer properly (doubleword boundary). */
4001 offset = (offset + 7) & ~7;
4002
4003 /* Floating point register store. */
4004 if (save_fregs)
4005 {
4006 rtx base;
4007
4008 /* First get the frame or stack pointer to the start of the FP register
4009 save area. */
4010 if (frame_pointer_needed)
4011 {
4012 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4013 base = hard_frame_pointer_rtx;
4014 }
4015 else
4016 {
4017 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4018 base = stack_pointer_rtx;
4019 }
4020
4021 /* Now actually save the FP registers. */
4022 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4023 {
4024 if (df_regs_ever_live_p (i)
4025 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4026 {
4027 rtx addr, insn, reg;
4028 addr = gen_rtx_MEM (DFmode,
4029 gen_rtx_POST_INC (word_mode, tmpreg));
4030 reg = gen_rtx_REG (DFmode, i);
4031 insn = emit_move_insn (addr, reg);
4032 if (DO_FRAME_NOTES)
4033 {
4034 RTX_FRAME_RELATED_P (insn) = 1;
4035 if (TARGET_64BIT)
4036 {
4037 rtx mem = gen_rtx_MEM (DFmode,
4038 plus_constant (Pmode, base,
4039 offset));
4040 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4041 gen_rtx_SET (VOIDmode, mem, reg));
4042 }
4043 else
4044 {
4045 rtx meml = gen_rtx_MEM (SFmode,
4046 plus_constant (Pmode, base,
4047 offset));
4048 rtx memr = gen_rtx_MEM (SFmode,
4049 plus_constant (Pmode, base,
4050 offset + 4));
4051 rtx regl = gen_rtx_REG (SFmode, i);
4052 rtx regr = gen_rtx_REG (SFmode, i + 1);
4053 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4054 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4055 rtvec vec;
4056
4057 RTX_FRAME_RELATED_P (setl) = 1;
4058 RTX_FRAME_RELATED_P (setr) = 1;
4059 vec = gen_rtvec (2, setl, setr);
4060 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4061 gen_rtx_SEQUENCE (VOIDmode, vec));
4062 }
4063 }
4064 offset += GET_MODE_SIZE (DFmode);
4065 fr_saved++;
4066 }
4067 }
4068 }
4069 }
4070
4071 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4072 Handle case where DISP > 8k by using the add_high_const patterns. */
4073
4074 static void
4075 load_reg (int reg, HOST_WIDE_INT disp, int base)
4076 {
4077 rtx dest = gen_rtx_REG (word_mode, reg);
4078 rtx basereg = gen_rtx_REG (Pmode, base);
4079 rtx src;
4080
4081 if (VAL_14_BITS_P (disp))
4082 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4083 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4084 {
4085 rtx delta = GEN_INT (disp);
4086 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4087
4088 emit_move_insn (tmpreg, delta);
4089 if (TARGET_DISABLE_INDEXING)
4090 {
4091 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4092 src = gen_rtx_MEM (word_mode, tmpreg);
4093 }
4094 else
4095 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4096 }
4097 else
4098 {
4099 rtx delta = GEN_INT (disp);
4100 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4101 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4102
4103 emit_move_insn (tmpreg, high);
4104 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4105 }
4106
4107 emit_move_insn (dest, src);
4108 }
4109
4110 /* Update the total code bytes output to the text section. */
4111
4112 static void
4113 update_total_code_bytes (unsigned int nbytes)
4114 {
4115 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4116 && !IN_NAMED_SECTION_P (cfun->decl))
4117 {
4118 unsigned int old_total = total_code_bytes;
4119
4120 total_code_bytes += nbytes;
4121
4122 /* Be prepared to handle overflows. */
4123 if (old_total > total_code_bytes)
4124 total_code_bytes = UINT_MAX;
4125 }
4126 }
4127
4128 /* This function generates the assembly code for function exit.
4129 Args are as for output_function_prologue ().
4130
4131 The function epilogue should not depend on the current stack
4132 pointer! It should use the frame pointer only. This is mandatory
4133 because of alloca; we also take advantage of it to omit stack
4134 adjustments before returning. */
4135
4136 static void
4137 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4138 {
4139 rtx insn = get_last_insn ();
4140 bool extra_nop;
4141
4142 /* pa_expand_epilogue does the dirty work now. We just need
4143 to output the assembler directives which denote the end
4144 of a function.
4145
4146 To make debuggers happy, emit a nop if the epilogue was completely
4147 eliminated because a volatile call is the last insn in the
4148 current function. That way the return address (in %r2) will
4149 always point to a valid instruction in the current function. */
4150
4151 /* Get the last real insn. */
4152 if (NOTE_P (insn))
4153 insn = prev_real_insn (insn);
4154
4155 /* If it is a sequence, then look inside. */
4156 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4157 insn = XVECEXP (PATTERN (insn), 0, 0);
4158
4159 /* If insn is a CALL_INSN, then it must be a call to a volatile
4160 function (otherwise there would be epilogue insns). */
4161 if (insn && CALL_P (insn))
4162 {
4163 fputs ("\tnop\n", file);
4164 extra_nop = true;
4165 }
4166 else
4167 extra_nop = false;
4168
4169 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4170
4171 if (TARGET_SOM && TARGET_GAS)
4172 {
4173 /* We are done with this subspace except possibly for some additional
4174 debug information. Forget that we are in this subspace to ensure
4175 that the next function is output in its own subspace. */
4176 in_section = NULL;
4177 cfun->machine->in_nsubspa = 2;
4178 }
4179
4180 /* Thunks do their own insn accounting. */
4181 if (cfun->is_thunk)
4182 return;
4183
4184 if (INSN_ADDRESSES_SET_P ())
4185 {
4186 last_address = extra_nop ? 4 : 0;
4187 insn = get_last_nonnote_insn ();
4188 last_address += INSN_ADDRESSES (INSN_UID (insn));
4189 if (INSN_P (insn))
4190 last_address += insn_default_length (insn);
4191 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4192 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4193 }
4194 else
4195 last_address = UINT_MAX;
4196
4197 /* Finally, update the total number of code bytes output so far. */
4198 update_total_code_bytes (last_address);
4199 }
4200
4201 void
4202 pa_expand_epilogue (void)
4203 {
4204 rtx tmpreg;
4205 HOST_WIDE_INT offset;
4206 HOST_WIDE_INT ret_off = 0;
4207 int i;
4208 int merge_sp_adjust_with_load = 0;
4209
4210 /* We will use this often. */
4211 tmpreg = gen_rtx_REG (word_mode, 1);
4212
4213 /* Try to restore RP early to avoid load/use interlocks when
4214 RP gets used in the return (bv) instruction. This appears to still
4215 be necessary even when we schedule the prologue and epilogue. */
4216 if (rp_saved)
4217 {
4218 ret_off = TARGET_64BIT ? -16 : -20;
4219 if (frame_pointer_needed)
4220 {
4221 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4222 ret_off = 0;
4223 }
4224 else
4225 {
4226 /* No frame pointer, and stack is smaller than 8k. */
4227 if (VAL_14_BITS_P (ret_off - actual_fsize))
4228 {
4229 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4230 ret_off = 0;
4231 }
4232 }
4233 }
4234
4235 /* General register restores. */
4236 if (frame_pointer_needed)
4237 {
4238 offset = local_fsize;
4239
4240 /* If the current function calls __builtin_eh_return, then we need
4241 to restore the saved EH data registers. */
4242 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4243 {
4244 unsigned int i, regno;
4245
4246 for (i = 0; ; ++i)
4247 {
4248 regno = EH_RETURN_DATA_REGNO (i);
4249 if (regno == INVALID_REGNUM)
4250 break;
4251
4252 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4253 offset += UNITS_PER_WORD;
4254 }
4255 }
4256
4257 for (i = 18; i >= 4; i--)
4258 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4259 {
4260 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4261 offset += UNITS_PER_WORD;
4262 }
4263 }
4264 else
4265 {
4266 offset = local_fsize - actual_fsize;
4267
4268 /* If the current function calls __builtin_eh_return, then we need
4269 to restore the saved EH data registers. */
4270 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4271 {
4272 unsigned int i, regno;
4273
4274 for (i = 0; ; ++i)
4275 {
4276 regno = EH_RETURN_DATA_REGNO (i);
4277 if (regno == INVALID_REGNUM)
4278 break;
4279
4280 /* Only for the first load.
4281 merge_sp_adjust_with_load holds the register load
4282 with which we will merge the sp adjustment. */
4283 if (merge_sp_adjust_with_load == 0
4284 && local_fsize == 0
4285 && VAL_14_BITS_P (-actual_fsize))
4286 merge_sp_adjust_with_load = regno;
4287 else
4288 load_reg (regno, offset, STACK_POINTER_REGNUM);
4289 offset += UNITS_PER_WORD;
4290 }
4291 }
4292
4293 for (i = 18; i >= 3; i--)
4294 {
4295 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4296 {
4297 /* Only for the first load.
4298 merge_sp_adjust_with_load holds the register load
4299 with which we will merge the sp adjustment. */
4300 if (merge_sp_adjust_with_load == 0
4301 && local_fsize == 0
4302 && VAL_14_BITS_P (-actual_fsize))
4303 merge_sp_adjust_with_load = i;
4304 else
4305 load_reg (i, offset, STACK_POINTER_REGNUM);
4306 offset += UNITS_PER_WORD;
4307 }
4308 }
4309 }
4310
4311 /* Align pointer properly (doubleword boundary). */
4312 offset = (offset + 7) & ~7;
4313
4314 /* FP register restores. */
4315 if (save_fregs)
4316 {
4317 /* Adjust the register to index off of. */
4318 if (frame_pointer_needed)
4319 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4320 else
4321 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4322
4323 /* Actually do the restores now. */
4324 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4325 if (df_regs_ever_live_p (i)
4326 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4327 {
4328 rtx src = gen_rtx_MEM (DFmode,
4329 gen_rtx_POST_INC (word_mode, tmpreg));
4330 rtx dest = gen_rtx_REG (DFmode, i);
4331 emit_move_insn (dest, src);
4332 }
4333 }
4334
4335 /* Emit a blockage insn here to keep these insns from being moved to
4336 an earlier spot in the epilogue, or into the main instruction stream.
4337
4338 This is necessary as we must not cut the stack back before all the
4339 restores are finished. */
4340 emit_insn (gen_blockage ());
4341
4342 /* Reset stack pointer (and possibly frame pointer). The stack
4343 pointer is initially set to fp + 64 to avoid a race condition. */
4344 if (frame_pointer_needed)
4345 {
4346 rtx delta = GEN_INT (-64);
4347
4348 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4349 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4350 stack_pointer_rtx, delta));
4351 }
4352 /* If we were deferring a callee register restore, do it now. */
4353 else if (merge_sp_adjust_with_load)
4354 {
4355 rtx delta = GEN_INT (-actual_fsize);
4356 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4357
4358 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4359 }
4360 else if (actual_fsize != 0)
4361 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4362 - actual_fsize, 0);
4363
4364 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4365 frame greater than 8k), do so now. */
4366 if (ret_off != 0)
4367 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4368
4369 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4370 {
4371 rtx sa = EH_RETURN_STACKADJ_RTX;
4372
4373 emit_insn (gen_blockage ());
4374 emit_insn (TARGET_64BIT
4375 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4376 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4377 }
4378 }
4379
4380 bool
4381 pa_can_use_return_insn (void)
4382 {
4383 if (!reload_completed)
4384 return false;
4385
4386 if (frame_pointer_needed)
4387 return false;
4388
4389 if (df_regs_ever_live_p (2))
4390 return false;
4391
4392 if (crtl->profile)
4393 return false;
4394
4395 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4396 }
4397
4398 rtx
4399 hppa_pic_save_rtx (void)
4400 {
4401 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4402 }
4403
4404 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4405 #define NO_DEFERRED_PROFILE_COUNTERS 0
4406 #endif
4407
4408
4409 /* Vector of funcdef numbers. */
4410 static vec<int> funcdef_nos;
4411
4412 /* Output deferred profile counters. */
4413 static void
4414 output_deferred_profile_counters (void)
4415 {
4416 unsigned int i;
4417 int align, n;
4418
4419 if (funcdef_nos.is_empty ())
4420 return;
4421
4422 switch_to_section (data_section);
4423 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4424 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4425
4426 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4427 {
4428 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4429 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4430 }
4431
4432 funcdef_nos.release ();
4433 }
4434
4435 void
4436 hppa_profile_hook (int label_no)
4437 {
4438 /* We use SImode for the address of the function in both 32 and
4439 64-bit code to avoid having to provide DImode versions of the
4440 lcla2 and load_offset_label_address insn patterns. */
4441 rtx reg = gen_reg_rtx (SImode);
4442 rtx label_rtx = gen_label_rtx ();
4443 rtx begin_label_rtx, call_insn;
4444 char begin_label_name[16];
4445
4446 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4447 label_no);
4448 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4449
4450 if (TARGET_64BIT)
4451 emit_move_insn (arg_pointer_rtx,
4452 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4453 GEN_INT (64)));
4454
4455 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4456
4457 /* The address of the function is loaded into %r25 with an instruction-
4458 relative sequence that avoids the use of relocations. The sequence
4459 is split so that the load_offset_label_address instruction can
4460 occupy the delay slot of the call to _mcount. */
4461 if (TARGET_PA_20)
4462 emit_insn (gen_lcla2 (reg, label_rtx));
4463 else
4464 emit_insn (gen_lcla1 (reg, label_rtx));
4465
4466 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4467 reg, begin_label_rtx, label_rtx));
4468
4469 #if !NO_DEFERRED_PROFILE_COUNTERS
4470 {
4471 rtx count_label_rtx, addr, r24;
4472 char count_label_name[16];
4473
4474 funcdef_nos.safe_push (label_no);
4475 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4476 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4477
4478 addr = force_reg (Pmode, count_label_rtx);
4479 r24 = gen_rtx_REG (Pmode, 24);
4480 emit_move_insn (r24, addr);
4481
4482 call_insn =
4483 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4484 gen_rtx_SYMBOL_REF (Pmode,
4485 "_mcount")),
4486 GEN_INT (TARGET_64BIT ? 24 : 12)));
4487
4488 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4489 }
4490 #else
4491
4492 call_insn =
4493 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4494 gen_rtx_SYMBOL_REF (Pmode,
4495 "_mcount")),
4496 GEN_INT (TARGET_64BIT ? 16 : 8)));
4497
4498 #endif
4499
4500 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4501 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4502
4503 /* Indicate the _mcount call cannot throw, nor will it execute a
4504 non-local goto. */
4505 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4506 }
4507
4508 /* Fetch the return address for the frame COUNT steps up from
4509 the current frame, after the prologue. FRAMEADDR is the
4510 frame pointer of the COUNT frame.
4511
4512 We want to ignore any export stub remnants here. To handle this,
4513 we examine the code at the return address, and if it is an export
4514 stub, we return a memory rtx for the stub return address stored
4515 at frame-24.
4516
4517 The value returned is used in two different ways:
4518
4519 1. To find a function's caller.
4520
4521 2. To change the return address for a function.
4522
4523 This function handles most instances of case 1; however, it will
4524 fail if there are two levels of stubs to execute on the return
4525 path. The only way I believe that can happen is if the return value
4526 needs a parameter relocation, which never happens for C code.
4527
4528 This function handles most instances of case 2; however, it will
4529 fail if we did not originally have stub code on the return path
4530 but will need stub code on the new return path. This can happen if
4531 the caller & callee are both in the main program, but the new
4532 return location is in a shared library. */
4533
4534 rtx
4535 pa_return_addr_rtx (int count, rtx frameaddr)
4536 {
4537 rtx label;
4538 rtx rp;
4539 rtx saved_rp;
4540 rtx ins;
4541
4542 /* The instruction stream at the return address of a PA1.X export stub is:
4543
4544 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4545 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4546 0x00011820 | stub+16: mtsp r1,sr0
4547 0xe0400002 | stub+20: be,n 0(sr0,rp)
4548
4549 0xe0400002 must be specified as -532676606 so that it won't be
4550 rejected as an invalid immediate operand on 64-bit hosts.
4551
4552 The instruction stream at the return address of a PA2.0 export stub is:
4553
4554 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4555 0xe840d002 | stub+12: bve,n (rp)
4556 */
4557
4558 HOST_WIDE_INT insns[4];
4559 int i, len;
4560
4561 if (count != 0)
4562 return NULL_RTX;
4563
4564 rp = get_hard_reg_initial_val (Pmode, 2);
4565
4566 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4567 return rp;
4568
4569 /* If there is no export stub then just use the value saved from
4570 the return pointer register. */
4571
4572 saved_rp = gen_reg_rtx (Pmode);
4573 emit_move_insn (saved_rp, rp);
4574
4575 /* Get pointer to the instruction stream. We have to mask out the
4576 privilege level from the two low order bits of the return address
4577 pointer here so that ins will point to the start of the first
4578 instruction that would have been executed if we returned. */
4579 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4580 label = gen_label_rtx ();
4581
4582 if (TARGET_PA_20)
4583 {
4584 insns[0] = 0x4bc23fd1;
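/* The next value is 0xe840d002 (the bve,n above), written as a
   negative number for the same reason as -532676606. */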
4585 insns[1] = -398405630;
4586 len = 2;
4587 }
4588 else
4589 {
4590 insns[0] = 0x4bc23fd1;
4591 insns[1] = 0x004010a1;
4592 insns[2] = 0x00011820;
4593 insns[3] = -532676606;
4594 len = 4;
4595 }
4596
4597 /* Check the instruction stream at the normal return address for the
4598 export stub. If it is an export stub, then our return address is
4599 really in -24[frameaddr]. */
4600
4601 for (i = 0; i < len; i++)
4602 {
4603 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4604 rtx op1 = GEN_INT (insns[i]);
4605 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4606 }
4607
4608 /* Here we know that our return address points to an export
4609 stub. We don't want to return the address of the export stub,
4610 but rather the return address of the export stub. That return
4611 address is stored at -24[frameaddr]. */
4612
4613 emit_move_insn (saved_rp,
4614 gen_rtx_MEM (Pmode,
4615 memory_address (Pmode,
4616 plus_constant (Pmode, frameaddr,
4617 -24))));
4618
4619 emit_label (label);
4620
4621 return saved_rp;
4622 }
4623
4624 void
4625 pa_emit_bcond_fp (rtx operands[])
4626 {
4627 enum rtx_code code = GET_CODE (operands[0]);
4628 rtx operand0 = operands[1];
4629 rtx operand1 = operands[2];
4630 rtx label = operands[3];
4631
4632 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4633 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4634
4635 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4636 gen_rtx_IF_THEN_ELSE (VOIDmode,
4637 gen_rtx_fmt_ee (NE,
4638 VOIDmode,
4639 gen_rtx_REG (CCFPmode, 0),
4640 const0_rtx),
4641 gen_rtx_LABEL_REF (VOIDmode, label),
4642 pc_rtx)));
4643
4644 }
4645
4646 /* Adjust the cost of a scheduling dependency. Return the new cost of
4647 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4648
4649 static int
4650 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4651 {
4652 enum attr_type attr_type;
4653
4654 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4655 true dependencies, as they are described with bypasses now. */
4656 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4657 return cost;
4658
4659 if (! recog_memoized (insn))
4660 return 0;
4661
4662 attr_type = get_attr_type (insn);
4663
4664 switch (REG_NOTE_KIND (link))
4665 {
4666 case REG_DEP_ANTI:
4667 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4668 cycles later. */
4669
4670 if (attr_type == TYPE_FPLOAD)
4671 {
4672 rtx pat = PATTERN (insn);
4673 rtx dep_pat = PATTERN (dep_insn);
4674 if (GET_CODE (pat) == PARALLEL)
4675 {
4676 /* This happens for the fldXs,mb patterns. */
4677 pat = XVECEXP (pat, 0, 0);
4678 }
4679 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4680 /* If this happens, we have to extend this to schedule
4681 optimally. Return 0 for now. */
4682 return 0;
4683
4684 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4685 {
4686 if (! recog_memoized (dep_insn))
4687 return 0;
4688 switch (get_attr_type (dep_insn))
4689 {
4690 case TYPE_FPALU:
4691 case TYPE_FPMULSGL:
4692 case TYPE_FPMULDBL:
4693 case TYPE_FPDIVSGL:
4694 case TYPE_FPDIVDBL:
4695 case TYPE_FPSQRTSGL:
4696 case TYPE_FPSQRTDBL:
4697 /* A fpload can't be issued until one cycle before a
4698 preceding arithmetic operation has finished if
4699 the target of the fpload is any of the sources
4700 (or destination) of the arithmetic operation. */
4701 return insn_default_latency (dep_insn) - 1;
4702
4703 default:
4704 return 0;
4705 }
4706 }
4707 }
4708 else if (attr_type == TYPE_FPALU)
4709 {
4710 rtx pat = PATTERN (insn);
4711 rtx dep_pat = PATTERN (dep_insn);
4712 if (GET_CODE (pat) == PARALLEL)
4713 {
4714 /* This happens for the fldXs,mb patterns. */
4715 pat = XVECEXP (pat, 0, 0);
4716 }
4717 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4718 /* If this happens, we have to extend this to schedule
4719 optimally. Return 0 for now. */
4720 return 0;
4721
4722 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4723 {
4724 if (! recog_memoized (dep_insn))
4725 return 0;
4726 switch (get_attr_type (dep_insn))
4727 {
4728 case TYPE_FPDIVSGL:
4729 case TYPE_FPDIVDBL:
4730 case TYPE_FPSQRTSGL:
4731 case TYPE_FPSQRTDBL:
4732 /* An ALU flop can't be issued until two cycles before a
4733 preceding divide or sqrt operation has finished if
4734 the target of the ALU flop is any of the sources
4735 (or destination) of the divide or sqrt operation. */
4736 return insn_default_latency (dep_insn) - 2;
4737
4738 default:
4739 return 0;
4740 }
4741 }
4742 }
4743
4744 /* For other anti dependencies, the cost is 0. */
4745 return 0;
4746
4747 case REG_DEP_OUTPUT:
4748 /* Output dependency; DEP_INSN writes a register that INSN writes some
4749 cycles later. */
4750 if (attr_type == TYPE_FPLOAD)
4751 {
4752 rtx pat = PATTERN (insn);
4753 rtx dep_pat = PATTERN (dep_insn);
4754 if (GET_CODE (pat) == PARALLEL)
4755 {
4756 /* This happens for the fldXs,mb patterns. */
4757 pat = XVECEXP (pat, 0, 0);
4758 }
4759 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4760 /* If this happens, we have to extend this to schedule
4761 optimally. Return 0 for now. */
4762 return 0;
4763
4764 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4765 {
4766 if (! recog_memoized (dep_insn))
4767 return 0;
4768 switch (get_attr_type (dep_insn))
4769 {
4770 case TYPE_FPALU:
4771 case TYPE_FPMULSGL:
4772 case TYPE_FPMULDBL:
4773 case TYPE_FPDIVSGL:
4774 case TYPE_FPDIVDBL:
4775 case TYPE_FPSQRTSGL:
4776 case TYPE_FPSQRTDBL:
4777 /* A fpload can't be issued until one cycle before a
4778 preceding arithmetic operation has finished if
4779 the target of the fpload is the destination of the
4780 arithmetic operation.
4781
4782 Exception: For PA7100LC, PA7200 and PA7300, the cost
4783 is 3 cycles, unless they bundle together. We also
4784 pay the penalty if the second insn is a fpload. */
4785 return insn_default_latency (dep_insn) - 1;
4786
4787 default:
4788 return 0;
4789 }
4790 }
4791 }
4792 else if (attr_type == TYPE_FPALU)
4793 {
4794 rtx pat = PATTERN (insn);
4795 rtx dep_pat = PATTERN (dep_insn);
4796 if (GET_CODE (pat) == PARALLEL)
4797 {
4798 /* This happens for the fldXs,mb patterns. */
4799 pat = XVECEXP (pat, 0, 0);
4800 }
4801 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4802 /* If this happens, we have to extend this to schedule
4803 optimally. Return 0 for now. */
4804 return 0;
4805
4806 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4807 {
4808 if (! recog_memoized (dep_insn))
4809 return 0;
4810 switch (get_attr_type (dep_insn))
4811 {
4812 case TYPE_FPDIVSGL:
4813 case TYPE_FPDIVDBL:
4814 case TYPE_FPSQRTSGL:
4815 case TYPE_FPSQRTDBL:
4816 /* An ALU flop can't be issued until two cycles before a
4817 preceding divide or sqrt operation has finished if
4818 the target of the ALU flop is also the target of
4819 the divide or sqrt operation. */
4820 return insn_default_latency (dep_insn) - 2;
4821
4822 default:
4823 return 0;
4824 }
4825 }
4826 }
4827
4828 /* For other output dependencies, the cost is 0. */
4829 return 0;
4830
4831 default:
4832 gcc_unreachable ();
4833 }
4834 }
4835
4836 /* Adjust scheduling priorities. We use this to try and keep addil
4837 and the next use of %r1 close together. */
4838 static int
4839 pa_adjust_priority (rtx_insn *insn, int priority)
4840 {
4841 rtx set = single_set (insn);
4842 rtx src, dest;
4843 if (set)
4844 {
4845 src = SET_SRC (set);
4846 dest = SET_DEST (set);
4847 if (GET_CODE (src) == LO_SUM
4848 && symbolic_operand (XEXP (src, 1), VOIDmode)
4849 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4850 priority >>= 3;
4851
4852 else if (GET_CODE (src) == MEM
4853 && GET_CODE (XEXP (src, 0)) == LO_SUM
4854 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4855 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4856 priority >>= 1;
4857
4858 else if (GET_CODE (dest) == MEM
4859 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4860 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4861 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4862 priority >>= 3;
4863 }
4864 return priority;
4865 }
4866
4867 /* The 700 can only issue a single insn at a time.
4868 The 7XXX processors can issue two insns at a time.
4869 The 8000 can issue 4 insns at a time. */
4870 static int
4871 pa_issue_rate (void)
4872 {
4873 switch (pa_cpu)
4874 {
4875 case PROCESSOR_700: return 1;
4876 case PROCESSOR_7100: return 2;
4877 case PROCESSOR_7100LC: return 2;
4878 case PROCESSOR_7200: return 2;
4879 case PROCESSOR_7300: return 2;
4880 case PROCESSOR_8000: return 4;
4881
4882 default:
4883 gcc_unreachable ();
4884 }
4885 }
4886
4887
4888
4889 /* Return any length plus adjustment needed by INSN which already has
4890 its length computed as LENGTH. Return LENGTH if no adjustment is
4891 necessary.
4892
4893 Also compute the length of an inline block move here as it is too
4894 complicated to express as a length attribute in pa.md. */
4895 int
4896 pa_adjust_insn_length (rtx_insn *insn, int length)
4897 {
4898 rtx pat = PATTERN (insn);
4899
4900 /* If length is negative or undefined, provide initial length. */
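/* The unsigned cast below folds both tests into a single compare:
   any negative length wraps around to a value above INT_MAX. */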
4901 if ((unsigned int) length >= INT_MAX)
4902 {
4903 if (GET_CODE (pat) == SEQUENCE)
4904 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4905
4906 switch (get_attr_type (insn))
4907 {
4908 case TYPE_MILLI:
4909 length = pa_attr_length_millicode_call (insn);
4910 break;
4911 case TYPE_CALL:
4912 length = pa_attr_length_call (insn, 0);
4913 break;
4914 case TYPE_SIBCALL:
4915 length = pa_attr_length_call (insn, 1);
4916 break;
4917 case TYPE_DYNCALL:
4918 length = pa_attr_length_indirect_call (insn);
4919 break;
4920 case TYPE_SH_FUNC_ADRS:
4921 length = pa_attr_length_millicode_call (insn) + 20;
4922 break;
4923 default:
4924 gcc_unreachable ();
4925 }
4926 }
4927
4928 /* Block move pattern. */
4929 if (NONJUMP_INSN_P (insn)
4930 && GET_CODE (pat) == PARALLEL
4931 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4932 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4933 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4934 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4935 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4936 length += compute_movmem_length (insn) - 4;
4937 /* Block clear pattern. */
4938 else if (NONJUMP_INSN_P (insn)
4939 && GET_CODE (pat) == PARALLEL
4940 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4941 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4942 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4943 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4944 length += compute_clrmem_length (insn) - 4;
4945 /* Conditional branch with an unfilled delay slot. */
4946 else if (JUMP_P (insn) && ! simplejump_p (insn))
4947 {
4948 /* Adjust a short backwards conditional with an unfilled delay slot. */
4949 if (GET_CODE (pat) == SET
4950 && length == 4
4951 && JUMP_LABEL (insn) != NULL_RTX
4952 && ! forward_branch_p (insn))
4953 length += 4;
4954 else if (GET_CODE (pat) == PARALLEL
4955 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4956 && length == 4)
4957 length += 4;
4958 /* Adjust dbra insn with short backwards conditional branch with
4959 unfilled delay slot -- only for the case where the counter is in
4960 a general register. */
4961 else if (GET_CODE (pat) == PARALLEL
4962 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4963 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4964 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4965 && length == 4
4966 && ! forward_branch_p (insn))
4967 length += 4;
4968 }
4969 return length;
4970 }
4971
4972 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4973
4974 static bool
4975 pa_print_operand_punct_valid_p (unsigned char code)
4976 {
4977 if (code == '@'
4978 || code == '#'
4979 || code == '*'
4980 || code == '^')
4981 return true;
4982
4983 return false;
4984 }
4985
4986 /* Print operand X (an rtx) in assembler syntax to file FILE.
4987 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4988 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4989
4990 void
4991 pa_print_operand (FILE *file, rtx x, int code)
4992 {
4993 switch (code)
4994 {
4995 case '#':
4996 /* Output a 'nop' if there's nothing for the delay slot. */
4997 if (dbr_sequence_length () == 0)
4998 fputs ("\n\tnop", file);
4999 return;
5000 case '*':
5001 /* Output a nullification completer if there's nothing for the
5002 delay slot or nullification is requested. */
5003 if (dbr_sequence_length () == 0
5004 || (final_sequence
5005 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5006 fputs (",n", file);
5007 return;
5008 case 'R':
5009 /* Print out the second register name of a register pair.
5010 I.e., R (6) => 7. */
5011 fputs (reg_names[REGNO (x) + 1], file);
5012 return;
5013 case 'r':
5014 /* A register or zero. */
5015 if (x == const0_rtx
5016 || (x == CONST0_RTX (DFmode))
5017 || (x == CONST0_RTX (SFmode)))
5018 {
5019 fputs ("%r0", file);
5020 return;
5021 }
5022 else
5023 break;
5024 case 'f':
5025 /* A register or zero (floating point). */
5026 if (x == const0_rtx
5027 || (x == CONST0_RTX (DFmode))
5028 || (x == CONST0_RTX (SFmode)))
5029 {
5030 fputs ("%fr0", file);
5031 return;
5032 }
5033 else
5034 break;
5035 case 'A':
5036 {
5037 rtx xoperands[2];
5038
5039 xoperands[0] = XEXP (XEXP (x, 0), 0);
5040 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5041 pa_output_global_address (file, xoperands[1], 0);
5042 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5043 return;
5044 }
5045
5046 case 'C': /* Plain (C)ondition */
5047 case 'X':
5048 switch (GET_CODE (x))
5049 {
5050 case EQ:
5051 fputs ("=", file); break;
5052 case NE:
5053 fputs ("<>", file); break;
5054 case GT:
5055 fputs (">", file); break;
5056 case GE:
5057 fputs (">=", file); break;
5058 case GEU:
5059 fputs (">>=", file); break;
5060 case GTU:
5061 fputs (">>", file); break;
5062 case LT:
5063 fputs ("<", file); break;
5064 case LE:
5065 fputs ("<=", file); break;
5066 case LEU:
5067 fputs ("<<=", file); break;
5068 case LTU:
5069 fputs ("<<", file); break;
5070 default:
5071 gcc_unreachable ();
5072 }
5073 return;
5074 case 'N': /* Condition, (N)egated */
5075 switch (GET_CODE (x))
5076 {
5077 case EQ:
5078 fputs ("<>", file); break;
5079 case NE:
5080 fputs ("=", file); break;
5081 case GT:
5082 fputs ("<=", file); break;
5083 case GE:
5084 fputs ("<", file); break;
5085 case GEU:
5086 fputs ("<<", file); break;
5087 case GTU:
5088 fputs ("<<=", file); break;
5089 case LT:
5090 fputs (">=", file); break;
5091 case LE:
5092 fputs (">", file); break;
5093 case LEU:
5094 fputs (">>", file); break;
5095 case LTU:
5096 fputs (">>=", file); break;
5097 default:
5098 gcc_unreachable ();
5099 }
5100 return;
5101 /* For floating point comparisons. Note that the output
5102 predicates are the complement of the desired mode. The
5103 conditions for GT, GE, LT, LE and LTGT cause an invalid
5104 operation exception if the result is unordered and this
5105 exception is enabled in the floating-point status register. */
5106 case 'Y':
5107 switch (GET_CODE (x))
5108 {
5109 case EQ:
5110 fputs ("!=", file); break;
5111 case NE:
5112 fputs ("=", file); break;
5113 case GT:
5114 fputs ("!>", file); break;
5115 case GE:
5116 fputs ("!>=", file); break;
5117 case LT:
5118 fputs ("!<", file); break;
5119 case LE:
5120 fputs ("!<=", file); break;
5121 case LTGT:
5122 fputs ("!<>", file); break;
5123 case UNLE:
5124 fputs ("!?<=", file); break;
5125 case UNLT:
5126 fputs ("!?<", file); break;
5127 case UNGE:
5128 fputs ("!?>=", file); break;
5129 case UNGT:
5130 fputs ("!?>", file); break;
5131 case UNEQ:
5132 fputs ("!?=", file); break;
5133 case UNORDERED:
5134 fputs ("!?", file); break;
5135 case ORDERED:
5136 fputs ("?", file); break;
5137 default:
5138 gcc_unreachable ();
5139 }
5140 return;
5141 case 'S': /* Condition, operands are (S)wapped. */
5142 switch (GET_CODE (x))
5143 {
5144 case EQ:
5145 fputs ("=", file); break;
5146 case NE:
5147 fputs ("<>", file); break;
5148 case GT:
5149 fputs ("<", file); break;
5150 case GE:
5151 fputs ("<=", file); break;
5152 case GEU:
5153 fputs ("<<=", file); break;
5154 case GTU:
5155 fputs ("<<", file); break;
5156 case LT:
5157 fputs (">", file); break;
5158 case LE:
5159 fputs (">=", file); break;
5160 case LEU:
5161 fputs (">>=", file); break;
5162 case LTU:
5163 fputs (">>", file); break;
5164 default:
5165 gcc_unreachable ();
5166 }
5167 return;
5168 case 'B': /* Condition, (B)oth swapped and negate. */
5169 switch (GET_CODE (x))
5170 {
5171 case EQ:
5172 fputs ("<>", file); break;
5173 case NE:
5174 fputs ("=", file); break;
5175 case GT:
5176 fputs (">=", file); break;
5177 case GE:
5178 fputs (">", file); break;
5179 case GEU:
5180 fputs (">>", file); break;
5181 case GTU:
5182 fputs (">>=", file); break;
5183 case LT:
5184 fputs ("<=", file); break;
5185 case LE:
5186 fputs ("<", file); break;
5187 case LEU:
5188 fputs ("<<", file); break;
5189 case LTU:
5190 fputs ("<<=", file); break;
5191 default:
5192 gcc_unreachable ();
5193 }
5194 return;
5195 case 'k':
5196 gcc_assert (GET_CODE (x) == CONST_INT);
5197 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5198 return;
5199 case 'Q':
5200 gcc_assert (GET_CODE (x) == CONST_INT);
5201 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5202 return;
5203 case 'L':
5204 gcc_assert (GET_CODE (x) == CONST_INT);
5205 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5206 return;
5207 case 'O':
5208 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5209 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5210 return;
5211 case 'p':
5212 gcc_assert (GET_CODE (x) == CONST_INT);
5213 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5214 return;
5215 case 'P':
5216 gcc_assert (GET_CODE (x) == CONST_INT);
5217 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5218 return;
5219 case 'I':
5220 if (GET_CODE (x) == CONST_INT)
5221 fputs ("i", file);
5222 return;
5223 case 'M':
5224 case 'F':
5225 switch (GET_CODE (XEXP (x, 0)))
5226 {
5227 case PRE_DEC:
5228 case PRE_INC:
5229 if (ASSEMBLER_DIALECT == 0)
5230 fputs ("s,mb", file);
5231 else
5232 fputs (",mb", file);
5233 break;
5234 case POST_DEC:
5235 case POST_INC:
5236 if (ASSEMBLER_DIALECT == 0)
5237 fputs ("s,ma", file);
5238 else
5239 fputs (",ma", file);
5240 break;
5241 case PLUS:
5242 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5243 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5244 {
5245 if (ASSEMBLER_DIALECT == 0)
5246 fputs ("x", file);
5247 }
5248 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5249 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5250 {
5251 if (ASSEMBLER_DIALECT == 0)
5252 fputs ("x,s", file);
5253 else
5254 fputs (",s", file);
5255 }
5256 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5257 fputs ("s", file);
5258 break;
5259 default:
5260 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5261 fputs ("s", file);
5262 break;
5263 }
5264 return;
5265 case 'G':
5266 pa_output_global_address (file, x, 0);
5267 return;
5268 case 'H':
5269 pa_output_global_address (file, x, 1);
5270 return;
5271 case 0: /* Don't do anything special */
5272 break;
5273 case 'Z':
5274 {
5275 unsigned op[3];
5276 compute_zdepwi_operands (INTVAL (x), op);
5277 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5278 return;
5279 }
5280 case 'z':
5281 {
5282 unsigned op[3];
5283 compute_zdepdi_operands (INTVAL (x), op);
5284 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5285 return;
5286 }
5287 case 'c':
5288 /* We can get here from a .vtable_inherit due to our
5289 CONSTANT_ADDRESS_P rejecting perfectly good constant
5290 addresses. */
5291 break;
5292 default:
5293 gcc_unreachable ();
5294 }
5295 if (GET_CODE (x) == REG)
5296 {
5297 fputs (reg_names [REGNO (x)], file);
5298 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5299 {
5300 fputs ("R", file);
5301 return;
5302 }
5303 if (FP_REG_P (x)
5304 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5305 && (REGNO (x) & 1) == 0)
5306 fputs ("L", file);
5307 }
5308 else if (GET_CODE (x) == MEM)
5309 {
5310 int size = GET_MODE_SIZE (GET_MODE (x));
5311 rtx base = NULL_RTX;
5312 switch (GET_CODE (XEXP (x, 0)))
5313 {
5314 case PRE_DEC:
5315 case POST_DEC:
5316 base = XEXP (XEXP (x, 0), 0);
5317 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5318 break;
5319 case PRE_INC:
5320 case POST_INC:
5321 base = XEXP (XEXP (x, 0), 0);
5322 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5323 break;
5324 case PLUS:
5325 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5326 fprintf (file, "%s(%s)",
5327 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5328 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5329 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5330 fprintf (file, "%s(%s)",
5331 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5332 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5333 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5334 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5335 {
5336 /* Because the REG_POINTER flag can get lost during reload,
5337 pa_legitimate_address_p canonicalizes the order of the
5338 index and base registers in the combined move patterns. */
5339 rtx base = XEXP (XEXP (x, 0), 1);
5340 rtx index = XEXP (XEXP (x, 0), 0);
5341
5342 fprintf (file, "%s(%s)",
5343 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5344 }
5345 else
5346 output_address (XEXP (x, 0));
5347 break;
5348 default:
5349 output_address (XEXP (x, 0));
5350 break;
5351 }
5352 }
5353 else
5354 output_addr_const (file, x);
5355 }
5356
5357 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5358
5359 void
5360 pa_output_global_address (FILE *file, rtx x, int round_constant)
5361 {
5362
5363 /* Imagine (high (const (plus ...))). */
5364 if (GET_CODE (x) == HIGH)
5365 x = XEXP (x, 0);
5366
5367 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5368 output_addr_const (file, x);
5369 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5370 {
5371 output_addr_const (file, x);
5372 fputs ("-$global$", file);
5373 }
5374 else if (GET_CODE (x) == CONST)
5375 {
5376 const char *sep = "";
5377 int offset = 0; /* assembler wants -$global$ at end */
5378 rtx base = NULL_RTX;
5379
5380 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5381 {
5382 case SYMBOL_REF:
5383 base = XEXP (XEXP (x, 0), 0);
5384 output_addr_const (file, base);
5385 break;
5386 case CONST_INT:
5387 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5388 break;
5389 default:
5390 gcc_unreachable ();
5391 }
5392
5393 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5394 {
5395 case SYMBOL_REF:
5396 base = XEXP (XEXP (x, 0), 1);
5397 output_addr_const (file, base);
5398 break;
5399 case CONST_INT:
5400 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5401 break;
5402 default:
5403 gcc_unreachable ();
5404 }
5405
5406 /* How bogus. The compiler is apparently responsible for
5407 rounding the constant if it uses an LR field selector.
5408
5409 The linker and/or assembler seem a better place since
5410 they have to do this kind of thing already.
5411
5412 If we fail to do this, HP's optimizing linker may eliminate
5413 an addil, but not update the ldw/stw/ldo instruction that
5414 uses the result of the addil. */
5415 if (round_constant)
5416 offset = ((offset + 0x1000) & ~0x1fff);
5417
5418 switch (GET_CODE (XEXP (x, 0)))
5419 {
5420 case PLUS:
5421 if (offset < 0)
5422 {
5423 offset = -offset;
5424 sep = "-";
5425 }
5426 else
5427 sep = "+";
5428 break;
5429
5430 case MINUS:
5431 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5432 sep = "-";
5433 break;
5434
5435 default:
5436 gcc_unreachable ();
5437 }
5438
5439 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5440 fputs ("-$global$", file);
5441 if (offset)
5442 fprintf (file, "%s%d", sep, offset);
5443 }
5444 else
5445 output_addr_const (file, x);
5446 }
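/* A sketch of the forms produced above for a hypothetical symbol
   "foo" on a non-PIC SOM target:

   foo                  - read-only operand
   foo-$global$         - ordinary data symbol
   foo-$global$+8       - (const (plus (symbol_ref "foo") (const_int 8)))
   */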
5447
5448 /* Output boilerplate text to appear at the beginning of the file.
5449 There are several possible versions. */
5450 #define aputs(x) fputs(x, asm_out_file)
5451 static inline void
5452 pa_file_start_level (void)
5453 {
5454 if (TARGET_64BIT)
5455 aputs ("\t.LEVEL 2.0w\n");
5456 else if (TARGET_PA_20)
5457 aputs ("\t.LEVEL 2.0\n");
5458 else if (TARGET_PA_11)
5459 aputs ("\t.LEVEL 1.1\n");
5460 else
5461 aputs ("\t.LEVEL 1.0\n");
5462 }
5463
5464 static inline void
5465 pa_file_start_space (int sortspace)
5466 {
5467 aputs ("\t.SPACE $PRIVATE$");
5468 if (sortspace)
5469 aputs (",SORT=16");
5470 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5471 if (flag_tm)
5472 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5473 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5474 "\n\t.SPACE $TEXT$");
5475 if (sortspace)
5476 aputs (",SORT=8");
5477 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5478 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5479 }
5480
5481 static inline void
5482 pa_file_start_file (int want_version)
5483 {
5484 if (write_symbols != NO_DEBUG)
5485 {
5486 output_file_directive (asm_out_file, main_input_filename);
5487 if (want_version)
5488 aputs ("\t.version\t\"01.01\"\n");
5489 }
5490 }
5491
5492 static inline void
5493 pa_file_start_mcount (const char *aswhat)
5494 {
5495 if (profile_flag)
5496 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5497 }
5498
5499 static void
5500 pa_elf_file_start (void)
5501 {
5502 pa_file_start_level ();
5503 pa_file_start_mcount ("ENTRY");
5504 pa_file_start_file (0);
5505 }
5506
5507 static void
5508 pa_som_file_start (void)
5509 {
5510 pa_file_start_level ();
5511 pa_file_start_space (0);
5512 aputs ("\t.IMPORT $global$,DATA\n"
5513 "\t.IMPORT $$dyncall,MILLICODE\n");
5514 pa_file_start_mcount ("CODE");
5515 pa_file_start_file (0);
5516 }
5517
5518 static void
5519 pa_linux_file_start (void)
5520 {
5521 pa_file_start_file (1);
5522 pa_file_start_level ();
5523 pa_file_start_mcount ("CODE");
5524 }
5525
5526 static void
5527 pa_hpux64_gas_file_start (void)
5528 {
5529 pa_file_start_level ();
5530 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5531 if (profile_flag)
5532 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5533 #endif
5534 pa_file_start_file (1);
5535 }
5536
5537 static void
5538 pa_hpux64_hpas_file_start (void)
5539 {
5540 pa_file_start_level ();
5541 pa_file_start_space (1);
5542 pa_file_start_mcount ("CODE");
5543 pa_file_start_file (0);
5544 }
5545 #undef aputs
5546
5547 /* Search the deferred plabel list for SYMBOL and return its internal
5548 label. If an entry for SYMBOL is not found, a new entry is created. */
5549
5550 rtx
5551 pa_get_deferred_plabel (rtx symbol)
5552 {
5553 const char *fname = XSTR (symbol, 0);
5554 size_t i;
5555
5556 /* See if we have already put this function on the list of deferred
5557 plabels. This list is generally small, so a linear search is not
5558 too ugly. If it proves too slow, replace it with something faster. */
5559 for (i = 0; i < n_deferred_plabels; i++)
5560 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5561 break;
5562
5563 /* If the deferred plabel list is empty, or this entry was not found
5564 on the list, create a new entry on the list. */
5565 if (deferred_plabels == NULL || i == n_deferred_plabels)
5566 {
5567 tree id;
5568
5569 if (deferred_plabels == 0)
5570 deferred_plabels = ggc_alloc<deferred_plabel> ();
5571 else
5572 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5573 deferred_plabels,
5574 n_deferred_plabels + 1);
5575
5576 i = n_deferred_plabels++;
5577 deferred_plabels[i].internal_label = gen_label_rtx ();
5578 deferred_plabels[i].symbol = symbol;
5579
5580 /* Gross. We have just implicitly taken the address of this
5581 function. Mark it in the same manner as assemble_name. */
5582 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5583 if (id)
5584 mark_referenced (id);
5585 }
5586
5587 return deferred_plabels[i].internal_label;
5588 }
5589
5590 static void
5591 output_deferred_plabels (void)
5592 {
5593 size_t i;
5594
5595 /* If we have some deferred plabels, then we need to switch into the
5596 data or readonly data section, and align it to a 4 byte (8 byte
5597 for TARGET_64BIT) boundary before outputting the deferred plabels. */
5598 if (n_deferred_plabels)
5599 {
5600 switch_to_section (flag_pic ? data_section : readonly_data_section);
5601 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5602 }
5603
5604 /* Now output the deferred plabels. */
5605 for (i = 0; i < n_deferred_plabels; i++)
5606 {
5607 targetm.asm_out.internal_label (asm_out_file, "L",
5608 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5609 assemble_integer (deferred_plabels[i].symbol,
5610 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5611 }
5612 }
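
/* Illustratively, each entry emitted above is an internal label followed
   by a single pointer-sized integer holding the function symbol's
   address, e.g. for the 32-bit case (label number and directive spelling
   are hypothetical and depend on the target headers):

       L$1234:
               .word   foo

   pa_get_deferred_plabel returns the internal label so the function
   address can later be loaded from this slot.  */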
5613
5614 /* Initialize optabs to point to emulation routines. */
5615
5616 static void
5617 pa_init_libfuncs (void)
5618 {
5619 if (HPUX_LONG_DOUBLE_LIBRARY)
5620 {
5621 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5622 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5623 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5624 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5625 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5626 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5627 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5628 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5629 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5630
5631 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5632 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5633 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5634 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5635 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5636 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5637 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5638
5639 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5640 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5641 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5642 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5643
5644 set_conv_libfunc (sfix_optab, SImode, TFmode,
5645 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5646 : "_U_Qfcnvfxt_quad_to_sgl");
5647 set_conv_libfunc (sfix_optab, DImode, TFmode,
5648 "_U_Qfcnvfxt_quad_to_dbl");
5649 set_conv_libfunc (ufix_optab, SImode, TFmode,
5650 "_U_Qfcnvfxt_quad_to_usgl");
5651 set_conv_libfunc (ufix_optab, DImode, TFmode,
5652 "_U_Qfcnvfxt_quad_to_udbl");
5653
5654 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5655 "_U_Qfcnvxf_sgl_to_quad");
5656 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5657 "_U_Qfcnvxf_dbl_to_quad");
5658 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5659 "_U_Qfcnvxf_usgl_to_quad");
5660 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5661 "_U_Qfcnvxf_udbl_to_quad");
5662 }
5663
5664 if (TARGET_SYNC_LIBCALL)
5665 init_sync_libfuncs (UNITS_PER_WORD);
5666 }
5667
5668 /* HP's millicode routines mean something special to the assembler.
5669 Keep track of which ones we have used. */
5670
5671 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5672 static void import_milli (enum millicodes);
5673 static char imported[(int) end1000];
5674 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5675 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5676 #define MILLI_START 10
5677
5678 static void
5679 import_milli (enum millicodes code)
5680 {
5681 char str[sizeof (import_string)];
5682
5683 if (!imported[(int) code])
5684 {
5685 imported[(int) code] = 1;
5686 strcpy (str, import_string);
5687 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5688 output_asm_insn (str, 0);
5689 }
5690 }
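
/* A standalone sketch of the string surgery above (not GCC code).
   MILLI_START is 10 because ".IMPORT $$" is exactly ten characters,
   so the four-character millicode name overwrites the "...." field:

     #include <stdio.h>
     #include <string.h>

     int
     main (void)
     {
       char str[] = ".IMPORT $$....,MILLICODE";
       strncpy (str + 10, "mulI", 4);
       puts (str);   // prints ".IMPORT $$mulI,MILLICODE"
       return 0;
     }
*/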
5691
5692 /* The register constraints have put the operands and return value in
5693 the proper registers. */
5694
5695 const char *
5696 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5697 {
5698 import_milli (mulI);
5699 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5700 }
5701
5702 /* Emit the rtl for doing a division by a constant. */
5703
5704 /* Do magic division millicodes exist for this value? */
5705 const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
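
/* A standalone sketch (not GCC code) listing the divisors the table
   above marks as having dedicated millicode routines:

     #include <stdio.h>

     static const int magic[16] =
       {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

     int
     main (void)
     {
       int n;
       for (n = 1; n < 16; n++)
         if (magic[n])
           printf ("$$divI_%d $$divU_%d\n", n, n);
       return 0;
     }

   This prints entries for 3, 5, 6, 7, 9, 10, 12, 14 and 15, the
   divisors pa_emit_hpdiv_const accepts below.  */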
5706
5707 /* We'll use an array to keep track of the magic millicodes and
5708 whether or not we've used them already. [n][0] is signed, [n][1] is
5709 unsigned. */
5710
5711 static int div_milli[16][2];
5712
5713 int
5714 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5715 {
5716 if (GET_CODE (operands[2]) == CONST_INT
5717 && INTVAL (operands[2]) > 0
5718 && INTVAL (operands[2]) < 16
5719 && pa_magic_milli[INTVAL (operands[2])])
5720 {
5721 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5722
5723 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5724 emit
5725 (gen_rtx_PARALLEL
5726 (VOIDmode,
5727 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5728 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5729 SImode,
5730 gen_rtx_REG (SImode, 26),
5731 operands[2])),
5732 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5733 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5734 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5735 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5736 gen_rtx_CLOBBER (VOIDmode, ret))));
5737 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5738 return 1;
5739 }
5740 return 0;
5741 }
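
/* Schematically, the PARALLEL built above has the shape

       (parallel [(set (reg:SI 29)
                       (div:SI (reg:SI 26) (const_int N)))
                  (clobber (reg ...))        ;; operands[4]
                  (clobber (reg ...))        ;; operands[3]
                  (clobber (reg:SI 26))
                  (clobber (reg:SI 25))
                  (clobber (reg:SI 31))])    ;; %r2 when TARGET_64BIT

   (UDIV instead of DIV when unsignedp): the millicode divide takes its
   dividend in %r26 and returns the quotient in %r29.  */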
5742
5743 const char *
5744 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5745 {
5746 int divisor;
5747
5748 /* If the divisor is a constant, try to use one of the special
5749 opcodes. */
5750 if (GET_CODE (operands[0]) == CONST_INT)
5751 {
5752 static char buf[100];
5753 divisor = INTVAL (operands[0]);
5754 if (!div_milli[divisor][unsignedp])
5755 {
5756 div_milli[divisor][unsignedp] = 1;
5757 if (unsignedp)
5758 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5759 else
5760 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5761 }
5762 if (unsignedp)
5763 {
5764 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5765 INTVAL (operands[0]));
5766 return pa_output_millicode_call (insn,
5767 gen_rtx_SYMBOL_REF (SImode, buf));
5768 }
5769 else
5770 {
5771 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5772 INTVAL (operands[0]));
5773 return pa_output_millicode_call (insn,
5774 gen_rtx_SYMBOL_REF (SImode, buf));
5775 }
5776 }
5777 /* Divisor isn't a special constant. */
5778 else
5779 {
5780 if (unsignedp)
5781 {
5782 import_milli (divU);
5783 return pa_output_millicode_call (insn,
5784 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5785 }
5786 else
5787 {
5788 import_milli (divI);
5789 return pa_output_millicode_call (insn,
5790 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5791 }
5792 }
5793 }
5794
5795 /* Output a $$rem millicode to do mod. */
5796
5797 const char *
5798 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5799 {
5800 if (unsignedp)
5801 {
5802 import_milli (remU);
5803 return pa_output_millicode_call (insn,
5804 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5805 }
5806 else
5807 {
5808 import_milli (remI);
5809 return pa_output_millicode_call (insn,
5810 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5811 }
5812 }
5813
5814 void
5815 pa_output_arg_descriptor (rtx call_insn)
5816 {
5817 const char *arg_regs[4];
5818 enum machine_mode arg_mode;
5819 rtx link;
5820 int i, output_flag = 0;
5821 int regno;
5822
5823 /* We neither need nor want argument location descriptors for the
5824 64-bit runtime environment or the ELF32 environment. */
5825 if (TARGET_64BIT || TARGET_ELF32)
5826 return;
5827
5828 for (i = 0; i < 4; i++)
5829 arg_regs[i] = 0;
5830
5831 /* Specify explicitly that no argument relocations should take place
5832 if using the portable runtime calling conventions. */
5833 if (TARGET_PORTABLE_RUNTIME)
5834 {
5835 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5836 asm_out_file);
5837 return;
5838 }
5839
5840 gcc_assert (CALL_P (call_insn));
5841 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5842 link; link = XEXP (link, 1))
5843 {
5844 rtx use = XEXP (link, 0);
5845
5846 if (! (GET_CODE (use) == USE
5847 && GET_CODE (XEXP (use, 0)) == REG
5848 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5849 continue;
5850
5851 arg_mode = GET_MODE (XEXP (use, 0));
5852 regno = REGNO (XEXP (use, 0));
5853 if (regno >= 23 && regno <= 26)
5854 {
5855 arg_regs[26 - regno] = "GR";
5856 if (arg_mode == DImode)
5857 arg_regs[25 - regno] = "GR";
5858 }
5859 else if (regno >= 32 && regno <= 39)
5860 {
5861 if (arg_mode == SFmode)
5862 arg_regs[(regno - 32) / 2] = "FR";
5863 else
5864 {
5865 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5866 arg_regs[(regno - 34) / 2] = "FR";
5867 arg_regs[(regno - 34) / 2 + 1] = "FU";
5868 #else
5869 arg_regs[(regno - 34) / 2] = "FU";
5870 arg_regs[(regno - 34) / 2 + 1] = "FR";
5871 #endif
5872 }
5873 }
5874 }
5875 fputs ("\t.CALL ", asm_out_file);
5876 for (i = 0; i < 4; i++)
5877 {
5878 if (arg_regs[i])
5879 {
5880 if (output_flag++)
5881 fputc (',', asm_out_file);
5882 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5883 }
5884 }
5885 fputc ('\n', asm_out_file);
5886 }
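
/* As a concrete example of the mapping above (register choice is
   illustrative): a call whose CALL_INSN_FUNCTION_USAGE lists an SImode
   value in hard register 26 and an SFmode value in hard register 34
   produces

       .CALL ARGW0=GR,ARGW1=FR

   since 26 - 26 == 0 selects ARGW0 and (34 - 32) / 2 == 1 selects
   ARGW1.  */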
5887 \f
5888 /* Inform reload about cases where moving X with a mode MODE to or from
5889 a register in RCLASS requires an extra scratch or immediate register.
5890 Return the class needed for the immediate register. */
5891
5892 static reg_class_t
5893 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5894 enum machine_mode mode, secondary_reload_info *sri)
5895 {
5896 int regno;
5897 enum reg_class rclass = (enum reg_class) rclass_i;
5898
5899 /* Handle the easy stuff first. */
5900 if (rclass == R1_REGS)
5901 return NO_REGS;
5902
5903 if (REG_P (x))
5904 {
5905 regno = REGNO (x);
5906 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5907 return NO_REGS;
5908 }
5909 else
5910 regno = -1;
5911
5912 /* If we have something like (mem (mem (...))), we can safely assume the
5913 inner MEM will end up in a general register after reloading, so there's
5914 no need for a secondary reload. */
5915 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5916 return NO_REGS;
5917
5918 /* Trying to load a constant into a FP register during PIC code
5919 generation requires %r1 as a scratch register. For float modes,
5920 the only legitimate constant is CONST0_RTX. However, there are
5921 a few patterns that accept constant double operands. */
5922 if (flag_pic
5923 && FP_REG_CLASS_P (rclass)
5924 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5925 {
5926 switch (mode)
5927 {
5928 case SImode:
5929 sri->icode = CODE_FOR_reload_insi_r1;
5930 break;
5931
5932 case DImode:
5933 sri->icode = CODE_FOR_reload_indi_r1;
5934 break;
5935
5936 case SFmode:
5937 sri->icode = CODE_FOR_reload_insf_r1;
5938 break;
5939
5940 case DFmode:
5941 sri->icode = CODE_FOR_reload_indf_r1;
5942 break;
5943
5944 default:
5945 gcc_unreachable ();
5946 }
5947 return NO_REGS;
5948 }
5949
5950 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5951 register when we're generating PIC code or when the operand isn't
5952 readonly. */
5953 if (pa_symbolic_expression_p (x))
5954 {
5955 if (GET_CODE (x) == HIGH)
5956 x = XEXP (x, 0);
5957
5958 if (flag_pic || !read_only_operand (x, VOIDmode))
5959 {
5960 switch (mode)
5961 {
5962 case SImode:
5963 sri->icode = CODE_FOR_reload_insi_r1;
5964 break;
5965
5966 case DImode:
5967 sri->icode = CODE_FOR_reload_indi_r1;
5968 break;
5969
5970 default:
5971 gcc_unreachable ();
5972 }
5973 return NO_REGS;
5974 }
5975 }
5976
5977 /* Profiling showed the PA port spends about 1.3% of its compilation
5978 time in true_regnum from calls inside pa_secondary_reload_class. */
5979 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5980 regno = true_regnum (x);
5981
5982 /* Handle reloads for floating point loads and stores. */
5983 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5984 && FP_REG_CLASS_P (rclass))
5985 {
5986 if (MEM_P (x))
5987 {
5988 x = XEXP (x, 0);
5989
5990 /* We don't need an intermediate for indexed and LO_SUM DLT
5991 memory addresses. When INT14_OK_STRICT is true, it might
5992 appear that we could directly allow register indirect
5993 memory addresses. However, this doesn't work because we
5994 don't support SUBREGs in floating-point register copies
5995 and reload doesn't tell us when it's going to use a SUBREG. */
5996 if (IS_INDEX_ADDR_P (x)
5997 || IS_LO_SUM_DLT_ADDR_P (x))
5998 return NO_REGS;
5999
6000 /* Request intermediate general register. */
6001 return GENERAL_REGS;
6002 }
6003
6004 /* Request a secondary reload with a general scratch register
6005 for everything else. ??? Could symbolic operands be handled
6006 directly when generating non-pic PA 2.0 code? */
6007 sri->icode = (in_p
6008 ? direct_optab_handler (reload_in_optab, mode)
6009 : direct_optab_handler (reload_out_optab, mode));
6010 return NO_REGS;
6011 }
6012
6013 /* A SAR<->FP register copy requires an intermediate general register
6014 and secondary memory. We need a secondary reload with a general
6015 scratch register for spills. */
6016 if (rclass == SHIFT_REGS)
6017 {
6018 /* Handle spill. */
6019 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6020 {
6021 sri->icode = (in_p
6022 ? direct_optab_handler (reload_in_optab, mode)
6023 : direct_optab_handler (reload_out_optab, mode));
6024 return NO_REGS;
6025 }
6026
6027 /* Handle FP copy. */
6028 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6029 return GENERAL_REGS;
6030 }
6031
6032 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6033 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6034 && FP_REG_CLASS_P (rclass))
6035 return GENERAL_REGS;
6036
6037 return NO_REGS;
6038 }
6039
6040 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6041 is only marked as live on entry by df-scan when it is a fixed
6042 register. It isn't a fixed register in the 64-bit runtime,
6043 so we need to mark it here. */
6044
6045 static void
6046 pa_extra_live_on_entry (bitmap regs)
6047 {
6048 if (TARGET_64BIT)
6049 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6050 }
6051
6052 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6053 to prevent it from being deleted. */
6054
6055 rtx
6056 pa_eh_return_handler_rtx (void)
6057 {
6058 rtx tmp;
6059
6060 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6061 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6062 tmp = gen_rtx_MEM (word_mode, tmp);
6063 tmp->volatil = 1;
6064 return tmp;
6065 }
6066
6067 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6068 by invisible reference. As a GCC extension, we also pass anything
6069 with a zero or variable size by reference.
6070
6071 The 64-bit runtime does not describe passing any types by invisible
6072 reference. The internals of GCC can't currently handle passing
6073 empty structures, and zero or variable length arrays when they are
6074 not passed entirely on the stack or by reference. Thus, as a GCC
6075 extension, we pass these types by reference. The HP compiler doesn't
6076 support these types, so hopefully there shouldn't be any compatibility
6077 issues. This may have to be revisited when HP releases a C99 compiler
6078 or updates the ABI. */
6079
6080 static bool
6081 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6082 enum machine_mode mode, const_tree type,
6083 bool named ATTRIBUTE_UNUSED)
6084 {
6085 HOST_WIDE_INT size;
6086
6087 if (type)
6088 size = int_size_in_bytes (type);
6089 else
6090 size = GET_MODE_SIZE (mode);
6091
6092 if (TARGET_64BIT)
6093 return size <= 0;
6094 else
6095 return size <= 0 || size > 8;
6096 }
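
/* A standalone sketch of the rule above (not GCC code; the helper name
   is illustrative):

     static int
     pass_by_ref (long size, int is_64bit)
     {
       return is_64bit ? size <= 0 : (size <= 0 || size > 8);
     }

   Thus pass_by_ref (12, 0) is true -- a 12-byte struct goes by invisible
   reference in the 32-bit runtime -- while pass_by_ref (12, 1) is false,
   as the 64-bit runtime passes it by value.  */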
6097
6098 enum direction
6099 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6100 {
6101 if (mode == BLKmode
6102 || (TARGET_64BIT
6103 && type
6104 && (AGGREGATE_TYPE_P (type)
6105 || TREE_CODE (type) == COMPLEX_TYPE
6106 || TREE_CODE (type) == VECTOR_TYPE)))
6107 {
6108 /* Return none if justification is not required. */
6109 if (type
6110 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6111 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6112 return none;
6113
6114 /* The directions set here are ignored when a BLKmode argument larger
6115 than a word is placed in a register. Different code is used for
6116 the stack and registers. This makes it difficult to have a
6117 consistent data representation for both the stack and registers.
6118 For both runtimes, the justification and padding for arguments on
6119 the stack and in registers should be identical. */
6120 if (TARGET_64BIT)
6121 /* The 64-bit runtime specifies left justification for aggregates. */
6122 return upward;
6123 else
6124 /* The 32-bit runtime architecture specifies right justification.
6125 When the argument is passed on the stack, the argument is padded
6126 with garbage on the left. The HP compiler pads with zeros. */
6127 return downward;
6128 }
6129
6130 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6131 return downward;
6132 else
6133 return none;
6134 }
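
/* For example, a 3-byte BLKmode argument in the 32-bit runtime gets
   `downward' padding: it is right-justified in its 4-byte slot, so the
   data occupies the last three bytes and the leading byte is (garbage)
   padding.  Under TARGET_64BIT the same argument is left-justified
   (`upward') in its 8-byte slot.  */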
6135
6136 \f
6137 /* Do what is necessary for `va_start'. We look at the current function
6138 to determine if stdargs or varargs is used and fill in an initial
6139 va_list. A pointer to this constructor is returned. */
6140
6141 static rtx
6142 hppa_builtin_saveregs (void)
6143 {
6144 rtx offset, dest;
6145 tree fntype = TREE_TYPE (current_function_decl);
6146 int argadj = ((!stdarg_p (fntype))
6147 ? UNITS_PER_WORD : 0);
6148
6149 if (argadj)
6150 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6151 else
6152 offset = crtl->args.arg_offset_rtx;
6153
6154 if (TARGET_64BIT)
6155 {
6156 int i, off;
6157
6158 /* Adjust for varargs/stdarg differences. */
6159 if (argadj)
6160 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6161 else
6162 offset = crtl->args.arg_offset_rtx;
6163
6164 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6165 from the incoming arg pointer and growing to larger addresses. */
6166 for (i = 26, off = -64; i >= 19; i--, off += 8)
6167 emit_move_insn (gen_rtx_MEM (word_mode,
6168 plus_constant (Pmode,
6169 arg_pointer_rtx, off)),
6170 gen_rtx_REG (word_mode, i));
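
      /* Concretely, the loop above stores %r26 at -64(ap), %r25 at
         -56(ap), ..., %r19 at -8(ap), rebuilding the register save
         area for the anonymous arguments.  */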
6171
6172 /* The incoming args pointer points just beyond the flushback area;
6173 normally this is not a serious concern. However, when we are doing
6174 varargs/stdargs we want to make the arg pointer point to the start
6175 of the incoming argument area. */
6176 emit_move_insn (virtual_incoming_args_rtx,
6177 plus_constant (Pmode, arg_pointer_rtx, -64));
6178
6179 /* Now return a pointer to the first anonymous argument. */
6180 return copy_to_reg (expand_binop (Pmode, add_optab,
6181 virtual_incoming_args_rtx,
6182 offset, 0, 0, OPTAB_LIB_WIDEN));
6183 }
6184
6185 /* Store general registers on the stack. */
6186 dest = gen_rtx_MEM (BLKmode,
6187 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6188 -16));
6189 set_mem_alias_set (dest, get_varargs_alias_set ());
6190 set_mem_align (dest, BITS_PER_WORD);
6191 move_block_from_reg (23, dest, 4);
6192
6193 /* move_block_from_reg will emit code to store the argument registers
6194 individually as scalar stores.
6195
6196 However, other insns may later load from the same addresses for
6197 a structure load (passing a struct to a varargs routine).
6198
6199 The alias code assumes that such aliasing can never happen, so we
6200 have to keep memory referencing insns from moving up beyond the
6201 last argument register store. So we emit a blockage insn here. */
6202 emit_insn (gen_blockage ());
6203
6204 return copy_to_reg (expand_binop (Pmode, add_optab,
6205 crtl->args.internal_arg_pointer,
6206 offset, 0, 0, OPTAB_LIB_WIDEN));
6207 }
6208
6209 static void
6210 hppa_va_start (tree valist, rtx nextarg)
6211 {
6212 nextarg = expand_builtin_saveregs ();
6213 std_expand_builtin_va_start (valist, nextarg);
6214 }
6215
6216 static tree
6217 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6218 gimple_seq *post_p)
6219 {
6220 if (TARGET_64BIT)
6221 {
6222 /* Args grow upward. We can use the generic routines. */
6223 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6224 }
6225 else /* !TARGET_64BIT */
6226 {
6227 tree ptr = build_pointer_type (type);
6228 tree valist_type;
6229 tree t, u;
6230 unsigned int size, ofs;
6231 bool indirect;
6232
6233 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6234 if (indirect)
6235 {
6236 type = ptr;
6237 ptr = build_pointer_type (type);
6238 }
6239 size = int_size_in_bytes (type);
6240 valist_type = TREE_TYPE (valist);
6241
6242 /* Args grow down. Not handled by generic routines. */
6243
6244 u = fold_convert (sizetype, size_in_bytes (type));
6245 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6246 t = fold_build_pointer_plus (valist, u);
6247
6248 /* Align to 4 or 8 byte boundary depending on argument size. */
6249
6250 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6251 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6252 t = fold_convert (valist_type, t);
6253
6254 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6255
6256 ofs = (8 - size) % 4;
6257 if (ofs != 0)
6258 t = fold_build_pointer_plus_hwi (t, ofs);
6259
6260 t = fold_convert (ptr, t);
6261 t = build_va_arg_indirect_ref (t);
6262
6263 if (indirect)
6264 t = build_va_arg_indirect_ref (t);
6265
6266 return t;
6267 }
6268 }
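
/* A standalone sketch of the 32-bit pointer arithmetic above (not GCC
   code; the 0x100 starting value is hypothetical):

     static unsigned int
     next_arg (unsigned int valist, unsigned int size)
     {
       unsigned int t = valist - size;
       t &= (size > 4 ? ~7u : ~3u);   // align to an 8 or 4 byte boundary
       t += (8 - size) % 4;           // right-justify small arguments
       return t;
     }

   next_arg (0x100, 2) yields 0xfe -- the halfword sits in the last two
   bytes of its word slot -- while next_arg (0x100, 8) yields 0xf8.  */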
6269
6270 /* True if MODE is valid for the target. By "valid", we mean able to
6271 be manipulated in non-trivial ways. In particular, this means all
6272 the arithmetic is supported.
6273
6274 Currently, TImode is not valid as the HP 64-bit runtime documentation
6275 doesn't document the alignment and calling conventions for this type.
6276 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6277 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6278
6279 static bool
6280 pa_scalar_mode_supported_p (enum machine_mode mode)
6281 {
6282 int precision = GET_MODE_PRECISION (mode);
6283
6284 switch (GET_MODE_CLASS (mode))
6285 {
6286 case MODE_PARTIAL_INT:
6287 case MODE_INT:
6288 if (precision == CHAR_TYPE_SIZE)
6289 return true;
6290 if (precision == SHORT_TYPE_SIZE)
6291 return true;
6292 if (precision == INT_TYPE_SIZE)
6293 return true;
6294 if (precision == LONG_TYPE_SIZE)
6295 return true;
6296 if (precision == LONG_LONG_TYPE_SIZE)
6297 return true;
6298 return false;
6299
6300 case MODE_FLOAT:
6301 if (precision == FLOAT_TYPE_SIZE)
6302 return true;
6303 if (precision == DOUBLE_TYPE_SIZE)
6304 return true;
6305 if (precision == LONG_DOUBLE_TYPE_SIZE)
6306 return true;
6307 return false;
6308
6309 case MODE_DECIMAL_FLOAT:
6310 return false;
6311
6312 default:
6313 gcc_unreachable ();
6314 }
6315 }
6316
6317 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6318 it branches into the delay slot. Otherwise, return FALSE. */
6319
6320 static bool
6321 branch_to_delay_slot_p (rtx insn)
6322 {
6323 rtx jump_insn;
6324
6325 if (dbr_sequence_length ())
6326 return FALSE;
6327
6328 jump_insn = next_active_insn (JUMP_LABEL (insn));
6329 while (insn)
6330 {
6331 insn = next_active_insn (insn);
6332 if (jump_insn == insn)
6333 return TRUE;
6334
6335 /* We can't rely on the length of asms. So, we return FALSE when
6336 the branch is followed by an asm. */
6337 if (!insn
6338 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6339 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6340 || get_attr_length (insn) > 0)
6341 break;
6342 }
6343
6344 return FALSE;
6345 }
6346
6347 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6348
6349 This occurs when INSN has an unfilled delay slot and is followed
6350 by an asm. Disaster can occur if the asm is empty and the jump
6351 branches into the delay slot. So, we add a nop in the delay slot
6352 when this occurs. */
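
/* For example (illustrative assembly):

       comb,=,n %r4,%r5,L$1    ; unfilled, nullified delay slot
       asm-of-unknown-length
   L$1:

   If the asm assembles to nothing, the branch target coincides with the
   branch's own delay slot.  The callers below append "%#" to the output
   template in this case, which expands to an explicit nop when no delay
   insn was scheduled.  */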
6353
6354 static bool
6355 branch_needs_nop_p (rtx insn)
6356 {
6357 rtx jump_insn;
6358
6359 if (dbr_sequence_length ())
6360 return FALSE;
6361
6362 jump_insn = next_active_insn (JUMP_LABEL (insn));
6363 while (insn)
6364 {
6365 insn = next_active_insn (insn);
6366 if (!insn || jump_insn == insn)
6367 return TRUE;
6368
6369 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6370 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6371 && get_attr_length (insn) > 0)
6372 break;
6373 }
6374
6375 return FALSE;
6376 }
6377
6378 /* Return TRUE if INSN, a forward jump insn, can use nullification
6379 to skip the following instruction. This avoids an extra cycle due
6380 to a mis-predicted branch when we fall through. */
6381
6382 static bool
6383 use_skip_p (rtx insn)
6384 {
6385 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6386
6387 while (insn)
6388 {
6389 insn = next_active_insn (insn);
6390
6391 /* We can't rely on the length of asms, so we can't skip asms. */
6392 if (!insn
6393 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6394 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6395 break;
6396 if (get_attr_length (insn) == 4
6397 && jump_insn == next_active_insn (insn))
6398 return TRUE;
6399 if (get_attr_length (insn) > 0)
6400 break;
6401 }
6402
6403 return FALSE;
6404 }
6405
6406 /* This routine handles all the normal conditional branch sequences we
6407 might need to generate. It handles compare immediate vs compare
6408 register, nullification of delay slots, varying length branches,
6409 negated branches, and all combinations of the above. It returns the
6410 output appropriate to emit the branch corresponding to all given
6411 parameters. */
6412
6413 const char *
6414 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6415 {
6416 static char buf[100];
6417 bool useskip;
6418 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6419 int length = get_attr_length (insn);
6420 int xdelay;
6421
6422 /* A conditional branch to the following instruction (i.e. the delay slot)
6423 is asking for a disaster. This can happen when not optimizing and
6424 when jump optimization fails.
6425
6426 While it is usually safe to emit nothing, this can fail if the
6427 preceding instruction is a nullified branch with an empty delay
6428 slot and the same branch target as this branch. We could check
6429 for this but jump optimization should eliminate nop jumps. It
6430 is always safe to emit a nop. */
6431 if (branch_to_delay_slot_p (insn))
6432 return "nop";
6433
6434 /* The doubleword form of the cmpib instruction doesn't have the LEU
6435 and GTU conditions while the cmpb instruction does. Since we accept
6436 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6437 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6438 operands[2] = gen_rtx_REG (DImode, 0);
6439 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6440 operands[1] = gen_rtx_REG (DImode, 0);
6441
6442 /* If this is a long branch with its delay slot unfilled, set `nullify'
6443 as it can nullify the delay slot and save a nop. */
6444 if (length == 8 && dbr_sequence_length () == 0)
6445 nullify = 1;
6446
6447 /* If this is a short forward conditional branch which did not get
6448 its delay slot filled, the delay slot can still be nullified. */
6449 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6450 nullify = forward_branch_p (insn);
6451
6452 /* A forward branch over a single nullified insn can be done with a
6453 comclr instruction. This avoids a single cycle penalty due to
6454 mis-predicted branch if we fall through (branch not taken). */
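  /* Illustratively (registers hypothetical), instead of

         comb,= %r4,%r5,L$1     ; branch over one insn
         add %r6,%r7,%r8
     L$1:

     we emit

         comclr,= %r4,%r5,%r0   ; nullify the add when %r4 == %r5
         add %r6,%r7,%r8

     so the fall-through path contains no branch at all.  */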
6455 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6456
6457 switch (length)
6458 {
6459 /* All short conditional branches except backwards with an unfilled
6460 delay slot. */
6461 case 4:
6462 if (useskip)
6463 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6464 else
6465 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6466 if (GET_MODE (operands[1]) == DImode)
6467 strcat (buf, "*");
6468 if (negated)
6469 strcat (buf, "%B3");
6470 else
6471 strcat (buf, "%S3");
6472 if (useskip)
6473 strcat (buf, " %2,%r1,%%r0");
6474 else if (nullify)
6475 {
6476 if (branch_needs_nop_p (insn))
6477 strcat (buf, ",n %2,%r1,%0%#");
6478 else
6479 strcat (buf, ",n %2,%r1,%0");
6480 }
6481 else
6482 strcat (buf, " %2,%r1,%0");
6483 break;
6484
6485 /* All long conditionals. Note a short backward branch with an
6486 unfilled delay slot is treated just like a long backward branch
6487 with an unfilled delay slot. */
6488 case 8:
6489 /* Handle weird backwards branch with a filled delay slot
6490 which is nullified. */
6491 if (dbr_sequence_length () != 0
6492 && ! forward_branch_p (insn)
6493 && nullify)
6494 {
6495 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6496 if (GET_MODE (operands[1]) == DImode)
6497 strcat (buf, "*");
6498 if (negated)
6499 strcat (buf, "%S3");
6500 else
6501 strcat (buf, "%B3");
6502 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6503 }
6504 /* Handle short backwards branch with an unfilled delay slot.
6505 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6506 taken and untaken branches. */
6507 else if (dbr_sequence_length () == 0
6508 && ! forward_branch_p (insn)
6509 && INSN_ADDRESSES_SET_P ()
6510 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6511 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6512 {
6513 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6514 if (GET_MODE (operands[1]) == DImode)
6515 strcat (buf, "*");
6516 if (negated)
6517 strcat (buf, "%B3 %2,%r1,%0%#");
6518 else
6519 strcat (buf, "%S3 %2,%r1,%0%#");
6520 }
6521 else
6522 {
6523 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6524 if (GET_MODE (operands[1]) == DImode)
6525 strcat (buf, "*");
6526 if (negated)
6527 strcat (buf, "%S3");
6528 else
6529 strcat (buf, "%B3");
6530 if (nullify)
6531 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6532 else
6533 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6534 }
6535 break;
6536
6537 default:
6538 /* The reversed conditional branch must branch over one additional
6539 instruction if the delay slot is filled and needs to be extracted
6540 by pa_output_lbranch. If the delay slot is empty or this is a
6541 nullified forward branch, the instruction after the reversed
6542 condition branch must be nullified. */
6543 if (dbr_sequence_length () == 0
6544 || (nullify && forward_branch_p (insn)))
6545 {
6546 nullify = 1;
6547 xdelay = 0;
6548 operands[4] = GEN_INT (length);
6549 }
6550 else
6551 {
6552 xdelay = 1;
6553 operands[4] = GEN_INT (length + 4);
6554 }
6555
6556 /* Create a reversed conditional branch which branches around
6557 the following insns. */
6558 if (GET_MODE (operands[1]) != DImode)
6559 {
6560 if (nullify)
6561 {
6562 if (negated)
6563 strcpy (buf,
6564 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6565 else
6566 strcpy (buf,
6567 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6568 }
6569 else
6570 {
6571 if (negated)
6572 strcpy (buf,
6573 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6574 else
6575 strcpy (buf,
6576 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6577 }
6578 }
6579 else
6580 {
6581 if (nullify)
6582 {
6583 if (negated)
6584 strcpy (buf,
6585 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6586 else
6587 strcpy (buf,
6588 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6589 }
6590 else
6591 {
6592 if (negated)
6593 strcpy (buf,
6594 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6595 else
6596 strcpy (buf,
6597 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6598 }
6599 }
6600
6601 output_asm_insn (buf, operands);
6602 return pa_output_lbranch (operands[0], insn, xdelay);
6603 }
6604 return buf;
6605 }
6606
6607 /* This routine handles output of long unconditional branches that
6608 exceed the maximum range of a simple branch instruction. Since
6609 we don't have a register available for the branch, we save register
6610 %r1 in the frame marker, load the branch destination DEST into %r1,
6611 execute the branch, and restore %r1 in the delay slot of the branch.
6612
6613 Since long branches may have an insn in the delay slot and the
6614 delay slot is used to restore %r1, we in general need to extract
6615 this insn and execute it before the branch. However, to facilitate
6616 use of this function by conditional branches, we also provide an
6617 option to not extract the delay insn so that it will be emitted
6618 after the long branch. So, if there is an insn in the delay slot,
6619 it is extracted if XDELAY is nonzero.
6620
6621 The lengths of the various long-branch sequences are 20, 16 and 24
6622 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
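
/* For instance, the non-PIC sequence emitted below is four 4-byte
   instructions (16 bytes; the save slot choice varies as described
   above):

       stw %r1,-20(%r30)       ; save %r1 in the frame marker
       ldil L'target,%r1       ; left (high) part of the address
       be R'target(%sr4,%r1)   ; interspace branch to the target
       ldw -20(%r30),%r1       ; delay slot: restore %r1

   The portable runtime form replaces the be with an ldo/bv pair
   (20 bytes), and the PIC forms add a pc-relative address computation
   (24 bytes).  */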
6623
6624 const char *
6625 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6626 {
6627 rtx xoperands[2];
6628
6629 xoperands[0] = dest;
6630
6631 /* First, free up the delay slot. */
6632 if (xdelay && dbr_sequence_length () != 0)
6633 {
6634 /* We can't handle a jump in the delay slot. */
6635 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6636
6637 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6638 optimize, 0, NULL);
6639
6640 /* Now delete the delay insn. */
6641 SET_INSN_DELETED (NEXT_INSN (insn));
6642 }
6643
6644 /* Output an insn to save %r1. The runtime documentation doesn't
6645 specify whether the "Clean Up" slot in the caller's frame can
6646 be clobbered by the callee. It isn't copied by HP's builtin
6647 alloca, so this suggests that it can be clobbered if necessary.
6648 The "Static Link" location is copied by HP builtin alloca, so
6649 we avoid using it. Using the cleanup slot might be a problem
6650 if we have to interoperate with languages that pass cleanup
6651 information. However, it should be possible to handle these
6652 situations with GCC's asm feature.
6653
6654 The "Current RP" slot is reserved for the called procedure, so
6655 we try to use it when we don't have a frame of our own. It's
6656 rather unlikely that we won't have a frame when we need to emit
6657 a very long branch.
6658
6659 Really the way to go long term is a register scavenger; go to
6660 the target of the jump and find a register which we can use
6661 as a scratch to hold the value in %r1. Then, we wouldn't have
6662 to free up the delay slot or clobber a slot that may be needed
6663 for other purposes. */
6664 if (TARGET_64BIT)
6665 {
6666 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6667 /* Use the return pointer slot in the frame marker. */
6668 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6669 else
6670 /* Use the slot at -40 in the frame marker since HP builtin
6671 alloca doesn't copy it. */
6672 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6673 }
6674 else
6675 {
6676 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6677 /* Use the return pointer slot in the frame marker. */
6678 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6679 else
6680 /* Use the "Clean Up" slot in the frame marker. In GCC,
6681 the only other use of this location is for copying a
6682 floating point double argument from a floating-point
6683 register to two general registers. The copy is done
6684 as an "atomic" operation when outputting a call, so it
6685 won't interfere with our using the location here. */
6686 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6687 }
6688
6689 if (TARGET_PORTABLE_RUNTIME)
6690 {
6691 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6692 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6693 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6694 }
6695 else if (flag_pic)
6696 {
6697 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6698 if (TARGET_SOM || !TARGET_GAS)
6699 {
6700 xoperands[1] = gen_label_rtx ();
6701 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6702 targetm.asm_out.internal_label (asm_out_file, "L",
6703 CODE_LABEL_NUMBER (xoperands[1]));
6704 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6705 }
6706 else
6707 {
6708 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6709 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6710 }
6711 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6712 }
6713 else
6714 /* Now output a very long branch to the original target. */
6715 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6716
6717 /* Now restore the value of %r1 in the delay slot. */
6718 if (TARGET_64BIT)
6719 {
6720 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6721 return "ldd -16(%%r30),%%r1";
6722 else
6723 return "ldd -40(%%r30),%%r1";
6724 }
6725 else
6726 {
6727 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6728 return "ldw -20(%%r30),%%r1";
6729 else
6730 return "ldw -12(%%r30),%%r1";
6731 }
6732 }
6733
6734 /* This routine handles all the branch-on-bit conditional branch sequences we
6735 might need to generate. It handles nullification of delay slots,
6736 varying length branches, negated branches and all combinations of the
6737 above. It returns the appropriate output template to emit the branch. */
6738
6739 const char *
6740 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6741 {
6742 static char buf[100];
6743 bool useskip;
6744 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6745 int length = get_attr_length (insn);
6746 int xdelay;
6747
6748 /* A conditional branch to the following instruction (i.e. the delay slot) is
6749 asking for a disaster. I do not think this can happen as this pattern
6750 is only used when optimizing; jump optimization should eliminate the
6751 jump. But be prepared just in case. */
6752
6753 if (branch_to_delay_slot_p (insn))
6754 return "nop";
6755
6756 /* If this is a long branch with its delay slot unfilled, set `nullify'
6757 as it can nullify the delay slot and save a nop. */
6758 if (length == 8 && dbr_sequence_length () == 0)
6759 nullify = 1;
6760
6761 /* If this is a short forward conditional branch which did not get
6762 its delay slot filled, the delay slot can still be nullified. */
6763 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6764 nullify = forward_branch_p (insn);
6765
6766 /* A forward branch over a single nullified insn can be done with an
6767 extrs instruction. This avoids a single cycle penalty due to
6768 mis-predicted branch if we fall through (branch not taken). */
6769 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6770
6771 switch (length)
6772 {
6773
6774 /* All short conditional branches except backwards with an unfilled
6775 delay slot. */
6776 case 4:
6777 if (useskip)
6778 strcpy (buf, "{extrs,|extrw,s,}");
6779 else
6780 strcpy (buf, "bb,");
6781 if (useskip && GET_MODE (operands[0]) == DImode)
6782 strcpy (buf, "extrd,s,*");
6783 else if (GET_MODE (operands[0]) == DImode)
6784 strcpy (buf, "bb,*");
6785 if ((which == 0 && negated)
6786 || (which == 1 && ! negated))
6787 strcat (buf, ">=");
6788 else
6789 strcat (buf, "<");
6790 if (useskip)
6791 strcat (buf, " %0,%1,1,%%r0");
6792 else if (nullify && negated)
6793 {
6794 if (branch_needs_nop_p (insn))
6795 strcat (buf, ",n %0,%1,%3%#");
6796 else
6797 strcat (buf, ",n %0,%1,%3");
6798 }
6799 else if (nullify && ! negated)
6800 {
6801 if (branch_needs_nop_p (insn))
6802 strcat (buf, ",n %0,%1,%2%#");
6803 else
6804 strcat (buf, ",n %0,%1,%2");
6805 }
6806 else if (! nullify && negated)
6807 strcat (buf, " %0,%1,%3");
6808 else if (! nullify && ! negated)
6809 strcat (buf, " %0,%1,%2");
6810 break;
6811
6812 /* All long conditionals. Note a short backward branch with an
6813 unfilled delay slot is treated just like a long backward branch
6814 with an unfilled delay slot. */
6815 case 8:
6816 /* Handle weird backwards branch with a filled delay slot
6817 which is nullified. */
6818 if (dbr_sequence_length () != 0
6819 && ! forward_branch_p (insn)
6820 && nullify)
6821 {
6822 strcpy (buf, "bb,");
6823 if (GET_MODE (operands[0]) == DImode)
6824 strcat (buf, "*");
6825 if ((which == 0 && negated)
6826 || (which == 1 && ! negated))
6827 strcat (buf, "<");
6828 else
6829 strcat (buf, ">=");
6830 if (negated)
6831 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6832 else
6833 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6834 }
6835 /* Handle short backwards branch with an unfilled delay slot.
6836 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6837 taken and untaken branches. */
6838 else if (dbr_sequence_length () == 0
6839 && ! forward_branch_p (insn)
6840 && INSN_ADDRESSES_SET_P ()
6841 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6842 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6843 {
6844 strcpy (buf, "bb,");
6845 if (GET_MODE (operands[0]) == DImode)
6846 strcat (buf, "*");
6847 if ((which == 0 && negated)
6848 || (which == 1 && ! negated))
6849 strcat (buf, ">=");
6850 else
6851 strcat (buf, "<");
6852 if (negated)
6853 strcat (buf, " %0,%1,%3%#");
6854 else
6855 strcat (buf, " %0,%1,%2%#");
6856 }
6857 else
6858 {
6859 if (GET_MODE (operands[0]) == DImode)
6860 strcpy (buf, "extrd,s,*");
6861 else
6862 strcpy (buf, "{extrs,|extrw,s,}");
6863 if ((which == 0 && negated)
6864 || (which == 1 && ! negated))
6865 strcat (buf, "<");
6866 else
6867 strcat (buf, ">=");
6868 if (nullify && negated)
6869 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6870 else if (nullify && ! negated)
6871 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6872 else if (negated)
6873 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6874 else
6875 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6876 }
6877 break;
6878
6879 default:
6880 /* The reversed conditional branch must branch over one additional
6881 instruction if the delay slot is filled and needs to be extracted
6882 by pa_output_lbranch. If the delay slot is empty or this is a
6883 nullified forward branch, the instruction after the reversed
6884 condition branch must be nullified. */
6885 if (dbr_sequence_length () == 0
6886 || (nullify && forward_branch_p (insn)))
6887 {
6888 nullify = 1;
6889 xdelay = 0;
6890 operands[4] = GEN_INT (length);
6891 }
6892 else
6893 {
6894 xdelay = 1;
6895 operands[4] = GEN_INT (length + 4);
6896 }
6897
6898 if (GET_MODE (operands[0]) == DImode)
6899 strcpy (buf, "bb,*");
6900 else
6901 strcpy (buf, "bb,");
6902 if ((which == 0 && negated)
6903 || (which == 1 && !negated))
6904 strcat (buf, "<");
6905 else
6906 strcat (buf, ">=");
6907 if (nullify)
6908 strcat (buf, ",n %0,%1,.+%4");
6909 else
6910 strcat (buf, " %0,%1,.+%4");
6911 output_asm_insn (buf, operands);
6912 return pa_output_lbranch (negated ? operands[3] : operands[2],
6913 insn, xdelay);
6914 }
6915 return buf;
6916 }
6917
6918 /* This routine handles all the branch-on-variable-bit conditional branch
6919 sequences we might need to generate. It handles nullification of delay
6920 slots, varying length branches, negated branches and all combinations
6921 of the above. It returns the appropriate output template to emit the
6922 branch. */
6923
6924 const char *
6925 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
6926 int which)
6927 {
6928 static char buf[100];
6929 bool useskip;
6930 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6931 int length = get_attr_length (insn);
6932 int xdelay;
6933
6934 /* A conditional branch to the following instruction (i.e. the delay slot) is
6935 asking for a disaster. I do not think this can happen as this pattern
6936 is only used when optimizing; jump optimization should eliminate the
6937 jump. But be prepared just in case. */
6938
6939 if (branch_to_delay_slot_p (insn))
6940 return "nop";
6941
6942 /* If this is a long branch with its delay slot unfilled, set `nullify'
6943 as it can nullify the delay slot and save a nop. */
6944 if (length == 8 && dbr_sequence_length () == 0)
6945 nullify = 1;
6946
6947 /* If this is a short forward conditional branch which did not get
6948 its delay slot filled, the delay slot can still be nullified. */
6949 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6950 nullify = forward_branch_p (insn);
6951
6952 /* A forward branch over a single nullified insn can be done with an
6953 extrs instruction. This avoids a single cycle penalty due to
6954 mis-predicted branch if we fall through (branch not taken). */
6955 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6956
6957 switch (length)
6958 {
6959
6960 /* All short conditional branches except backwards with an unfilled
6961 delay slot. */
6962 case 4:
6963 if (useskip)
6964 strcpy (buf, "{vextrs,|extrw,s,}");
6965 else
6966 strcpy (buf, "{bvb,|bb,}");
6967 if (useskip && GET_MODE (operands[0]) == DImode)
6968 strcpy (buf, "extrd,s,*");
6969 else if (GET_MODE (operands[0]) == DImode)
6970 strcpy (buf, "bb,*");
6971 if ((which == 0 && negated)
6972 || (which == 1 && ! negated))
6973 strcat (buf, ">=");
6974 else
6975 strcat (buf, "<");
6976 if (useskip)
6977 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6978 else if (nullify && negated)
6979 {
6980 if (branch_needs_nop_p (insn))
6981 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6982 else
6983 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6984 }
6985 else if (nullify && ! negated)
6986 {
6987 if (branch_needs_nop_p (insn))
6988 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6989 else
6990 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6991 }
6992 else if (! nullify && negated)
6993 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6994 else if (! nullify && ! negated)
6995 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6996 break;
6997
6998 /* All long conditionals. Note a short backward branch with an
6999 unfilled delay slot is treated just like a long backward branch
7000 with an unfilled delay slot. */
7001 case 8:
7002 /* Handle weird backwards branch with a filled delay slot
7003 which is nullified. */
7004 if (dbr_sequence_length () != 0
7005 && ! forward_branch_p (insn)
7006 && nullify)
7007 {
7008 strcpy (buf, "{bvb,|bb,}");
7009 if (GET_MODE (operands[0]) == DImode)
7010 strcat (buf, "*");
7011 if ((which == 0 && negated)
7012 || (which == 1 && ! negated))
7013 strcat (buf, "<");
7014 else
7015 strcat (buf, ">=");
7016 if (negated)
7017 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7018 else
7019 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7020 }
7021 /* Handle short backwards branch with an unfilled delay slot.
7022 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7023 taken and untaken branches. */
7024 else if (dbr_sequence_length () == 0
7025 && ! forward_branch_p (insn)
7026 && INSN_ADDRESSES_SET_P ()
7027 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7028 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7029 {
7030 strcpy (buf, "{bvb,|bb,}");
7031 if (GET_MODE (operands[0]) == DImode)
7032 strcat (buf, "*");
7033 if ((which == 0 && negated)
7034 || (which == 1 && ! negated))
7035 strcat (buf, ">=");
7036 else
7037 strcat (buf, "<");
7038 if (negated)
7039 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7040 else
7041 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7042 }
7043 else
7044 {
7045 strcpy (buf, "{vextrs,|extrw,s,}");
7046 if (GET_MODE (operands[0]) == DImode)
7047 strcpy (buf, "extrd,s,*");
7048 if ((which == 0 && negated)
7049 || (which == 1 && ! negated))
7050 strcat (buf, "<");
7051 else
7052 strcat (buf, ">=");
7053 if (nullify && negated)
7054 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7055 else if (nullify && ! negated)
7056 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7057 else if (negated)
7058 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7059 else
7060 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7061 }
7062 break;
7063
7064 default:
7065 /* The reversed conditional branch must branch over one additional
7066 instruction if the delay slot is filled and needs to be extracted
7067 by pa_output_lbranch. If the delay slot is empty or this is a
7068 nullified forward branch, the instruction after the reversed
7069 condition branch must be nullified. */
7070 if (dbr_sequence_length () == 0
7071 || (nullify && forward_branch_p (insn)))
7072 {
7073 nullify = 1;
7074 xdelay = 0;
7075 operands[4] = GEN_INT (length);
7076 }
7077 else
7078 {
7079 xdelay = 1;
7080 operands[4] = GEN_INT (length + 4);
7081 }
7082
7083 if (GET_MODE (operands[0]) == DImode)
7084 strcpy (buf, "bb,*");
7085 else
7086 strcpy (buf, "{bvb,|bb,}");
7087 if ((which == 0 && negated)
7088 || (which == 1 && !negated))
7089 strcat (buf, "<");
7090 else
7091 strcat (buf, ">=");
7092 if (nullify)
7093 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7094 else
7095 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7096 output_asm_insn (buf, operands);
7097 return pa_output_lbranch (negated ? operands[3] : operands[2],
7098 insn, xdelay);
7099 }
7100 return buf;
7101 }
7102
7103 /* Return the output template for emitting a dbra type insn.
7104
7105 Note it may perform some output operations on its own before
7106 returning the final output string. */
7107 const char *
7108 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7109 {
7110 int length = get_attr_length (insn);
7111
7112 /* A conditional branch to the following instruction (i.e. the delay slot) is
7113 asking for a disaster. Be prepared! */
7114
7115 if (branch_to_delay_slot_p (insn))
7116 {
7117 if (which_alternative == 0)
7118 return "ldo %1(%0),%0";
7119 else if (which_alternative == 1)
7120 {
7121 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7122 output_asm_insn ("ldw -16(%%r30),%4", operands);
7123 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7124 return "{fldws|fldw} -16(%%r30),%0";
7125 }
7126 else
7127 {
7128 output_asm_insn ("ldw %0,%4", operands);
7129 return "ldo %1(%4),%4\n\tstw %4,%0";
7130 }
7131 }
7132
7133 if (which_alternative == 0)
7134 {
7135 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7136 int xdelay;
7137
7138 /* If this is a long branch with its delay slot unfilled, set `nullify'
7139 as it can nullify the delay slot and save a nop. */
7140 if (length == 8 && dbr_sequence_length () == 0)
7141 nullify = 1;
7142
7143 /* If this is a short forward conditional branch which did not get
7144 its delay slot filled, the delay slot can still be nullified. */
7145 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7146 nullify = forward_branch_p (insn);
7147
7148 switch (length)
7149 {
7150 case 4:
7151 if (nullify)
7152 {
7153 if (branch_needs_nop_p (insn))
7154 return "addib,%C2,n %1,%0,%3%#";
7155 else
7156 return "addib,%C2,n %1,%0,%3";
7157 }
7158 else
7159 return "addib,%C2 %1,%0,%3";
7160
7161 case 8:
7162 /* Handle weird backwards branch with a filled delay slot
7163 which is nullified. */
7164 if (dbr_sequence_length () != 0
7165 && ! forward_branch_p (insn)
7166 && nullify)
7167 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7168 /* Handle short backwards branch with an unfilled delay slot.
7169 Using an addb;nop rather than addi;bl saves 1 cycle for both
7170 taken and untaken branches. */
7171 else if (dbr_sequence_length () == 0
7172 && ! forward_branch_p (insn)
7173 && INSN_ADDRESSES_SET_P ()
7174 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7175 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7176 return "addib,%C2 %1,%0,%3%#";
7177
7178 /* Handle normal cases. */
7179 if (nullify)
7180 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7181 else
7182 return "addi,%N2 %1,%0,%0\n\tb %3";
7183
7184 default:
7185 /* The reversed conditional branch must branch over one additional
7186 instruction if the delay slot is filled and needs to be extracted
7187 by pa_output_lbranch. If the delay slot is empty or this is a
7188 nullified forward branch, the instruction after the reversed
7189 condition branch must be nullified. */
7190 if (dbr_sequence_length () == 0
7191 || (nullify && forward_branch_p (insn)))
7192 {
7193 nullify = 1;
7194 xdelay = 0;
7195 operands[4] = GEN_INT (length);
7196 }
7197 else
7198 {
7199 xdelay = 1;
7200 operands[4] = GEN_INT (length + 4);
7201 }
7202
7203 if (nullify)
7204 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7205 else
7206 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7207
7208 return pa_output_lbranch (operands[3], insn, xdelay);
7209 }
7210
7211 }
7212 /* Deal with gross reload from FP register case. */
7213 else if (which_alternative == 1)
7214 {
7215 /* Move loop counter from FP register to MEM then into a GR,
7216 increment the GR, store the GR into MEM, and finally reload
7217 the FP register from MEM within the branch's delay slot. */
7218 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7219 operands);
7220 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7221 if (length == 24)
7222 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7223 else if (length == 28)
7224 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7225 else
7226 {
7227 operands[5] = GEN_INT (length - 16);
7228 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7229 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7230 return pa_output_lbranch (operands[3], insn, 0);
7231 }
7232 }
7233 /* Deal with gross reload from memory case. */
7234 else
7235 {
7236 /* Reload loop counter from memory; the store back to memory
7237 happens in the branch's delay slot. */
7238 output_asm_insn ("ldw %0,%4", operands);
7239 if (length == 12)
7240 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7241 else if (length == 16)
7242 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7243 else
7244 {
7245 operands[5] = GEN_INT (length - 4);
7246 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7247 return pa_output_lbranch (operands[3], insn, 0);
7248 }
7249 }
7250 }
7251
7252 /* Return the output template for emitting a movb type insn.
7253
7254 Note it may perform some output operations on its own before
7255 returning the final output string. */
7256 const char *
7257 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7258 int reverse_comparison)
7259 {
7260 int length = get_attr_length (insn);
7261
7262 /* A conditional branch to the following instruction (i.e., the delay slot) is
7263 asking for a disaster. Be prepared! */
7264
7265 if (branch_to_delay_slot_p (insn))
7266 {
7267 if (which_alternative == 0)
7268 return "copy %1,%0";
7269 else if (which_alternative == 1)
7270 {
7271 output_asm_insn ("stw %1,-16(%%r30)", operands);
7272 return "{fldws|fldw} -16(%%r30),%0";
7273 }
7274 else if (which_alternative == 2)
7275 return "stw %1,%0";
7276 else
7277 return "mtsar %r1";
7278 }
7279
7280 /* Support the second variant. */
7281 if (reverse_comparison)
7282 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7283
7284 if (which_alternative == 0)
7285 {
7286 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7287 int xdelay;
7288
7289 /* If this is a long branch with its delay slot unfilled, set `nullify'
7290 as it can nullify the delay slot and save a nop. */
7291 if (length == 8 && dbr_sequence_length () == 0)
7292 nullify = 1;
7293
7294 /* If this is a short forward conditional branch which did not get
7295 its delay slot filled, the delay slot can still be nullified. */
7296 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7297 nullify = forward_branch_p (insn);
7298
7299 switch (length)
7300 {
7301 case 4:
7302 if (nullify)
7303 {
7304 if (branch_needs_nop_p (insn))
7305 return "movb,%C2,n %1,%0,%3%#";
7306 else
7307 return "movb,%C2,n %1,%0,%3";
7308 }
7309 else
7310 return "movb,%C2 %1,%0,%3";
7311
7312 case 8:
7313 /* Handle weird backwards branch with a filled delay slot
7314 which is nullified. */
7315 if (dbr_sequence_length () != 0
7316 && ! forward_branch_p (insn)
7317 && nullify)
7318 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7319
7320 /* Handle short backwards branch with an unfilled delay slot.
7321 Using a movb;nop rather than or;bl saves 1 cycle for both
7322 taken and untaken branches. */
7323 else if (dbr_sequence_length () == 0
7324 && ! forward_branch_p (insn)
7325 && INSN_ADDRESSES_SET_P ()
7326 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7327 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7328 return "movb,%C2 %1,%0,%3%#";
7329 /* Handle normal cases. */
7330 if (nullify)
7331 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7332 else
7333 return "or,%N2 %1,%%r0,%0\n\tb %3";
7334
7335 default:
7336 /* The reversed conditional branch must branch over one additional
7337 instruction if the delay slot is filled and needs to be extracted
7338 by pa_output_lbranch. If the delay slot is empty or this is a
7339 nullified forward branch, the instruction after the reversed
7340 conditional branch must be nullified. */
7341 if (dbr_sequence_length () == 0
7342 || (nullify && forward_branch_p (insn)))
7343 {
7344 nullify = 1;
7345 xdelay = 0;
7346 operands[4] = GEN_INT (length);
7347 }
7348 else
7349 {
7350 xdelay = 1;
7351 operands[4] = GEN_INT (length + 4);
7352 }
7353
7354 if (nullify)
7355 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7356 else
7357 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7358
7359 return pa_output_lbranch (operands[3], insn, xdelay);
7360 }
7361 }
7362 /* Deal with gross reload for FP destination register case. */
7363 else if (which_alternative == 1)
7364 {
7365 /* Move source register to MEM, perform the branch test, then
7366 finally load the FP register from MEM from within the branch's
7367 delay slot. */
7368 output_asm_insn ("stw %1,-16(%%r30)", operands);
7369 if (length == 12)
7370 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7371 else if (length == 16)
7372 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7373 else
7374 {
7375 operands[4] = GEN_INT (length - 4);
7376 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7377 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7378 return pa_output_lbranch (operands[3], insn, 0);
7379 }
7380 }
7381 /* Deal with gross reload from memory case. */
7382 else if (which_alternative == 2)
7383 {
7384 /* Reload loop counter from memory; the store back to memory
7385 happens in the branch's delay slot. */
7386 if (length == 8)
7387 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7388 else if (length == 12)
7389 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7390 else
7391 {
7392 operands[4] = GEN_INT (length);
7393 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7394 operands);
7395 return pa_output_lbranch (operands[3], insn, 0);
7396 }
7397 }
7398 /* Handle SAR as a destination. */
7399 else
7400 {
7401 if (length == 8)
7402 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7403 else if (length == 12)
7404 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7405 else
7406 {
7407 operands[4] = GEN_INT (length);
7408 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7409 operands);
7410 return pa_output_lbranch (operands[3], insn, 0);
7411 }
7412 }
7413 }
7414
7415 /* Copy any FP arguments in INSN into integer registers. */
7416 static void
7417 copy_fp_args (rtx insn)
7418 {
7419 rtx link;
7420 rtx xoperands[2];
7421
7422 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7423 {
7424 int arg_mode, regno;
7425 rtx use = XEXP (link, 0);
7426
7427 if (! (GET_CODE (use) == USE
7428 && GET_CODE (XEXP (use, 0)) == REG
7429 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7430 continue;
7431
7432 arg_mode = GET_MODE (XEXP (use, 0));
7433 regno = REGNO (XEXP (use, 0));
7434
7435 /* Is it a floating point register? */
7436 if (regno >= 32 && regno <= 39)
7437 {
7438 /* Copy the FP register into an integer register via memory. */
7439 if (arg_mode == SFmode)
7440 {
7441 xoperands[0] = XEXP (use, 0);
7442 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7443 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7444 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7445 }
7446 else
7447 {
7448 xoperands[0] = XEXP (use, 0);
7449 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7450 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7451 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7452 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7453 }
7454 }
7455 }
7456 }
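
/* Worked example for the copies above (illustrative): an SFmode argument
   living in FP regno 32 or 33 is spilled to -16(%sr0,%r30) and reloaded
   into %r26; regnos 34/35 map to %r25, 36/37 to %r24, and 38/39 to %r23,
   mirroring the general argument registers %r26..%r23.  A DFmode value
   takes the double-word path and needs an extra ldw for its second word.  */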
7457
7458 /* Compute length of the FP argument copy sequence for INSN. */
7459 static int
7460 length_fp_args (rtx insn)
7461 {
7462 int length = 0;
7463 rtx link;
7464
7465 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7466 {
7467 int arg_mode, regno;
7468 rtx use = XEXP (link, 0);
7469
7470 if (! (GET_CODE (use) == USE
7471 && GET_CODE (XEXP (use, 0)) == REG
7472 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7473 continue;
7474
7475 arg_mode = GET_MODE (XEXP (use, 0));
7476 regno = REGNO (XEXP (use, 0));
7477
7478 /* Is it a floating point register? */
7479 if (regno >= 32 && regno <= 39)
7480 {
7481 if (arg_mode == SFmode)
7482 length += 8;
7483 else
7484 length += 12;
7485 }
7486 }
7487
7488 return length;
7489 }
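
/* Sanity check for the constants above: an SFmode copy in copy_fp_args
   is two 4-byte insns (fstw + ldw), hence 8, while a DFmode copy is
   three insns (fstd + two ldw), hence 12.  */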
7490
7491 /* Return the attribute length for the millicode call instruction INSN.
7492 The length must match the code generated by pa_output_millicode_call.
7493 We include the delay slot in the returned length as it is better to
7494 overestimate the length than to underestimate it. */
7495
7496 int
7497 pa_attr_length_millicode_call (rtx_insn *insn)
7498 {
7499 unsigned long distance = -1;
7500 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7501
7502 if (INSN_ADDRESSES_SET_P ())
7503 {
7504 distance = (total + insn_current_reference_address (insn));
7505 if (distance < total)
7506 distance = -1;
7507 }
7508
7509 if (TARGET_64BIT)
7510 {
7511 if (!TARGET_LONG_CALLS && distance < 7600000)
7512 return 8;
7513
7514 return 20;
7515 }
7516 else if (TARGET_PORTABLE_RUNTIME)
7517 return 24;
7518 else
7519 {
7520 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7521 return 8;
7522
7523 if (!flag_pic)
7524 return 12;
7525
7526 return 24;
7527 }
7528 }
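
/* For instance (illustrative), a 32-bit non-PIC millicode call that is
   out of pc-relative range is 12 bytes: ldil, then ble (or be,l on
   PA 2.0), plus the 4-byte delay slot counted by this function.  The
   portable runtime sequence is 24 bytes: ldil/ldo to form the target
   address, bl/addi to form the return address, bv, and the delay slot.  */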
7529
7530 /* INSN is a function call. It may have an unconditional jump
7531 in its delay slot.
7532
7533 CALL_DEST is the routine we are calling. */
7534
7535 const char *
7536 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7537 {
7538 int attr_length = get_attr_length (insn);
7539 int seq_length = dbr_sequence_length ();
7540 int distance;
7541 rtx seq_insn;
7542 rtx xoperands[3];
7543
7544 xoperands[0] = call_dest;
7545 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7546
7547 /* Handle the common case where we are sure that the branch will
7548 reach the beginning of the $CODE$ subspace. The within reach
7549 form of the $$sh_func_adrs call has a length of 28. Because it
7550 has an attribute type of sh_func_adrs, it never has a nonzero
7551 sequence length (i.e., the delay slot is never filled). */
7552 if (!TARGET_LONG_CALLS
7553 && (attr_length == 8
7554 || (attr_length == 28
7555 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7556 {
7557 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7558 }
7559 else
7560 {
7561 if (TARGET_64BIT)
7562 {
7563 /* It might seem that one insn could be saved by accessing
7564 the millicode function using the linkage table. However,
7565 this doesn't work in shared libraries and other dynamically
7566 loaded objects. Using a pc-relative sequence also avoids
7567 problems related to the implicit use of the gp register. */
7568 output_asm_insn ("b,l .+8,%%r1", xoperands);
7569
7570 if (TARGET_GAS)
7571 {
7572 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7573 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7574 }
7575 else
7576 {
7577 xoperands[1] = gen_label_rtx ();
7578 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7579 targetm.asm_out.internal_label (asm_out_file, "L",
7580 CODE_LABEL_NUMBER (xoperands[1]));
7581 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7582 }
7583
7584 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7585 }
7586 else if (TARGET_PORTABLE_RUNTIME)
7587 {
7588 /* Pure portable runtime doesn't allow be/ble; we also don't
7589 have PIC support in the assembler/linker, so this sequence
7590 is needed. */
7591
7592 /* Get the address of our target into %r1. */
7593 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7594 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7595
7596 /* Get our return address into %r31. */
7597 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7598 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7599
7600 /* Jump to our target address in %r1. */
7601 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7602 }
7603 else if (!flag_pic)
7604 {
7605 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7606 if (TARGET_PA_20)
7607 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7608 else
7609 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7610 }
7611 else
7612 {
7613 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7614 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7615
7616 if (TARGET_SOM || !TARGET_GAS)
7617 {
7618 /* The HP assembler can generate relocations for the
7619 difference of two symbols. GAS can do this for a
7620 millicode symbol but not an arbitrary external
7621 symbol when generating SOM output. */
7622 xoperands[1] = gen_label_rtx ();
7623 targetm.asm_out.internal_label (asm_out_file, "L",
7624 CODE_LABEL_NUMBER (xoperands[1]));
7625 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7626 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7627 }
7628 else
7629 {
7630 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7631 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7632 xoperands);
7633 }
7634
7635 /* Jump to our target address in %r1. */
7636 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7637 }
7638 }
7639
7640 if (seq_length == 0)
7641 output_asm_insn ("nop", xoperands);
7642
7643 /* We are done if there isn't a jump in the delay slot. */
7644 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7645 return "";
7646
7647 /* This call has an unconditional jump in its delay slot. */
7648 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7649
7650 /* See if the return address can be adjusted. Use the containing
7651 sequence insn's address. */
7652 if (INSN_ADDRESSES_SET_P ())
7653 {
7654 seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
7655 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7656 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7657
7658 if (VAL_14_BITS_P (distance))
7659 {
7660 xoperands[1] = gen_label_rtx ();
7661 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7662 targetm.asm_out.internal_label (asm_out_file, "L",
7663 CODE_LABEL_NUMBER (xoperands[1]));
7664 }
7665 else
7666 /* ??? This branch may not reach its target. */
7667 output_asm_insn ("nop\n\tb,n %0", xoperands);
7668 }
7669 else
7670 /* ??? This branch may not reach its target. */
7671 output_asm_insn ("nop\n\tb,n %0", xoperands);
7672
7673 /* Delete the jump. */
7674 SET_INSN_DELETED (NEXT_INSN (insn));
7675
7676 return "";
7677 }
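
/* For reference, the TARGET_64BIT path above emits a pc-relative sequence
   along these lines when GAS is in use (`$$mulI' stands for whichever
   millicode routine is being called):

	b,l .+8,%r1
	addil L'$$mulI-$PIC_pcrel$0+4,%r1
	ldo R'$$mulI-$PIC_pcrel$0+8(%r1),%r1
	bve,l (%r1),%r2

   followed by a nop if the delay slot was not filled.  */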
7678
7679 /* Return the attribute length of the call instruction INSN. The SIBCALL
7680 flag indicates whether INSN is a regular call or a sibling call. The
7681 length returned must be longer than the code actually generated by
7682 pa_output_call. Since branch shortening is done before delay branch
7683 sequencing, there is no way to determine whether or not the delay
7684 slot will be filled during branch shortening. Even when the delay
7685 slot is filled, we may have to add a nop if the delay slot contains
7686 a branch that can't reach its target. Thus, we always have to include
7687 the delay slot in the length estimate. This used to be done in
7688 pa_adjust_insn_length but we do it here now as some sequences always
7689 fill the delay slot and we can save four bytes in the estimate for
7690 these sequences. */
7691
7692 int
7693 pa_attr_length_call (rtx_insn *insn, int sibcall)
7694 {
7695 int local_call;
7696 rtx call, call_dest;
7697 tree call_decl;
7698 int length = 0;
7699 rtx pat = PATTERN (insn);
7700 unsigned long distance = -1;
7701
7702 gcc_assert (CALL_P (insn));
7703
7704 if (INSN_ADDRESSES_SET_P ())
7705 {
7706 unsigned long total;
7707
7708 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7709 distance = (total + insn_current_reference_address (insn));
7710 if (distance < total)
7711 distance = -1;
7712 }
7713
7714 gcc_assert (GET_CODE (pat) == PARALLEL);
7715
7716 /* Get the call rtx. */
7717 call = XVECEXP (pat, 0, 0);
7718 if (GET_CODE (call) == SET)
7719 call = SET_SRC (call);
7720
7721 gcc_assert (GET_CODE (call) == CALL);
7722
7723 /* Determine if this is a local call. */
7724 call_dest = XEXP (XEXP (call, 0), 0);
7725 call_decl = SYMBOL_REF_DECL (call_dest);
7726 local_call = call_decl && targetm.binds_local_p (call_decl);
7727
7728 /* pc-relative branch. */
7729 if (!TARGET_LONG_CALLS
7730 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7731 || distance < MAX_PCREL17F_OFFSET))
7732 length += 8;
7733
7734 /* 64-bit plabel sequence. */
7735 else if (TARGET_64BIT && !local_call)
7736 length += sibcall ? 28 : 24;
7737
7738 /* non-pic long absolute branch sequence. */
7739 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7740 length += 12;
7741
7742 /* long pc-relative branch sequence. */
7743 else if (TARGET_LONG_PIC_SDIFF_CALL
7744 || (TARGET_GAS && !TARGET_SOM
7745 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7746 {
7747 length += 20;
7748
7749 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7750 length += 8;
7751 }
7752
7753 /* 32-bit plabel sequence. */
7754 else
7755 {
7756 length += 32;
7757
7758 if (TARGET_SOM)
7759 length += length_fp_args (insn);
7760
7761 if (flag_pic)
7762 length += 4;
7763
7764 if (!TARGET_PA_20)
7765 {
7766 if (!sibcall)
7767 length += 8;
7768
7769 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7770 length += 8;
7771 }
7772 }
7773
7774 return length;
7775 }
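
/* Worked example (illustrative): a 32-bit PIC plabel call on a pre-2.0
   machine with space registers enabled adds up as 32 (base sequence)
   + 4 (PIC register reload) + 8 (return-address setup for a non-sibcall)
   + 8 (ldsid/mtsp) = 52 bytes, plus the FP argument copies counted by
   length_fp_args on SOM targets.  */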
7776
7777 /* INSN is a function call. It may have an unconditional jump
7778 in its delay slot.
7779
7780 CALL_DEST is the routine we are calling. */
7781
7782 const char *
7783 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7784 {
7785 int delay_insn_deleted = 0;
7786 int delay_slot_filled = 0;
7787 int seq_length = dbr_sequence_length ();
7788 tree call_decl = SYMBOL_REF_DECL (call_dest);
7789 int local_call = call_decl && targetm.binds_local_p (call_decl);
7790 rtx xoperands[2];
7791
7792 xoperands[0] = call_dest;
7793
7794 /* Handle the common case where we're sure that the branch will reach
7795 the beginning of the "$CODE$" subspace. This is the beginning of
7796 the current function if we are in a named section. */
7797 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7798 {
7799 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7800 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7801 }
7802 else
7803 {
7804 if (TARGET_64BIT && !local_call)
7805 {
7806 /* ??? As far as I can tell, the HP linker doesn't support the
7807 long pc-relative sequence described in the 64-bit runtime
7808 architecture. So, we use a slightly longer indirect call. */
7809 xoperands[0] = pa_get_deferred_plabel (call_dest);
7810 xoperands[1] = gen_label_rtx ();
7811
7812 /* If this isn't a sibcall, we put the load of %r27 into the
7813 delay slot. We can't do this in a sibcall as we don't
7814 have a second call-clobbered scratch register available. */
7815 if (seq_length != 0
7816 && ! JUMP_P (NEXT_INSN (insn))
7817 && !sibcall)
7818 {
7819 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7820 optimize, 0, NULL);
7821
7822 /* Now delete the delay insn. */
7823 SET_INSN_DELETED (NEXT_INSN (insn));
7824 delay_insn_deleted = 1;
7825 }
7826
7827 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7828 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7829 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7830
7831 if (sibcall)
7832 {
7833 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7834 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7835 output_asm_insn ("bve (%%r1)", xoperands);
7836 }
7837 else
7838 {
7839 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7840 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7841 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7842 delay_slot_filled = 1;
7843 }
7844 }
7845 else
7846 {
7847 int indirect_call = 0;
7848
7849 /* Emit a long call. There are several different sequences
7850 of increasing length and complexity. In most cases,
7851 they don't allow an instruction in the delay slot. */
7852 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7853 && !TARGET_LONG_PIC_SDIFF_CALL
7854 && !(TARGET_GAS && !TARGET_SOM
7855 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7856 && !TARGET_64BIT)
7857 indirect_call = 1;
7858
7859 if (seq_length != 0
7860 && ! JUMP_P (NEXT_INSN (insn))
7861 && !sibcall
7862 && (!TARGET_PA_20
7863 || indirect_call
7864 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7865 {
7866 /* A non-jump insn in the delay slot. By definition we can
7867 emit this insn before the call (and in fact before argument
7868 relocating). */
7869 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7870 NULL);
7871
7872 /* Now delete the delay insn. */
7873 SET_INSN_DELETED (NEXT_INSN (insn));
7874 delay_insn_deleted = 1;
7875 }
7876
7877 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7878 {
7879 /* This is the best sequence for making long calls in
7880 non-pic code. Unfortunately, GNU ld doesn't provide
7881 the stub needed for external calls, and GAS's support
7882 for this with the SOM linker is buggy. It is safe
7883 to use this for local calls. */
7884 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7885 if (sibcall)
7886 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7887 else
7888 {
7889 if (TARGET_PA_20)
7890 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7891 xoperands);
7892 else
7893 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7894
7895 output_asm_insn ("copy %%r31,%%r2", xoperands);
7896 delay_slot_filled = 1;
7897 }
7898 }
7899 else
7900 {
7901 if (TARGET_LONG_PIC_SDIFF_CALL)
7902 {
7903 /* The HP assembler and linker can handle relocations
7904 for the difference of two symbols. The HP assembler
7905 recognizes the sequence as a pc-relative call and
7906 the linker provides stubs when needed. */
7907 xoperands[1] = gen_label_rtx ();
7908 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7909 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7910 targetm.asm_out.internal_label (asm_out_file, "L",
7911 CODE_LABEL_NUMBER (xoperands[1]));
7912 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7913 }
7914 else if (TARGET_GAS && !TARGET_SOM
7915 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7916 {
7917 /* GAS currently can't generate the relocations that
7918 are needed for the SOM linker under HP-UX using this
7919 sequence. The GNU linker doesn't generate the stubs
7920 that are needed for external calls on TARGET_ELF32
7921 with this sequence. For now, we have to use a
7922 longer plabel sequence when using GAS. */
7923 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7924 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7925 xoperands);
7926 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7927 xoperands);
7928 }
7929 else
7930 {
7931 /* Emit a long plabel-based call sequence. This is
7932 essentially an inline implementation of $$dyncall.
7933 We don't actually try to call $$dyncall as this is
7934 as difficult as calling the function itself. */
7935 xoperands[0] = pa_get_deferred_plabel (call_dest);
7936 xoperands[1] = gen_label_rtx ();
7937
7938 /* Since the call is indirect, FP arguments in registers
7939 need to be copied to the general registers. Then, the
7940 argument relocation stub will copy them back. */
7941 if (TARGET_SOM)
7942 copy_fp_args (insn);
7943
7944 if (flag_pic)
7945 {
7946 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7947 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7948 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7949 }
7950 else
7951 {
7952 output_asm_insn ("addil LR'%0-$global$,%%r27",
7953 xoperands);
7954 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7955 xoperands);
7956 }
7957
7958 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7959 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7960 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7961 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7962
7963 if (!sibcall && !TARGET_PA_20)
7964 {
7965 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7966 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7967 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7968 else
7969 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7970 }
7971 }
7972
7973 if (TARGET_PA_20)
7974 {
7975 if (sibcall)
7976 output_asm_insn ("bve (%%r1)", xoperands);
7977 else
7978 {
7979 if (indirect_call)
7980 {
7981 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7982 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7983 delay_slot_filled = 1;
7984 }
7985 else
7986 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7987 }
7988 }
7989 else
7990 {
7991 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7992 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7993 xoperands);
7994
7995 if (sibcall)
7996 {
7997 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7998 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7999 else
8000 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8001 }
8002 else
8003 {
8004 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8005 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8006 else
8007 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8008
8009 if (indirect_call)
8010 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8011 else
8012 output_asm_insn ("copy %%r31,%%r2", xoperands);
8013 delay_slot_filled = 1;
8014 }
8015 }
8016 }
8017 }
8018 }
8019
8020 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8021 output_asm_insn ("nop", xoperands);
8022
8023 /* We are done if there isn't a jump in the delay slot. */
8024 if (seq_length == 0
8025 || delay_insn_deleted
8026 || ! JUMP_P (NEXT_INSN (insn)))
8027 return "";
8028
8029 /* A sibcall should never have a branch in the delay slot. */
8030 gcc_assert (!sibcall);
8031
8032 /* This call has an unconditional jump in its delay slot. */
8033 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8034
8035 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8036 {
8037 /* See if the return address can be adjusted. Use the containing
8038 sequence insn's address. This would break the regular call/return
8039 relationship assumed by the table based eh unwinder, so only do that
8040 if the call is not possibly throwing. */
8041 rtx seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
8042 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8043 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8044
8045 if (VAL_14_BITS_P (distance)
8046 && !(can_throw_internal (insn) || can_throw_external (insn)))
8047 {
8048 xoperands[1] = gen_label_rtx ();
8049 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8050 targetm.asm_out.internal_label (asm_out_file, "L",
8051 CODE_LABEL_NUMBER (xoperands[1]));
8052 }
8053 else
8054 output_asm_insn ("nop\n\tb,n %0", xoperands);
8055 }
8056 else
8057 output_asm_insn ("b,n %0", xoperands);
8058
8059 /* Delete the jump. */
8060 SET_INSN_DELETED (NEXT_INSN (insn));
8061
8062 return "";
8063 }
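
/* For reference, the long absolute sequence selected above for non-PIC
   code expands to something like this on pre-2.0 hardware (`foo' is a
   placeholder target):

	ldil L'foo,%r1
	ble R'foo(%sr4,%r1)
	copy %r31,%r2

   where the copy sits in the delay slot and moves the return address
   from %r31 into %r2.  */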
8064
8065 /* Return the attribute length of the indirect call instruction INSN.
8066 The length must match the code generated by pa_output_indirect_call.
8067 The returned length includes the delay slot. Currently, the delay
8068 slot of an indirect call sequence is not exposed and it is used by
8069 the sequence itself. */
8070
8071 int
8072 pa_attr_length_indirect_call (rtx_insn *insn)
8073 {
8074 unsigned long distance = -1;
8075 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8076
8077 if (INSN_ADDRESSES_SET_P ())
8078 {
8079 distance = (total + insn_current_reference_address (insn));
8080 if (distance < total)
8081 distance = -1;
8082 }
8083
8084 if (TARGET_64BIT)
8085 return 12;
8086
8087 if (TARGET_FAST_INDIRECT_CALLS
8088 || (!TARGET_LONG_CALLS
8089 && !TARGET_PORTABLE_RUNTIME
8090 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8091 || distance < MAX_PCREL17F_OFFSET)))
8092 return 8;
8093
8094 if (flag_pic)
8095 return 20;
8096
8097 if (TARGET_PORTABLE_RUNTIME)
8098 return 16;
8099
8100 /* Out of reach, can use ble. */
8101 return 12;
8102 }
8103
8104 const char *
8105 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8106 {
8107 rtx xoperands[1];
8108
8109 if (TARGET_64BIT)
8110 {
8111 xoperands[0] = call_dest;
8112 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8113 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8114 return "";
8115 }
8116
8117 /* First the special case for kernels, level 0 systems, etc. */
8118 if (TARGET_FAST_INDIRECT_CALLS)
8119 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8120
8121 /* Now the normal case -- we can reach $$dyncall directly or
8122 we're sure that we can get there via a long-branch stub.
8123
8124 No need to check target flags as the length uniquely identifies
8125 the remaining cases. */
8126 if (pa_attr_length_indirect_call (insn) == 8)
8127 {
8128 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8129 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8130 variant of the B,L instruction can't be used on the SOM target. */
8131 if (TARGET_PA_20 && !TARGET_SOM)
8132 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8133 else
8134 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8135 }
8136
8137 /* Long millicode call, but we are not generating PIC or portable runtime
8138 code. */
8139 if (pa_attr_length_indirect_call (insn) == 12)
8140 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8141
8142 /* Long millicode call for portable runtime. */
8143 if (pa_attr_length_indirect_call (insn) == 16)
8144 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8145
8146 /* We need a long PIC call to $$dyncall. */
8147 xoperands[0] = NULL_RTX;
8148 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8149 if (TARGET_SOM || !TARGET_GAS)
8150 {
8151 xoperands[0] = gen_label_rtx ();
8152 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8153 targetm.asm_out.internal_label (asm_out_file, "L",
8154 CODE_LABEL_NUMBER (xoperands[0]));
8155 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8156 }
8157 else
8158 {
8159 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8160 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8161 xoperands);
8162 }
8163 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8164 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8165 return "";
8166 }
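
/* The return values of pa_attr_length_indirect_call map onto the cases
   above: 8 selects the short bl/b,l to $$dyncall, 12 the non-PIC
   ldil/ble form (TARGET_64BIT, also 12, is handled separately first),
   16 the portable runtime form, and 20 the long PIC sequence emitted
   at the end of this function.  */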
8167
8168 /* In HPUX 8.0's shared library scheme, special relocations are needed
8169 for function labels if they might be passed to a function
8170 in a shared library (because shared libraries don't live in code
8171 space), and special magic is needed to construct their address. */
8172
8173 void
8174 pa_encode_label (rtx sym)
8175 {
8176 const char *str = XSTR (sym, 0);
8177 int len = strlen (str) + 1;
8178 char *newstr, *p;
8179
8180 p = newstr = XALLOCAVEC (char, len + 1);
8181 *p++ = '@';
8182 strcpy (p, str);
8183
8184 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8185 }
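
/* For example, pa_encode_label rewrites the symbol "foo" as "@foo"; the
   '@' marks it as a function label needing the special relocation.  */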
8186
8187 static void
8188 pa_encode_section_info (tree decl, rtx rtl, int first)
8189 {
8190 int old_referenced = 0;
8191
8192 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8193 old_referenced
8194 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8195
8196 default_encode_section_info (decl, rtl, first);
8197
8198 if (first && TEXT_SPACE_P (decl))
8199 {
8200 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8201 if (TREE_CODE (decl) == FUNCTION_DECL)
8202 pa_encode_label (XEXP (rtl, 0));
8203 }
8204 else if (old_referenced)
8205 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8206 }
8207
8208 /* This is sort of the inverse of pa_encode_section_info. */
8209
8210 static const char *
8211 pa_strip_name_encoding (const char *str)
8212 {
8213 str += (*str == '@');
8214 str += (*str == '*');
8215 return str;
8216 }
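
/* E.g., "@foo" and "*foo" both strip back to "foo"; a name carrying both
   markers, "@*foo", also strips to "foo" since the checks cascade.  */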
8217
8218 /* Returns 1 if OP is a function label involved in a simple addition
8219 with a constant. Used to keep certain patterns from matching
8220 during instruction combination. */
8221 int
8222 pa_is_function_label_plus_const (rtx op)
8223 {
8224 /* Strip off any CONST. */
8225 if (GET_CODE (op) == CONST)
8226 op = XEXP (op, 0);
8227
8228 return (GET_CODE (op) == PLUS
8229 && function_label_operand (XEXP (op, 0), VOIDmode)
8230 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8231 }
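
/* This matches RTL of the shape (const (plus (symbol_ref "@foo")
   (const_int 4))) as well as a bare (plus ...), since any outer CONST
   is stripped first.  */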
8232
8233 /* Output assembly code for a thunk to FUNCTION. */
8234
8235 static void
8236 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8237 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8238 tree function)
8239 {
8240 static unsigned int current_thunk_number;
8241 int val_14 = VAL_14_BITS_P (delta);
8242 unsigned int old_last_address = last_address, nbytes = 0;
8243 char label[16];
8244 rtx xoperands[4];
8245
8246 xoperands[0] = XEXP (DECL_RTL (function), 0);
8247 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8248 xoperands[2] = GEN_INT (delta);
8249
8250 final_start_function (emit_barrier (), file, 1);
8251
8252 /* Output the thunk. We know that the function is in the same
8253 translation unit (i.e., the same space) as the thunk, and that
8254 thunks are output after their method. Thus, we don't need an
8255 external branch to reach the function. With SOM and GAS,
8256 functions and thunks are effectively in different sections.
8257 Thus, we can always use a IA-relative branch and the linker
8258 will add a long branch stub if necessary.
8259
8260 However, we have to be careful when generating PIC code on the
8261 SOM port to ensure that the sequence does not transfer to an
8262 import stub for the target function as this could clobber the
8263 return value saved at SP-24. This would also apply to the
8264 32-bit linux port if the multi-space model is implemented. */
8265 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8266 && !(flag_pic && TREE_PUBLIC (function))
8267 && (TARGET_GAS || last_address < 262132))
8268 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8269 && ((targetm_common.have_named_sections
8270 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8271 /* The GNU 64-bit linker has rather poor stub management.
8272 So, we use a long branch from thunks that aren't in
8273 the same section as the target function. */
8274 && ((!TARGET_64BIT
8275 && (DECL_SECTION_NAME (thunk_fndecl)
8276 != DECL_SECTION_NAME (function)))
8277 || ((DECL_SECTION_NAME (thunk_fndecl)
8278 == DECL_SECTION_NAME (function))
8279 && last_address < 262132)))
8280 /* In this case, we need to be able to reach the start of
8281 the stub table even though the function is likely closer
8282 and can be jumped to directly. */
8283 || (targetm_common.have_named_sections
8284 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8285 && DECL_SECTION_NAME (function) == NULL
8286 && total_code_bytes < MAX_PCREL17F_OFFSET)
8287 /* Likewise. */
8288 || (!targetm_common.have_named_sections
8289 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8290 {
8291 if (!val_14)
8292 output_asm_insn ("addil L'%2,%%r26", xoperands);
8293
8294 output_asm_insn ("b %0", xoperands);
8295
8296 if (val_14)
8297 {
8298 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8299 nbytes += 8;
8300 }
8301 else
8302 {
8303 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8304 nbytes += 12;
8305 }
8306 }
8307 else if (TARGET_64BIT)
8308 {
8309 /* We only have one call-clobbered scratch register, so we can't
8310 make use of the delay slot if delta doesn't fit in 14 bits. */
8311 if (!val_14)
8312 {
8313 output_asm_insn ("addil L'%2,%%r26", xoperands);
8314 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8315 }
8316
8317 output_asm_insn ("b,l .+8,%%r1", xoperands);
8318
8319 if (TARGET_GAS)
8320 {
8321 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8322 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8323 }
8324 else
8325 {
8326 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8327 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8328 }
8329
8330 if (val_14)
8331 {
8332 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8333 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8334 nbytes += 20;
8335 }
8336 else
8337 {
8338 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8339 nbytes += 24;
8340 }
8341 }
8342 else if (TARGET_PORTABLE_RUNTIME)
8343 {
8344 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8345 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8346
8347 if (!val_14)
8348 output_asm_insn ("addil L'%2,%%r26", xoperands);
8349
8350 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8351
8352 if (val_14)
8353 {
8354 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8355 nbytes += 16;
8356 }
8357 else
8358 {
8359 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8360 nbytes += 20;
8361 }
8362 }
8363 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8364 {
8365 /* The function is accessible from outside this module. The only
8366 way to avoid an import stub between the thunk and function is to
8367 call the function directly with an indirect sequence similar to
8368 that used by $$dyncall. This is possible because $$dyncall acts
8369 as the import stub in an indirect call. */
8370 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8371 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8372 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8373 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8374 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8375 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8376 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8377 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8378 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8379
8380 if (!val_14)
8381 {
8382 output_asm_insn ("addil L'%2,%%r26", xoperands);
8383 nbytes += 4;
8384 }
8385
8386 if (TARGET_PA_20)
8387 {
8388 output_asm_insn ("bve (%%r22)", xoperands);
8389 nbytes += 36;
8390 }
8391 else if (TARGET_NO_SPACE_REGS)
8392 {
8393 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8394 nbytes += 36;
8395 }
8396 else
8397 {
8398 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8399 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8400 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8401 nbytes += 44;
8402 }
8403
8404 if (val_14)
8405 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8406 else
8407 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8408 }
8409 else if (flag_pic)
8410 {
8411 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8412
8413 if (TARGET_SOM || !TARGET_GAS)
8414 {
8415 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8416 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8417 }
8418 else
8419 {
8420 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8421 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8422 }
8423
8424 if (!val_14)
8425 output_asm_insn ("addil L'%2,%%r26", xoperands);
8426
8427 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8428
8429 if (val_14)
8430 {
8431 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8432 nbytes += 20;
8433 }
8434 else
8435 {
8436 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8437 nbytes += 24;
8438 }
8439 }
8440 else
8441 {
8442 if (!val_14)
8443 output_asm_insn ("addil L'%2,%%r26", xoperands);
8444
8445 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8446 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8447
8448 if (val_14)
8449 {
8450 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8451 nbytes += 12;
8452 }
8453 else
8454 {
8455 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8456 nbytes += 16;
8457 }
8458 }
8459
8460 final_end_function ();
8461
8462 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8463 {
8464 switch_to_section (data_section);
8465 output_asm_insn (".align 4", xoperands);
8466 ASM_OUTPUT_LABEL (file, label);
8467 output_asm_insn (".word P'%0", xoperands);
8468 }
8469
8470 current_thunk_number++;
8471 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8472 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8473 last_address += nbytes;
8474 if (old_last_address > last_address)
8475 last_address = UINT_MAX;
8476 update_total_code_bytes (nbytes);
8477 }
8478
8479 /* Only direct calls to static functions are allowed to be sibling (tail)
8480 call optimized.
8481
8482 This restriction is necessary because some linker generated stubs will
8483 store return pointers into rp', which in some cases might clobber a
8484 live value already in rp'.
8485
8486 In a sibcall the current function and the target function share stack
8487 space. Thus if the path to the current function and the path to the
8488 target function save a value in rp', they save the value into the
8489 same stack slot, which has undesirable consequences.
8490
8491 Because of the deferred binding nature of shared libraries any function
8492 with external scope could be in a different load module and thus require
8493 rp' to be saved when calling that function. So sibcall optimizations
8494 can only be safe for static functions.
8495
8496 Note that GCC never needs return value relocations, so we don't have to
8497 worry about static calls with return value relocations (which require
8498 saving rp').
8499
8500 It is safe to perform a sibcall optimization when the target function
8501 will never return. */
8502 static bool
8503 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8504 {
8505 if (TARGET_PORTABLE_RUNTIME)
8506 return false;
8507
8508 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8509 single subspace mode and the call is not indirect. As far as I know,
8510 there is no operating system support for the multiple subspace mode.
8511 It might be possible to support indirect calls if we didn't use
8512 $$dyncall (see the indirect sequence generated in pa_output_call). */
8513 if (TARGET_ELF32)
8514 return (decl != NULL_TREE);
8515
8516 /* Sibcalls are not ok because the arg pointer register is not a fixed
8517 register. This prevents the sibcall optimization from occurring. In
8518 addition, there are problems with stub placement using GNU ld. This
8519 is because a normal sibcall branch uses a 17-bit relocation while
8520 a regular call branch uses a 22-bit relocation. As a result, more
8521 care needs to be taken in the placement of long-branch stubs. */
8522 if (TARGET_64BIT)
8523 return false;
8524
8525 /* Sibcalls are only ok within a translation unit. */
8526 return (decl && !TREE_PUBLIC (decl));
8527 }
8528
8529 /* ??? Addition is not commutative on the PA due to the weird implicit
8530 space register selection rules for memory addresses. Therefore, we
8531 don't consider a + b == b + a, as this might be inside a MEM. */
8532 static bool
8533 pa_commutative_p (const_rtx x, int outer_code)
8534 {
8535 return (COMMUTATIVE_P (x)
8536 && (TARGET_NO_SPACE_REGS
8537 || (outer_code != UNKNOWN && outer_code != MEM)
8538 || GET_CODE (x) != PLUS));
8539 }
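
/* Concretely, (mem (plus (reg A) (reg B))) is not interchangeable with
   (mem (plus (reg B) (reg A))) here because the implicit space register
   is selected from the base operand of the address; outside a MEM, or
   when space registers are disabled, PLUS commutes as usual.  */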
8540
8541 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8542 use in fmpyadd instructions. */
8543 int
8544 pa_fmpyaddoperands (rtx *operands)
8545 {
8546 enum machine_mode mode = GET_MODE (operands[0]);
8547
8548 /* Must be a floating point mode. */
8549 if (mode != SFmode && mode != DFmode)
8550 return 0;
8551
8552 /* All modes must be the same. */
8553 if (! (mode == GET_MODE (operands[1])
8554 && mode == GET_MODE (operands[2])
8555 && mode == GET_MODE (operands[3])
8556 && mode == GET_MODE (operands[4])
8557 && mode == GET_MODE (operands[5])))
8558 return 0;
8559
8560 /* All operands must be registers. */
8561 if (! (GET_CODE (operands[1]) == REG
8562 && GET_CODE (operands[2]) == REG
8563 && GET_CODE (operands[3]) == REG
8564 && GET_CODE (operands[4]) == REG
8565 && GET_CODE (operands[5]) == REG))
8566 return 0;
8567
8568 /* Only 2 real operands to the addition. One of the input operands must
8569 be the same as the output operand. */
8570 if (! rtx_equal_p (operands[3], operands[4])
8571 && ! rtx_equal_p (operands[3], operands[5]))
8572 return 0;
8573
8574 /* Inout operand of add cannot conflict with any operands from multiply. */
8575 if (rtx_equal_p (operands[3], operands[0])
8576 || rtx_equal_p (operands[3], operands[1])
8577 || rtx_equal_p (operands[3], operands[2]))
8578 return 0;
8579
8580 /* multiply cannot feed into addition operands. */
8581 if (rtx_equal_p (operands[4], operands[0])
8582 || rtx_equal_p (operands[5], operands[0]))
8583 return 0;
8584
8585 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8586 if (mode == SFmode
8587 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8588 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8589 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8590 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8591 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8592 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8593 return 0;
8594
8595 /* Passed. Operands are suitable for fmpyadd. */
8596 return 1;
8597 }
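
/* Operand layout assumed above (illustrative): operands[0..2] are the
   destination and sources of the multiply, operands[3..5] the
   destination and sources of the add, with operands[3] doubling as one
   of the add's inputs.  The checks ensure the two halves are independent
   enough to issue as a single fmpyadd; in particular, the multiply
   result never feeds the add within the same instruction.  */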
8598
8599 #if !defined(USE_COLLECT2)
8600 static void
8601 pa_asm_out_constructor (rtx symbol, int priority)
8602 {
8603 if (!function_label_operand (symbol, VOIDmode))
8604 pa_encode_label (symbol);
8605
8606 #ifdef CTORS_SECTION_ASM_OP
8607 default_ctor_section_asm_out_constructor (symbol, priority);
8608 #else
8609 # ifdef TARGET_ASM_NAMED_SECTION
8610 default_named_section_asm_out_constructor (symbol, priority);
8611 # else
8612 default_stabs_asm_out_constructor (symbol, priority);
8613 # endif
8614 #endif
8615 }
8616
8617 static void
8618 pa_asm_out_destructor (rtx symbol, int priority)
8619 {
8620 if (!function_label_operand (symbol, VOIDmode))
8621 pa_encode_label (symbol);
8622
8623 #ifdef DTORS_SECTION_ASM_OP
8624 default_dtor_section_asm_out_destructor (symbol, priority);
8625 #else
8626 # ifdef TARGET_ASM_NAMED_SECTION
8627 default_named_section_asm_out_destructor (symbol, priority);
8628 # else
8629 default_stabs_asm_out_destructor (symbol, priority);
8630 # endif
8631 #endif
8632 }
8633 #endif
8634
8635 /* This function places uninitialized global data in the bss section.
8636 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8637 function on the SOM port to prevent uninitialized global data from
8638 being placed in the data section. */
8639
8640 void
8641 pa_asm_output_aligned_bss (FILE *stream,
8642 const char *name,
8643 unsigned HOST_WIDE_INT size,
8644 unsigned int align)
8645 {
8646 switch_to_section (bss_section);
8647 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8648
8649 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8650 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8651 #endif
8652
8653 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8654 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8655 #endif
8656
8657 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8658 ASM_OUTPUT_LABEL (stream, name);
8659 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8660 }
8661
8662 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8663 that doesn't allow the alignment of global common storage to be directly
8664 specified. The SOM linker aligns common storage based on the rounded
8665 value of the NUM_BYTES parameter in the .comm directive. It's not
8666 possible to use the .align directive as it doesn't affect the alignment
8667 of the label associated with a .comm directive. */
8668
8669 void
8670 pa_asm_output_aligned_common (FILE *stream,
8671 const char *name,
8672 unsigned HOST_WIDE_INT size,
8673 unsigned int align)
8674 {
8675 unsigned int max_common_align;
8676
8677 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8678 if (align > max_common_align)
8679 {
8680 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8681 "for global common data. Using %u",
8682 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8683 align = max_common_align;
8684 }
8685
8686 switch_to_section (bss_section);
8687
8688 assemble_name (stream, name);
8689 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8690 MAX (size, align / BITS_PER_UNIT));
8691 }
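
/* Example output (illustrative): a request for 3 bytes of common storage
   named "buf" with 8-byte alignment is emitted as

	buf	.comm 8

   i.e., the size is bumped to the alignment so the SOM linker's
   size-based rounding yields the alignment that was requested.  */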
8692
8693 /* We can't use .comm for local common storage as the SOM linker effectively
8694 treats the symbol as universal and uses the same storage for local symbols
8695 with the same name in different object files. The .block directive
8696 reserves an uninitialized block of storage. However, it's not common
8697 storage. Fortunately, GCC never requests common storage with the same
8698 name in any given translation unit. */
8699
8700 void
8701 pa_asm_output_aligned_local (FILE *stream,
8702 const char *name,
8703 unsigned HOST_WIDE_INT size,
8704 unsigned int align)
8705 {
8706 switch_to_section (bss_section);
8707 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8708
8709 #ifdef LOCAL_ASM_OP
8710 fprintf (stream, "%s", LOCAL_ASM_OP);
8711 assemble_name (stream, name);
8712 fprintf (stream, "\n");
8713 #endif
8714
8715 ASM_OUTPUT_LABEL (stream, name);
8716 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8717 }
8718
8719 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8720 use in fmpysub instructions. */
8721 int
8722 pa_fmpysuboperands (rtx *operands)
8723 {
8724 enum machine_mode mode = GET_MODE (operands[0]);
8725
8726 /* Must be a floating point mode. */
8727 if (mode != SFmode && mode != DFmode)
8728 return 0;
8729
8730 /* All modes must be the same. */
8731 if (! (mode == GET_MODE (operands[1])
8732 && mode == GET_MODE (operands[2])
8733 && mode == GET_MODE (operands[3])
8734 && mode == GET_MODE (operands[4])
8735 && mode == GET_MODE (operands[5])))
8736 return 0;
8737
8738 /* All operands must be registers. */
8739 if (! (GET_CODE (operands[1]) == REG
8740 && GET_CODE (operands[2]) == REG
8741 && GET_CODE (operands[3]) == REG
8742 && GET_CODE (operands[4]) == REG
8743 && GET_CODE (operands[5]) == REG))
8744 return 0;
8745
8746 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8747 operation, so operands[4] must be the same as operands[3]. */
8748 if (! rtx_equal_p (operands[3], operands[4]))
8749 return 0;
8750
8751 /* multiply cannot feed into subtraction. */
8752 if (rtx_equal_p (operands[5], operands[0]))
8753 return 0;
8754
8755 /* Inout operand of sub cannot conflict with any operands from multiply. */
8756 if (rtx_equal_p (operands[3], operands[0])
8757 || rtx_equal_p (operands[3], operands[1])
8758 || rtx_equal_p (operands[3], operands[2]))
8759 return 0;
8760
8761 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8762 if (mode == SFmode
8763 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8764 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8765 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8766 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8767 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8768 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8769 return 0;
8770
8771 /* Passed. Operands are suitable for fmpysub. */
8772 return 1;
8773 }
8774
8775 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8776 constants for shadd instructions. */
8777 int
8778 pa_shadd_constant_p (int val)
8779 {
8780 if (val == 2 || val == 4 || val == 8)
8781 return 1;
8782 else
8783 return 0;
8784 }
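
/* These correspond to the sh1add, sh2add and sh3add instructions, which
   scale one operand by 2, 4 or 8 before the add; e.g., sh2add computes
   dest = (src1 << 2) + src2, handy for indexing 4-byte arrays.  */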
8785
8786 /* Return TRUE if INSN branches forward. */
8787
8788 static bool
8789 forward_branch_p (rtx_insn *insn)
8790 {
8791 rtx lab = JUMP_LABEL (insn);
8792
8793 /* The INSN must have a jump label. */
8794 gcc_assert (lab != NULL_RTX);
8795
8796 if (INSN_ADDRESSES_SET_P ())
8797 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8798
8799 while (insn)
8800 {
8801 if (insn == lab)
8802 return true;
8803 else
8804 insn = NEXT_INSN (insn);
8805 }
8806
8807 return false;
8808 }
8809
8810 /* Return 1 if INSN is in the delay slot of a call instruction. */
8811 int
8812 pa_jump_in_call_delay (rtx_insn *insn)
8813 {
8814
8815 if (! JUMP_P (insn))
8816 return 0;
8817
8818 if (PREV_INSN (insn)
8819 && PREV_INSN (PREV_INSN (insn))
8820 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8821 {
8822 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8823
8824 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8825 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8826
8827 }
8828 else
8829 return 0;
8830 }
8831
8832 /* Output an unconditional move and branch insn. */
8833
8834 const char *
8835 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8836 {
8837 int length = get_attr_length (insn);
8838
8839 /* These are the cases in which we win. */
8840 if (length == 4)
8841 return "mov%I1b,tr %1,%0,%2";
8842
8843 /* None of the following cases win, but they don't lose either. */
8844 if (length == 8)
8845 {
8846 if (dbr_sequence_length () == 0)
8847 {
8848 /* Nothing in the delay slot, fake it by putting the combined
8849 insn (the copy or add) in the delay slot of a bl. */
8850 if (GET_CODE (operands[1]) == CONST_INT)
8851 return "b %2\n\tldi %1,%0";
8852 else
8853 return "b %2\n\tcopy %1,%0";
8854 }
8855 else
8856 {
8857 /* Something in the delay slot, but we've got a long branch. */
8858 if (GET_CODE (operands[1]) == CONST_INT)
8859 return "ldi %1,%0\n\tb %2";
8860 else
8861 return "copy %1,%0\n\tb %2";
8862 }
8863 }
8864
8865 if (GET_CODE (operands[1]) == CONST_INT)
8866 output_asm_insn ("ldi %1,%0", operands);
8867 else
8868 output_asm_insn ("copy %1,%0", operands);
8869 return pa_output_lbranch (operands[2], insn, 1);
8870 }
8871
8872 /* Output an unconditional add and branch insn. */
8873
8874 const char *
8875 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8876 {
8877 int length = get_attr_length (insn);
8878
8879 /* To make life easy we want operand0 to be the shared input/output
8880 operand and operand1 to be the readonly operand. */
8881 if (operands[0] == operands[1])
8882 operands[1] = operands[2];
8883
8884 /* These are the cases in which we win. */
8885 if (length == 4)
8886 return "add%I1b,tr %1,%0,%3";
8887
8888 /* None of the following cases win, but they don't lose either. */
8889 if (length == 8)
8890 {
8891 if (dbr_sequence_length () == 0)
8892 /* Nothing in the delay slot, fake it by putting the combined
8893 insn (the copy or add) in the delay slot of a bl. */
8894 return "b %3\n\tadd%I1 %1,%0,%0";
8895 else
8896 /* Something in the delay slot, but we've got a long branch. */
8897 return "add%I1 %1,%0,%0\n\tb %3";
8898 }
8899
8900 output_asm_insn ("add%I1 %1,%0,%0", operands);
8901 return pa_output_lbranch (operands[3], insn, 1);
8902 }
8903
8904 /* Return nonzero if INSN (a jump insn) immediately follows a call
8905 to a named function. This is used to avoid filling the delay slot
8906 of the jump since it can usually be eliminated by modifying RP in
8907 the delay slot of the call. */
8908
8909 int
8910 pa_following_call (rtx_insn *insn)
8911 {
8912 if (! TARGET_JUMP_IN_DELAY)
8913 return 0;
8914
8915 /* Find the previous real insn, skipping NOTEs. */
8916 insn = PREV_INSN (insn);
8917 while (insn && NOTE_P (insn))
8918 insn = PREV_INSN (insn);
8919
8920 /* Check for CALL_INSNs and millicode calls. */
8921 if (insn
8922 && ((CALL_P (insn)
8923 && get_attr_type (insn) != TYPE_DYNCALL)
8924 || (NONJUMP_INSN_P (insn)
8925 && GET_CODE (PATTERN (insn)) != SEQUENCE
8926 && GET_CODE (PATTERN (insn)) != USE
8927 && GET_CODE (PATTERN (insn)) != CLOBBER
8928 && get_attr_type (insn) == TYPE_MILLI)))
8929 return 1;
8930
8931 return 0;
8932 }
8933
8934 /* We use this hook to perform a PA specific optimization which is difficult
8935 to do in earlier passes. */
8936
8937 static void
8938 pa_reorg (void)
8939 {
8940 remove_useless_addtr_insns (1);
8941
8942 if (pa_cpu < PROCESSOR_8000)
8943 pa_combine_instructions ();
8944 }
8945
8946 /* The PA has a number of odd instructions which can perform multiple
8947 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8948 it may be profitable to combine two instructions into one instruction
8949 with two outputs. It's not profitable on PA2.0 machines because the
8950 two outputs would take two slots in the reorder buffers.
8951
8952 This routine finds instructions which can be combined and combines
8953 them. We only support some of the potential combinations, and we
8954 only try common ways to find suitable instructions.
8955
8956 * addb can add two registers or a register and a small integer
8957 and jump to a nearby (+-8k) location. Normally the jump to the
8958 nearby location is conditional on the result of the add, but by
8959 using the "true" condition we can make the jump unconditional.
8960 Thus addb can perform two independent operations in one insn.
8961
8962 * movb is similar to addb in that it can perform a reg->reg
8963 or small immediate->reg copy and jump to a nearby (+-8k) location.
8964
8965 * fmpyadd and fmpysub can perform a FP multiply and either an
8966 FP add or FP sub if the operands of the multiply and add/sub are
8967 independent (there are other minor restrictions). Note both
8968 the fmpy and fadd/fsub can in theory move to better spots according
8969 to data dependencies, but for now we require the fmpy stay at a
8970 fixed location.
8971
8972 * Many of the memory operations can perform pre & post updates
8973 of index registers. GCC's pre/post increment/decrement addressing
8974 is far too simple to take advantage of all the possibilities. This
8975 pass may not be suitable since those insns may not be independent.
8976
8977 * comclr can compare two registers or a register and a small integer,
8978 nullify the following instruction, and zero some other register. This
8979 is more difficult to use as it's harder to find an insn which
8980 will generate a comclr than to find something like an unconditional
8981 branch. (Conditional moves and long branches create comclr insns.)
8982
8983 * Most arithmetic operations can conditionally skip the next
8984 instruction. They can be viewed as "perform this operation
8985 and conditionally jump to this nearby location" (where nearby
8986 is one insn away). These are difficult to use due to the
8987 branch length restrictions. */
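
/* For illustration (schematic only; register and label names are
   arbitrary), the FMPY/FADDSUB case rewrites an independent multiply
   and add such as

       (set (reg:DF f6) (mult:DF (reg:DF f4) (reg:DF f5)))
       ...
       (set (reg:DF f9) (plus:DF (reg:DF f7) (reg:DF f8)))

   into a single two-output insn

       (parallel [(set (reg:DF f6) (mult:DF (reg:DF f4) (reg:DF f5)))
                  (set (reg:DF f9) (plus:DF (reg:DF f7) (reg:DF f8)))])

   which can match an fmpyadd pattern.  Likewise, with the "tr" (true)
   completer, "addb,tr %r26,%r25,L$0040" adds %r26 into %r25 and
   branches unconditionally to L$0040 in one insn.  */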
8988
8989 static void
8990 pa_combine_instructions (void)
8991 {
8992 rtx_insn *anchor;
8993 rtx new_rtx;
8994
8995 /* This can get expensive since the basic algorithm is on the
8996 order of O(n^2) (or worse). Only do it for -O2 or higher
8997 levels of optimization. */
8998 if (optimize < 2)
8999 return;
9000
9001 /* Walk down the list of insns looking for "anchor" insns which
9002 may be combined with "floating" insns. As the name implies,
9003 "anchor" instructions don't move, while "floating" insns may
9004 move around. */
9005 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9006 new_rtx = make_insn_raw (new_rtx);
9007
9008 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9009 {
9010 enum attr_pa_combine_type anchor_attr;
9011 enum attr_pa_combine_type floater_attr;
9012
9013 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9014 Also ignore any special USE insns. */
9015 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9016 || GET_CODE (PATTERN (anchor)) == USE
9017 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9018 continue;
9019
9020 anchor_attr = get_attr_pa_combine_type (anchor);
9021 /* See if anchor is an insn suitable for combination. */
9022 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9023 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9024 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9025 && ! forward_branch_p (anchor)))
9026 {
9027 rtx_insn *floater;
9028
9029 for (floater = PREV_INSN (anchor);
9030 floater;
9031 floater = PREV_INSN (floater))
9032 {
9033 if (NOTE_P (floater)
9034 || (NONJUMP_INSN_P (floater)
9035 && (GET_CODE (PATTERN (floater)) == USE
9036 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9037 continue;
9038
9039 /* Anything except a regular INSN will stop our search. */
9040 if (! NONJUMP_INSN_P (floater))
9041 {
9042 floater = NULL;
9043 break;
9044 }
9045
9046 /* See if FLOATER is suitable for combination with the
9047 anchor. */
9048 floater_attr = get_attr_pa_combine_type (floater);
9049 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9050 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9051 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9052 && floater_attr == PA_COMBINE_TYPE_FMPY))
9053 {
9054 /* If ANCHOR and FLOATER can be combined, then we're
9055 done with this pass. */
9056 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9057 SET_DEST (PATTERN (floater)),
9058 XEXP (SET_SRC (PATTERN (floater)), 0),
9059 XEXP (SET_SRC (PATTERN (floater)), 1)))
9060 break;
9061 }
9062
9063 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9064 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9065 {
9066 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9067 {
9068 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9069 SET_DEST (PATTERN (floater)),
9070 XEXP (SET_SRC (PATTERN (floater)), 0),
9071 XEXP (SET_SRC (PATTERN (floater)), 1)))
9072 break;
9073 }
9074 else
9075 {
9076 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9077 SET_DEST (PATTERN (floater)),
9078 SET_SRC (PATTERN (floater)),
9079 SET_SRC (PATTERN (floater))))
9080 break;
9081 }
9082 }
9083 }
9084
9085 /* If we didn't find anything on the backwards scan try forwards. */
9086 if (!floater
9087 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9088 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9089 {
9090 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9091 {
9092 if (NOTE_P (floater)
9093 || (NONJUMP_INSN_P (floater)
9094 && (GET_CODE (PATTERN (floater)) == USE
9095 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9097 continue;
9098
9099 /* Anything except a regular INSN will stop our search. */
9100 if (! NONJUMP_INSN_P (floater))
9101 {
9102 floater = NULL;
9103 break;
9104 }
9105
9106 /* See if FLOATER is suitable for combination with the
9107 anchor. */
9108 floater_attr = get_attr_pa_combine_type (floater);
9109 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9110 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9111 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9112 && floater_attr == PA_COMBINE_TYPE_FMPY))
9113 {
9114 /* If ANCHOR and FLOATER can be combined, then we're
9115 done with this pass. */
9116 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9117 SET_DEST (PATTERN (floater)),
9118 XEXP (SET_SRC (PATTERN (floater)),
9119 0),
9120 XEXP (SET_SRC (PATTERN (floater)),
9121 1)))
9122 break;
9123 }
9124 }
9125 }
9126
9127 /* FLOATER will be nonzero if we found a suitable floating
9128 insn for combination with ANCHOR. */
9129 if (floater
9130 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9131 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9132 {
9133 /* Emit the new instruction and delete the old anchor. */
9134 emit_insn_before (gen_rtx_PARALLEL
9135 (VOIDmode,
9136 gen_rtvec (2, PATTERN (anchor),
9137 PATTERN (floater))),
9138 anchor);
9139
9140 SET_INSN_DELETED (anchor);
9141
9142 /* Emit a special USE insn for FLOATER, then delete
9143 the floating insn. */
9144 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9145 delete_insn (floater);
9146
9147 continue;
9148 }
9149 else if (floater
9150 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9151 {
9152 rtx temp;
9153 /* Emit the new_jump instruction and delete the old anchor. */
9154 temp
9155 = emit_jump_insn_before (gen_rtx_PARALLEL
9156 (VOIDmode,
9157 gen_rtvec (2, PATTERN (anchor),
9158 PATTERN (floater))),
9159 anchor);
9160
9161 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9162 SET_INSN_DELETED (anchor);
9163
9164 /* Emit a special USE insn for FLOATER, then delete
9165 the floating insn. */
9166 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9167 delete_insn (floater);
9168 continue;
9169 }
9170 }
9171 }
9172 }
9173
9174 static int
9175 pa_can_combine_p (rtx new_rtx, rtx_insn *anchor, rtx_insn *floater,
9176 int reversed, rtx dest,
9177 rtx src1, rtx src2)
9178 {
9179 int insn_code_number;
9180 rtx_insn *start, *end;
9181
9182 /* Create a PARALLEL with the patterns of ANCHOR and
9183 FLOATER, try to recognize it, then test constraints
9184 for the resulting pattern.
9185
9186 If the pattern doesn't match or the constraints
9187 aren't met keep searching for a suitable floater
9188 insn. */
9189 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9190 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9191 INSN_CODE (new_rtx) = -1;
9192 insn_code_number = recog_memoized (new_rtx);
9193 if (insn_code_number < 0
9194 || (extract_insn (new_rtx), ! constrain_operands (1)))
9195 return 0;
9196
9197 if (reversed)
9198 {
9199 start = anchor;
9200 end = floater;
9201 }
9202 else
9203 {
9204 start = floater;
9205 end = anchor;
9206 }
9207
9208 /* There are up to three operands to consider: one
9209 output and two inputs.
9210
9211 The output must not be used between FLOATER and ANCHOR,
9212 exclusive. The inputs must not be set between
9213 FLOATER and ANCHOR, exclusive. */
9214
9215 if (reg_used_between_p (dest, start, end))
9216 return 0;
9217
9218 if (reg_set_between_p (src1, start, end))
9219 return 0;
9220
9221 if (reg_set_between_p (src2, start, end))
9222 return 0;
9223
9224 /* If we get here, then everything is good. */
9225 return 1;
9226 }
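
/* For instance, if the floater computes f6 = f4 * f5 and some insn
   between the floater and the anchor reads f6 or sets f4 or f5, moving
   the multiply to the anchor would change the values that insn sees,
   so the tests above reject the combination.  */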
9227
9228 /* Return nonzero if references for INSN are delayed.
9229
9230 Millicode insns are actually function calls with some special
9231 constraints on arguments and register usage.
9232
9233 Millicode calls always expect their arguments in the integer argument
9234 registers, and always return their result in %r29 (ret1). They
9235 are expected to clobber their arguments, %r1, %r29, and the return
9236 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9237
9238 This function tells reorg that the references to the arguments of a
9239 millicode call do not appear to happen until after the millicode call.
9240 This allows reorg to put insns which set the argument registers into the
9241 delay slot of the millicode call -- thus they act more like traditional
9242 CALL_INSNs.
9243
9244 Note we cannot consider side effects of the insn to be delayed because
9245 the branch and link insn will clobber the return pointer. If we happened
9246 to use the return pointer in the delay slot of the call, then we lose.
9247
9248 get_attr_type will try to recognize the given insn, so make sure to
9249 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9250 in particular. */
9251 int
9252 pa_insn_refs_are_delayed (rtx insn)
9253 {
9254 return ((NONJUMP_INSN_P (insn)
9255 && GET_CODE (PATTERN (insn)) != SEQUENCE
9256 && GET_CODE (PATTERN (insn)) != USE
9257 && GET_CODE (PATTERN (insn)) != CLOBBER
9258 && get_attr_type (insn) == TYPE_MILLI));
9259 }
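
/* Schematically (32-bit port; the constant and registers are
   illustrative), reorg may transform

       ldi 7,%r25
       bl $$mulI,%r31
       nop

   into

       bl $$mulI,%r31
       ldi 7,%r25		; argument setup in the delay slot

   precisely because this function reports the millicode call's
   argument references as delayed.  */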
9260
9261 /* Promote the return value, but not the arguments. */
9262
9263 static enum machine_mode
9264 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9265 enum machine_mode mode,
9266 int *punsignedp ATTRIBUTE_UNUSED,
9267 const_tree fntype ATTRIBUTE_UNUSED,
9268 int for_return)
9269 {
9270 if (for_return == 0)
9271 return mode;
9272 return promote_mode (type, mode, punsignedp);
9273 }
9274
9275 /* On the HP-PA the value is found in register(s) 28(-29), unless
9276 the mode is SF or DF, in which case the value is returned in fr4 (32).
9277
9278 This must perform the same promotions as PROMOTE_MODE, else promoting
9279 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9280
9281 Small structures must be returned in a PARALLEL on PA64 in order
9282 to match the HP Compiler ABI. */
9283
9284 static rtx
9285 pa_function_value (const_tree valtype,
9286 const_tree func ATTRIBUTE_UNUSED,
9287 bool outgoing ATTRIBUTE_UNUSED)
9288 {
9289 enum machine_mode valmode;
9290
9291 if (AGGREGATE_TYPE_P (valtype)
9292 || TREE_CODE (valtype) == COMPLEX_TYPE
9293 || TREE_CODE (valtype) == VECTOR_TYPE)
9294 {
9295 if (TARGET_64BIT)
9296 {
9297 /* Aggregates with a size less than or equal to 128 bits are
9298 returned in GR 28(-29). They are left justified. The pad
9299 bits are undefined. Larger aggregates are returned in
9300 memory. */
9301 rtx loc[2];
9302 int i, offset = 0;
9303 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9304
9305 for (i = 0; i < ub; i++)
9306 {
9307 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9308 gen_rtx_REG (DImode, 28 + i),
9309 GEN_INT (offset));
9310 offset += 8;
9311 }
9312
9313 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9314 }
9315 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9316 {
9317 /* Aggregates 5 to 8 bytes in size are returned in general
9318 registers r28-r29 in the same manner as other non
9319 floating-point objects. The data is right-justified and
9320 zero-extended to 64 bits. This is opposite to the normal
9321 justification used on big endian targets and requires
9322 special treatment. */
9323 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9324 gen_rtx_REG (DImode, 28), const0_rtx);
9325 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9326 }
9327 }
9328
9329 if ((INTEGRAL_TYPE_P (valtype)
9330 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9331 || POINTER_TYPE_P (valtype))
9332 valmode = word_mode;
9333 else
9334 valmode = TYPE_MODE (valtype);
9335
9336 if (TREE_CODE (valtype) == REAL_TYPE
9337 && !AGGREGATE_TYPE_P (valtype)
9338 && TYPE_MODE (valtype) != TFmode
9339 && !TARGET_SOFT_FLOAT)
9340 return gen_rtx_REG (valmode, 32);
9341
9342 return gen_rtx_REG (valmode, 28);
9343 }
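
/* For illustration of pa_function_value above: on PA64 a 12-byte
   aggregate comes back, schematically, as

       (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                      (expr_list (reg:DI 29) (const_int 8))])

   while with the 32-bit ABI a 6-byte aggregate is returned
   right-justified in the DImode register pair starting at GR 28.  */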
9344
9345 /* Implement the TARGET_LIBCALL_VALUE hook. */
9346
9347 static rtx
9348 pa_libcall_value (enum machine_mode mode,
9349 const_rtx fun ATTRIBUTE_UNUSED)
9350 {
9351 if (! TARGET_SOFT_FLOAT
9352 && (mode == SFmode || mode == DFmode))
9353 return gen_rtx_REG (mode, 32);
9354 else
9355 return gen_rtx_REG (mode, 28);
9356 }
9357
9358 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9359
9360 static bool
9361 pa_function_value_regno_p (const unsigned int regno)
9362 {
9363 if (regno == 28
9364 || (! TARGET_SOFT_FLOAT && regno == 32))
9365 return true;
9366
9367 return false;
9368 }
9369
9370 /* Update the data in CUM to advance over an argument
9371 of mode MODE and data type TYPE.
9372 (TYPE is null for libcalls where that information may not be available.) */
9373
9374 static void
9375 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9376 const_tree type, bool named ATTRIBUTE_UNUSED)
9377 {
9378 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9379 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9380
9381 cum->nargs_prototype--;
9382 cum->words += (arg_size
9383 + ((cum->words & 01)
9384 && type != NULL_TREE
9385 && arg_size > 1));
9386 }
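
/* Worked example for the 32-bit port: after a single int argument
   cum->words is 1.  A following double (arg_size 2) must start on an
   even slot boundary, so the padding term above contributes 1 and
   cum->words becomes 4: slot 1 is a pad, slots 2 and 3 hold the
   double.  */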
9387
9388 /* Return the location of a parameter that is passed in a register or NULL
9389 if the parameter has any component that is passed in memory.
9390
9391 This is new code and will be pushed into the net sources after
9392 further testing.
9393
9394 ??? We might want to restructure this so that it looks more like other
9395 ports. */
9396 static rtx
9397 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9398 const_tree type, bool named ATTRIBUTE_UNUSED)
9399 {
9400 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9401 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9402 int alignment = 0;
9403 int arg_size;
9404 int fpr_reg_base;
9405 int gpr_reg_base;
9406 rtx retval;
9407
9408 if (mode == VOIDmode)
9409 return NULL_RTX;
9410
9411 arg_size = FUNCTION_ARG_SIZE (mode, type);
9412
9413 /* If this arg would be passed partially or totally on the stack, then
9414 this routine should return zero. pa_arg_partial_bytes will
9415 handle arguments which are split between regs and stack slots if
9416 the ABI mandates split arguments. */
9417 if (!TARGET_64BIT)
9418 {
9419 /* The 32-bit ABI does not split arguments. */
9420 if (cum->words + arg_size > max_arg_words)
9421 return NULL_RTX;
9422 }
9423 else
9424 {
9425 if (arg_size > 1)
9426 alignment = cum->words & 1;
9427 if (cum->words + alignment >= max_arg_words)
9428 return NULL_RTX;
9429 }
9430
9431 /* The 32bit ABIs and the 64bit ABIs are rather different,
9432 particularly in their handling of FP registers. We might
9433 be able to cleverly share code between them, but I'm not
9434 going to bother in the hope that splitting them up results
9435 in code that is more easily understood. */
9436
9437 if (TARGET_64BIT)
9438 {
9439 /* Advance the base registers to their current locations.
9440
9441 Remember, gprs grow towards smaller register numbers while
9442 fprs grow towards higher register numbers. Also remember that
9443 although FP regs are 32-bit addressable, we pretend that
9444 the registers are 64 bits wide. */
9445 gpr_reg_base = 26 - cum->words;
9446 fpr_reg_base = 32 + cum->words;
9447
9448 /* Arguments wider than one word and small aggregates need special
9449 treatment. */
9450 if (arg_size > 1
9451 || mode == BLKmode
9452 || (type && (AGGREGATE_TYPE_P (type)
9453 || TREE_CODE (type) == COMPLEX_TYPE
9454 || TREE_CODE (type) == VECTOR_TYPE)))
9455 {
9456 /* Double-extended precision (80-bit), quad-precision (128-bit)
9457 and aggregates including complex numbers are aligned on
9458 128-bit boundaries. The first eight 64-bit argument slots
9459 are associated one-to-one, with general registers r26
9460 through r19, and also with floating-point registers fr4
9461 through fr11. Arguments larger than one word are always
9462 passed in general registers.
9463
9464 Using a PARALLEL with a word mode register results in left
9465 justified data on a big-endian target. */
9466
9467 rtx loc[8];
9468 int i, offset = 0, ub = arg_size;
9469
9470 /* Align the base register. */
9471 gpr_reg_base -= alignment;
9472
9473 ub = MIN (ub, max_arg_words - cum->words - alignment);
9474 for (i = 0; i < ub; i++)
9475 {
9476 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9477 gen_rtx_REG (DImode, gpr_reg_base),
9478 GEN_INT (offset));
9479 gpr_reg_base -= 1;
9480 offset += 8;
9481 }
9482
9483 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9484 }
9485 }
9486 else
9487 {
9488 /* If the argument is larger than a word, then we know precisely
9489 which registers we must use. */
9490 if (arg_size > 1)
9491 {
9492 if (cum->words)
9493 {
9494 gpr_reg_base = 23;
9495 fpr_reg_base = 38;
9496 }
9497 else
9498 {
9499 gpr_reg_base = 25;
9500 fpr_reg_base = 34;
9501 }
9502
9503 /* Structures 5 to 8 bytes in size are passed in the general
9504 registers in the same manner as other non floating-point
9505 objects. The data is right-justified and zero-extended
9506 to 64 bits. This is opposite to the normal justification
9507 used on big endian targets and requires special treatment.
9508 We now define BLOCK_REG_PADDING to pad these objects.
9509 Aggregates, complex and vector types are passed in the same
9510 manner as structures. */
9511 if (mode == BLKmode
9512 || (type && (AGGREGATE_TYPE_P (type)
9513 || TREE_CODE (type) == COMPLEX_TYPE
9514 || TREE_CODE (type) == VECTOR_TYPE)))
9515 {
9516 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9517 gen_rtx_REG (DImode, gpr_reg_base),
9518 const0_rtx);
9519 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9520 }
9521 }
9522 else
9523 {
9524 /* We have a single word (32 bits). A simple computation
9525 will get us the register #s we need. */
9526 gpr_reg_base = 26 - cum->words;
9527 fpr_reg_base = 32 + 2 * cum->words;
9528 }
9529 }
9530
9531 /* Determine if the argument needs to be passed in both general and
9532 floating point registers. */
9533 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9534 /* If we are doing soft-float with portable runtime, then there
9535 is no need to worry about FP regs. */
9536 && !TARGET_SOFT_FLOAT
9537 /* The parameter must be some kind of scalar float, else we just
9538 pass it in integer registers. */
9539 && GET_MODE_CLASS (mode) == MODE_FLOAT
9540 /* The target function must not have a prototype. */
9541 && cum->nargs_prototype <= 0
9542 /* libcalls do not need to pass items in both FP and general
9543 registers. */
9544 && type != NULL_TREE
9545 /* All this hair applies to "outgoing" args only. This includes
9546 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9547 && !cum->incoming)
9548 /* Also pass outgoing floating arguments in both registers in indirect
9549 calls with the 32 bit ABI and the HP assembler since there is no
9550 way to specify the argument locations in static functions. */
9551 || (!TARGET_64BIT
9552 && !TARGET_GAS
9553 && !cum->incoming
9554 && cum->indirect
9555 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9556 {
9557 retval
9558 = gen_rtx_PARALLEL
9559 (mode,
9560 gen_rtvec (2,
9561 gen_rtx_EXPR_LIST (VOIDmode,
9562 gen_rtx_REG (mode, fpr_reg_base),
9563 const0_rtx),
9564 gen_rtx_EXPR_LIST (VOIDmode,
9565 gen_rtx_REG (mode, gpr_reg_base),
9566 const0_rtx)));
9567 }
9568 else
9569 {
9570 /* See if we should pass this parameter in a general register. */
9571 if (TARGET_SOFT_FLOAT
9572 /* Indirect calls in the normal 32bit ABI require all arguments
9573 to be passed in general registers. */
9574 || (!TARGET_PORTABLE_RUNTIME
9575 && !TARGET_64BIT
9576 && !TARGET_ELF32
9577 && cum->indirect)
9578 /* If the parameter is not a scalar floating-point parameter,
9579 then it belongs in GPRs. */
9580 || GET_MODE_CLASS (mode) != MODE_FLOAT
9581 /* Structure with single SFmode field belongs in GPR. */
9582 || (type && AGGREGATE_TYPE_P (type)))
9583 retval = gen_rtx_REG (mode, gpr_reg_base);
9584 else
9585 retval = gen_rtx_REG (mode, fpr_reg_base);
9586 }
9587 return retval;
9588 }
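
/* For example, when both register sets must be used, the PARALLEL
   built above has the shape

       (parallel [(expr_list (reg fpr_reg_base) (const_int 0))
                  (expr_list (reg gpr_reg_base) (const_int 0))])

   so the caller stores the value into both the floating-point register
   and the corresponding general registers; the callee may load it from
   either, whichever its (unseen) prototype calls for.  */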
9589
9590 /* Arguments larger than one word are double word aligned. */
9591
9592 static unsigned int
9593 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9594 {
9595 bool singleword = (type
9596 ? (integer_zerop (TYPE_SIZE (type))
9597 || !TREE_CONSTANT (TYPE_SIZE (type))
9598 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9599 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9600
9601 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9602 }
9603
9604 /* If this arg would be passed totally in registers or totally on the stack,
9605 then this routine should return zero. */
9606
9607 static int
9608 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9609 tree type, bool named ATTRIBUTE_UNUSED)
9610 {
9611 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9612 unsigned int max_arg_words = 8;
9613 unsigned int offset = 0;
9614
9615 if (!TARGET_64BIT)
9616 return 0;
9617
9618 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9619 offset = 1;
9620
9621 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9622 /* Arg fits fully into registers. */
9623 return 0;
9624 else if (cum->words + offset >= max_arg_words)
9625 /* Arg fully on the stack. */
9626 return 0;
9627 else
9628 /* Arg is split. */
9629 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9630 }
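
/* Worked example (64-bit): with cum->words == 6 and a four-word
   aggregate, the argument neither fits fully in registers (6 + 4 > 8)
   nor starts on the stack (6 < 8), so it is split and
   (8 - 6) * 8 == 16 bytes are passed in registers with the remainder
   in memory.  */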
9631
9632
9633 /* A get_unnamed_section callback for switching to the text section.
9634
9635 This function is only used with SOM. Because we don't support
9636 named subspaces, we can only create a new subspace or switch back
9637 to the default text subspace. */
9638
9639 static void
9640 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9641 {
9642 gcc_assert (TARGET_SOM);
9643 if (TARGET_GAS)
9644 {
9645 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9646 {
9647 /* We only want to emit a .nsubspa directive once at the
9648 start of the function. */
9649 cfun->machine->in_nsubspa = 1;
9650
9651 /* Create a new subspace for the text. This provides
9652 better stub placement and supports one-only functions. */
9653 if (cfun->decl
9654 && DECL_ONE_ONLY (cfun->decl)
9655 && !DECL_WEAK (cfun->decl))
9656 {
9657 output_section_asm_op ("\t.SPACE $TEXT$\n"
9658 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9659 "ACCESS=44,SORT=24,COMDAT");
9660 return;
9661 }
9662 }
9663 else
9664 {
9665 /* There isn't a current function or the body of the current
9666 function has been completed. So, we are changing to the
9667 text section to output debugging information. Thus, we
9668 need to forget that we are in the text section so that
9669 varasm.c will call us when text_section is selected again. */
9670 gcc_assert (!cfun || !cfun->machine
9671 || cfun->machine->in_nsubspa == 2);
9672 in_section = NULL;
9673 }
9674 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9675 return;
9676 }
9677 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9678 }
9679
9680 /* A get_unnamed_section callback for switching to comdat data
9681 sections. This function is only used with SOM. */
9682
9683 static void
9684 som_output_comdat_data_section_asm_op (const void *data)
9685 {
9686 in_section = NULL;
9687 output_section_asm_op (data);
9688 }
9689
9690 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9691
9692 static void
9693 pa_som_asm_init_sections (void)
9694 {
9695 text_section
9696 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9697
9698 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9699 is not being generated. */
9700 som_readonly_data_section
9701 = get_unnamed_section (0, output_section_asm_op,
9702 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9703
9704 /* When secondary definitions are not supported, SOM makes readonly
9705 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9706 the comdat flag. */
9707 som_one_only_readonly_data_section
9708 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9709 "\t.SPACE $TEXT$\n"
9710 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9711 "ACCESS=0x2c,SORT=16,COMDAT");
9712
9713
9714 /* When secondary definitions are not supported, SOM makes data one-only
9715 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9716 som_one_only_data_section
9717 = get_unnamed_section (SECTION_WRITE,
9718 som_output_comdat_data_section_asm_op,
9719 "\t.SPACE $PRIVATE$\n"
9720 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9721 "ACCESS=31,SORT=24,COMDAT");
9722
9723 if (flag_tm)
9724 som_tm_clone_table_section
9725 = get_unnamed_section (0, output_section_asm_op,
9726 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9727
9728 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9729 which reference data within the $TEXT$ space (for example constant
9730 strings in the $LIT$ subspace).
9731
9732 The assemblers (GAS and HP as) both have problems with handling
9733 the difference of two symbols which is the other correct way to
9734 reference constant data during PIC code generation.
9735
9736 So, there's no way to reference constant data which is in the
9737 $TEXT$ space during PIC generation. Instead place all constant
9738 data into the $PRIVATE$ subspace (this reduces sharing, but it
9739 works correctly). */
9740 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9741
9742 /* We must not have a reference to an external symbol defined in a
9743 shared library in a readonly section, else the SOM linker will
9744 complain.
9745
9746 So, we force exception information into the data section. */
9747 exception_section = data_section;
9748 }
9749
9750 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9751
9752 static section *
9753 pa_som_tm_clone_table_section (void)
9754 {
9755 return som_tm_clone_table_section;
9756 }
9757
9758 /* On hpux10, the linker will give an error if we have a reference
9759 in the read-only data section to a symbol defined in a shared
9760 library. Therefore, expressions that might require a reloc
9761 cannot be placed in the read-only data section. */
9762
9763 static section *
9764 pa_select_section (tree exp, int reloc,
9765 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9766 {
9767 if (TREE_CODE (exp) == VAR_DECL
9768 && TREE_READONLY (exp)
9769 && !TREE_THIS_VOLATILE (exp)
9770 && DECL_INITIAL (exp)
9771 && (DECL_INITIAL (exp) == error_mark_node
9772 || TREE_CONSTANT (DECL_INITIAL (exp)))
9773 && !reloc)
9774 {
9775 if (TARGET_SOM
9776 && DECL_ONE_ONLY (exp)
9777 && !DECL_WEAK (exp))
9778 return som_one_only_readonly_data_section;
9779 else
9780 return readonly_data_section;
9781 }
9782 else if (CONSTANT_CLASS_P (exp) && !reloc)
9783 return readonly_data_section;
9784 else if (TARGET_SOM
9785 && TREE_CODE (exp) == VAR_DECL
9786 && DECL_ONE_ONLY (exp)
9787 && !DECL_WEAK (exp))
9788 return som_one_only_data_section;
9789 else
9790 return data_section;
9791 }
9792
9793 static void
9794 pa_globalize_label (FILE *stream, const char *name)
9795 {
9796 /* We only handle DATA objects here, functions are globalized in
9797 ASM_DECLARE_FUNCTION_NAME. */
9798 if (! FUNCTION_NAME_P (name))
9799 {
9800 fputs ("\t.EXPORT ", stream);
9801 assemble_name (stream, name);
9802 fputs (",DATA\n", stream);
9803 }
9804 }
9805
9806 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9807
9808 static rtx
9809 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9810 int incoming ATTRIBUTE_UNUSED)
9811 {
9812 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9813 }
9814
9815 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9816
9817 bool
9818 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9819 {
9820 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9821 PA64 ABI says that objects larger than 128 bits are returned in memory.
9822 Note, int_size_in_bytes can return -1 if the size of the object is
9823 variable or larger than the maximum value that can be expressed as
9824 a HOST_WIDE_INT. It can also return zero for an empty type. The
9825 simplest way to handle variable and empty types is to pass them in
9826 memory. This avoids problems in defining the boundaries of argument
9827 slots, allocating registers, etc. */
9828 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9829 || int_size_in_bytes (type) <= 0);
9830 }
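
/* For example, a 12-byte struct is returned in memory with the 32-bit
   ABI (12 > 8) but in registers on PA64 (12 <= 16); variable-sized and
   empty types always go to memory.  */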
9831
9832 /* Structure to hold declaration and name of external symbols that are
9833 emitted by GCC. We generate a vector of these symbols and output them
9834 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9835 This avoids putting out names that are never really used. */
9836
9837 typedef struct GTY(()) extern_symbol
9838 {
9839 tree decl;
9840 const char *name;
9841 } extern_symbol;
9842
9843 /* Define gc'd vector type for extern_symbol. */
9844
9845 /* Vector of extern_symbol pointers. */
9846 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9847
9848 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9849 /* Mark DECL (name NAME) as an external reference (assembler output
9850 file FILE). This saves the names to output at the end of the file
9851 if actually referenced. */
9852
9853 void
9854 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9855 {
9856 gcc_assert (file == asm_out_file);
9857 extern_symbol p = {decl, name};
9858 vec_safe_push (extern_symbols, p);
9859 }
9860
9861 /* Output text required at the end of an assembler file.
9862 This includes deferred plabels and .import directives for
9863 all external symbols that were actually referenced. */
9864
9865 static void
9866 pa_hpux_file_end (void)
9867 {
9868 unsigned int i;
9869 extern_symbol *p;
9870
9871 if (!NO_DEFERRED_PROFILE_COUNTERS)
9872 output_deferred_profile_counters ();
9873
9874 output_deferred_plabels ();
9875
9876 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9877 {
9878 tree decl = p->decl;
9879
9880 if (!TREE_ASM_WRITTEN (decl)
9881 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9882 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9883 }
9884
9885 vec_free (extern_symbols);
9886 }
9887 #endif
9888
9889 /* Return true if a change from mode FROM to mode TO for a register
9890 in register class RCLASS is invalid. */
9891
9892 bool
9893 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9894 enum reg_class rclass)
9895 {
9896 if (from == to)
9897 return false;
9898
9899 /* Reject changes to/from complex and vector modes. */
9900 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9901 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9902 return true;
9903
9904 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9905 return false;
9906
9907 /* There is no way to load QImode or HImode values directly from
9908 memory. SImode loads to the FP registers are not zero extended.
9909 On the 64-bit target, this conflicts with the definition of
9910 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9911 with different sizes in the floating-point registers. */
9912 if (MAYBE_FP_REG_CLASS_P (rclass))
9913 return true;
9914
9915 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9916 in specific sets of registers. Thus, we cannot allow changing
9917 to a larger mode when it's larger than a word. */
9918 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9919 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9920 return true;
9921
9922 return false;
9923 }
9924
9925 /* Returns TRUE if it is a good idea to tie two pseudo registers
9926 when one has mode MODE1 and one has mode MODE2.
9927 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9928 for any hard reg, then this must be FALSE for correct output.
9929
9930 We should return FALSE for QImode and HImode because these modes
9931 are not ok in the floating-point registers. However, this prevents
9932 tying these modes to SImode and DImode in the general registers.
9933 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9934 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9935 in the floating-point registers. */
9936
9937 bool
9938 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9939 {
9940 /* Don't tie modes in different classes. */
9941 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9942 return false;
9943
9944 return true;
9945 }
9946
9947 \f
9948 /* Length in units of the trampoline instruction code. */
9949
9950 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9951
9952
9953 /* Output assembler code for a block containing the constant parts
9954 of a trampoline, leaving space for the variable parts.
9955
9956 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9957 and then branches to the specified routine.
9958
9959 This code template is copied from the text segment to a stack
9960 location, patched by pa_trampoline_init to contain valid values,
9961 and then entered as a subroutine.
9962
9963 It is best to keep this as small as possible to avoid having to
9964 flush multiple lines in the cache. */
9965
9966 static void
9967 pa_asm_trampoline_template (FILE *f)
9968 {
9969 if (!TARGET_64BIT)
9970 {
9971 fputs ("\tldw 36(%r22),%r21\n", f);
9972 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9973 if (ASSEMBLER_DIALECT == 0)
9974 fputs ("\tdepi 0,31,2,%r21\n", f);
9975 else
9976 fputs ("\tdepwi 0,31,2,%r21\n", f);
9977 fputs ("\tldw 4(%r21),%r19\n", f);
9978 fputs ("\tldw 0(%r21),%r21\n", f);
9979 if (TARGET_PA_20)
9980 {
9981 fputs ("\tbve (%r21)\n", f);
9982 fputs ("\tldw 40(%r22),%r29\n", f);
9983 fputs ("\t.word 0\n", f);
9984 fputs ("\t.word 0\n", f);
9985 }
9986 else
9987 {
9988 fputs ("\tldsid (%r21),%r1\n", f);
9989 fputs ("\tmtsp %r1,%sr0\n", f);
9990 fputs ("\tbe 0(%sr0,%r21)\n", f);
9991 fputs ("\tldw 40(%r22),%r29\n", f);
9992 }
9993 fputs ("\t.word 0\n", f);
9994 fputs ("\t.word 0\n", f);
9995 fputs ("\t.word 0\n", f);
9996 fputs ("\t.word 0\n", f);
9997 }
9998 else
9999 {
10000 fputs ("\t.dword 0\n", f);
10001 fputs ("\t.dword 0\n", f);
10002 fputs ("\t.dword 0\n", f);
10003 fputs ("\t.dword 0\n", f);
10004 fputs ("\tmfia %r31\n", f);
10005 fputs ("\tldd 24(%r31),%r1\n", f);
10006 fputs ("\tldd 24(%r1),%r27\n", f);
10007 fputs ("\tldd 16(%r1),%r1\n", f);
10008 fputs ("\tbve (%r1)\n", f);
10009 fputs ("\tldd 32(%r31),%r31\n", f);
10010 fputs ("\t.dword 0 ; fptr\n", f);
10011 fputs ("\t.dword 0 ; static link\n", f);
10012 }
10013 }
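
/* After pa_trampoline_init (below) patches the template, the data
   words of the 32-bit trampoline hold (byte offsets):

       36	address of the target function
       40	static chain value
       44	address of the trampoline
       48	global pointer (%r19)

   Offsets 44 and 48 together form the plabel used for indirect calls
   to the trampoline; everything before offset 36 is code.  */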
10014
10015 /* Emit RTL insns to initialize the variable parts of a trampoline.
10016 FNADDR is an RTX for the address of the function's pure code.
10017 CXT is an RTX for the static chain value for the function.
10018
10019 Move the function address to the trampoline template at offset 36.
10020 Move the static chain value to the trampoline template at offset 40.
10021 Move the trampoline address to the trampoline template at offset 44.
10022 Move r19 to the trampoline template at offset 48. The latter two
10023 words create a plabel for the indirect call to the trampoline.
10024
10025 A similar sequence is used for the 64-bit port but the plabel is
10026 at the beginning of the trampoline.
10027
10028 Finally, the cache entries for the trampoline code are flushed.
10029 This is necessary to ensure that the trampoline instruction sequence
10030 is written to memory prior to any attempts at prefetching the code
10031 sequence. */
10032
10033 static void
10034 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10035 {
10036 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10037 rtx start_addr = gen_reg_rtx (Pmode);
10038 rtx end_addr = gen_reg_rtx (Pmode);
10039 rtx line_length = gen_reg_rtx (Pmode);
10040 rtx r_tramp, tmp;
10041
10042 emit_block_move (m_tramp, assemble_trampoline_template (),
10043 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10044 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10045
10046 if (!TARGET_64BIT)
10047 {
10048 tmp = adjust_address (m_tramp, Pmode, 36);
10049 emit_move_insn (tmp, fnaddr);
10050 tmp = adjust_address (m_tramp, Pmode, 40);
10051 emit_move_insn (tmp, chain_value);
10052
10053 /* Create a fat pointer for the trampoline. */
10054 tmp = adjust_address (m_tramp, Pmode, 44);
10055 emit_move_insn (tmp, r_tramp);
10056 tmp = adjust_address (m_tramp, Pmode, 48);
10057 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10058
10059 /* fdc and fic only use registers for the address to flush,
10060 they do not accept integer displacements. We align the
10061 start and end addresses to the beginning of their respective
10062 cache lines to minimize the number of lines flushed. */
10063 emit_insn (gen_andsi3 (start_addr, r_tramp,
10064 GEN_INT (-MIN_CACHELINE_SIZE)));
10065 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10066 TRAMPOLINE_CODE_SIZE-1));
10067 emit_insn (gen_andsi3 (end_addr, tmp,
10068 GEN_INT (-MIN_CACHELINE_SIZE)));
10069 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10070 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10071 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10072 gen_reg_rtx (Pmode),
10073 gen_reg_rtx (Pmode)));
10074 }
10075 else
10076 {
10077 tmp = adjust_address (m_tramp, Pmode, 56);
10078 emit_move_insn (tmp, fnaddr);
10079 tmp = adjust_address (m_tramp, Pmode, 64);
10080 emit_move_insn (tmp, chain_value);
10081
10082 /* Create a fat pointer for the trampoline. */
10083 tmp = adjust_address (m_tramp, Pmode, 16);
10084 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10085 r_tramp, 32)));
10086 tmp = adjust_address (m_tramp, Pmode, 24);
10087 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10088
10089 /* fdc and fic only use registers for the address to flush,
10090 they do not accept integer displacements. We align the
10091 start and end addresses to the beginning of their respective
10092 cache lines to minimize the number of lines flushed. */
10093 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10094 emit_insn (gen_anddi3 (start_addr, tmp,
10095 GEN_INT (-MIN_CACHELINE_SIZE)));
10096 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10097 TRAMPOLINE_CODE_SIZE - 1));
10098 emit_insn (gen_anddi3 (end_addr, tmp,
10099 GEN_INT (-MIN_CACHELINE_SIZE)));
10100 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10101 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10102 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10103 gen_reg_rtx (Pmode),
10104 gen_reg_rtx (Pmode)));
10105 }
10106
10107 #ifdef HAVE_ENABLE_EXECUTE_STACK
10108 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10109 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10110 #endif
10111 }
10112
10113 /* Perform any machine-specific adjustment in the address of the trampoline.
10114 ADDR contains the address that was passed to pa_trampoline_init.
10115 Adjust the trampoline address to point to the plabel at offset 44; the 46 used below is 44 plus 2, which sets the plabel bit. */
10116
10117 static rtx
10118 pa_trampoline_adjust_address (rtx addr)
10119 {
10120 if (!TARGET_64BIT)
10121 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10122 return addr;
10123 }
10124
10125 static rtx
10126 pa_delegitimize_address (rtx orig_x)
10127 {
10128 rtx x = delegitimize_mem_from_attrs (orig_x);
10129
10130 if (GET_CODE (x) == LO_SUM
10131 && GET_CODE (XEXP (x, 1)) == UNSPEC
10132 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10133 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10134 return x;
10135 }
10136 \f
10137 static rtx
10138 pa_internal_arg_pointer (void)
10139 {
10140 /* The argument pointer and the hard frame pointer are the same in
10141 the 32-bit runtime, so we don't need a copy. */
10142 if (TARGET_64BIT)
10143 return copy_to_reg (virtual_incoming_args_rtx);
10144 else
10145 return virtual_incoming_args_rtx;
10146 }
10147
10148 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10149 Frame pointer elimination is automatically handled. */
10150
10151 static bool
10152 pa_can_eliminate (const int from, const int to)
10153 {
10154 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10155 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10156 return false;
10157
10158 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10159 ? ! frame_pointer_needed
10160 : true);
10161 }
10162
10163 /* Define the offset between two registers, FROM to be eliminated and its
10164 replacement TO, at the start of a routine. */
10165 HOST_WIDE_INT
10166 pa_initial_elimination_offset (int from, int to)
10167 {
10168 HOST_WIDE_INT offset;
10169
10170 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10171 && to == STACK_POINTER_REGNUM)
10172 offset = -pa_compute_frame_size (get_frame_size (), 0);
10173 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10174 offset = 0;
10175 else
10176 gcc_unreachable ();
10177
10178 return offset;
10179 }
10180
10181 static void
10182 pa_conditional_register_usage (void)
10183 {
10184 int i;
10185
10186 if (!TARGET_64BIT && !TARGET_PA_11)
10187 {
10188 for (i = 56; i <= FP_REG_LAST; i++)
10189 fixed_regs[i] = call_used_regs[i] = 1;
10190 for (i = 33; i < 56; i += 2)
10191 fixed_regs[i] = call_used_regs[i] = 1;
10192 }
10193 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10194 {
10195 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10196 fixed_regs[i] = call_used_regs[i] = 1;
10197 }
10198 if (flag_pic)
10199 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10200 }
10201
10202 /* Target hook for c_mode_for_suffix. */
10203
10204 static enum machine_mode
10205 pa_c_mode_for_suffix (char suffix)
10206 {
10207 if (HPUX_LONG_DOUBLE_LIBRARY)
10208 {
10209 if (suffix == 'q')
10210 return TFmode;
10211 }
10212
10213 return VOIDmode;
10214 }
10215
10216 /* Target hook for function_section. */
10217
10218 static section *
10219 pa_function_section (tree decl, enum node_frequency freq,
10220 bool startup, bool exit)
10221 {
10222 /* Put functions in text section if target doesn't have named sections. */
10223 if (!targetm_common.have_named_sections)
10224 return text_section;
10225
10226 /* Force nested functions into the same section as the containing
10227 function. */
10228 if (decl
10229 && DECL_SECTION_NAME (decl) == NULL
10230 && DECL_CONTEXT (decl) != NULL_TREE
10231 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10232 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10233 return function_section (DECL_CONTEXT (decl));
10234
10235 /* Otherwise, use the default function section. */
10236 return default_function_section (decl, freq, startup, exit);
10237 }
10238
10239 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10240
10241 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10242 that need more than three instructions to load prior to reload. This
10243 limit is somewhat arbitrary. It takes three instructions to load a
10244 CONST_INT from memory but two are memory accesses. It may be better
10245 to increase the allowed range for CONST_INTS. We may also be able
10246 to handle CONST_DOUBLES. */
10247
10248 static bool
10249 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10250 {
10251 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10252 return false;
10253
10254 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10255 return false;
10256
10257 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10258 legitimate constants. The other variants can't be handled by
10259 the move patterns after reload starts. */
10260 if (tls_referenced_p (x))
10261 return false;
10262
10263 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10264 return false;
10265
10266 if (TARGET_64BIT
10267 && HOST_BITS_PER_WIDE_INT > 32
10268 && GET_CODE (x) == CONST_INT
10269 && !reload_in_progress
10270 && !reload_completed
10271 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10272 && !pa_cint_ok_for_move (INTVAL (x)))
10273 return false;
10274
10275 if (function_label_operand (x, mode))
10276 return false;
10277
10278 return true;
10279 }
10280
10281 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10282
10283 static unsigned int
10284 pa_section_type_flags (tree decl, const char *name, int reloc)
10285 {
10286 unsigned int flags;
10287
10288 flags = default_section_type_flags (decl, name, reloc);
10289
10290 /* Function labels are placed in the constant pool. This can
10291 cause a section conflict if decls are put in ".data.rel.ro"
10292 or ".data.rel.ro.local" using the __attribute__ construct. */
10293 if (strcmp (name, ".data.rel.ro") == 0
10294 || strcmp (name, ".data.rel.ro.local") == 0)
10295 flags |= SECTION_WRITE | SECTION_RELRO;
10296
10297 return flags;
10298 }
10299
10300 /* pa_legitimate_address_p recognizes an RTL expression that is a
10301 valid memory address for an instruction. The MODE argument is the
10302 machine mode for the MEM expression that wants to use this address.
10303
10304 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10305 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10306 available with floating point loads and stores, and integer loads.
10307 We get better code by allowing indexed addresses in the initial
10308 RTL generation.
10309
10310 The acceptance of indexed addresses as legitimate implies that we
10311 must provide patterns for doing indexed integer stores, or the move
10312 expanders must force the address of an indexed store to a register.
10313 We have adopted the latter approach.
10314
10315 Another function of pa_legitimate_address_p is to ensure that
10316 the base register is a valid pointer for indexed instructions.
10317 On targets that have non-equivalent space registers, we have to
10318 know at the time of assembler output which register in a REG+REG
10319 pair is the base register. The REG_POINTER flag is sometimes lost
10320 in reload and the following passes, so it can't be relied on during
10321 code generation. Thus, we either have to canonicalize the order
10322 of the registers in REG+REG indexed addresses, or treat REG+REG
10323 addresses separately and provide patterns for both permutations.
10324
10325 The latter approach requires several hundred additional lines of
10326 code in pa.md. The downside to canonicalizing is that a PLUS
10327 in the wrong order can't combine to form a scaled indexed
10328 memory operand. As we won't need to canonicalize the operands if
10329 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10330
10331 We initially break out scaled indexed addresses in canonical order
10332 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10333 scaled indexed addresses during RTL generation. However, fold_rtx
10334 has its own opinion on how the operands of a PLUS should be ordered.
10335 If one of the operands is equivalent to a constant, it will make
10336 that operand the second operand. As the base register is likely to
10337 be equivalent to a SYMBOL_REF, we have made it the second operand.
10338
10339 pa_legitimate_address_p accepts REG+REG as legitimate when the
10340 operands are in the order INDEX+BASE on targets with non-equivalent
10341 space registers, and in any order on targets with equivalent space
10342 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10343
10344 We treat a SYMBOL_REF as legitimate if it is part of the current
10345 function's constant-pool, because such addresses can actually be
10346 output as REG+SMALLINT. */
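
/* Accepted forms include, schematically (SImode, register numbers
   illustrative):

       (plus (reg %r3) (const_int 12))                    ; ldw 12(%r3),%r1
       (plus (reg %r4) (reg %r3))                         ; ldwx %r4(%r3),%r1
       (plus (mult (reg %r4) (const_int 4)) (reg %r3))    ; ldwx,s %r4(%r3),%r1

   with the base register second in the REG+REG form on targets with
   non-equivalent space registers.  */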
10347
10348 static bool
10349 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10350 {
10351 if ((REG_P (x)
10352 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10353 : REG_OK_FOR_BASE_P (x)))
10354 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10355 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10356 && REG_P (XEXP (x, 0))
10357 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10358 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10359 return true;
10360
10361 if (GET_CODE (x) == PLUS)
10362 {
10363 rtx base, index;
10364
10365 /* For REG+REG, the base register should be in XEXP (x, 1),
10366 so check it first. */
10367 if (REG_P (XEXP (x, 1))
10368 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10369 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10370 base = XEXP (x, 1), index = XEXP (x, 0);
10371 else if (REG_P (XEXP (x, 0))
10372 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10373 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10374 base = XEXP (x, 0), index = XEXP (x, 1);
10375 else
10376 return false;
10377
10378 if (GET_CODE (index) == CONST_INT)
10379 {
10380 if (INT_5_BITS (index))
10381 return true;
10382
10383 /* When INT14_OK_STRICT is false, a secondary reload is needed
10384 to adjust the displacement of SImode and DImode floating point
10385 instructions but this may fail when the register also needs
10386 reloading. So, we return false when STRICT is true. We
10387 also reject long displacements for float mode addresses since
10388 the majority of accesses will use floating point instructions
10389 that don't support 14-bit offsets. */
10390 if (!INT14_OK_STRICT
10391 && (strict || !(reload_in_progress || reload_completed))
10392 && mode != QImode
10393 && mode != HImode)
10394 return false;
10395
10396 return base14_operand (index, mode);
10397 }
10398
10399 if (!TARGET_DISABLE_INDEXING
10400 /* Only accept the "canonical" INDEX+BASE operand order
10401 on targets with non-equivalent space registers. */
10402 && (TARGET_NO_SPACE_REGS
10403 ? REG_P (index)
10404 : (base == XEXP (x, 1) && REG_P (index)
10405 && (reload_completed
10406 || (reload_in_progress && HARD_REGISTER_P (base))
10407 || REG_POINTER (base))
10408 && (reload_completed
10409 || (reload_in_progress && HARD_REGISTER_P (index))
10410 || !REG_POINTER (index))))
10411 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10412 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10413 : REG_OK_FOR_INDEX_P (index))
10414 && borx_reg_operand (base, Pmode)
10415 && borx_reg_operand (index, Pmode))
10416 return true;
10417
10418 if (!TARGET_DISABLE_INDEXING
10419 && GET_CODE (index) == MULT
10420 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10421 && REG_P (XEXP (index, 0))
10422 && GET_MODE (XEXP (index, 0)) == Pmode
10423 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10424 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10425 && GET_CODE (XEXP (index, 1)) == CONST_INT
10426 && INTVAL (XEXP (index, 1))
10427 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10428 && borx_reg_operand (base, Pmode))
10429 return true;
10430
10431 return false;
10432 }
10433
10434 if (GET_CODE (x) == LO_SUM)
10435 {
10436 rtx y = XEXP (x, 0);
10437
10438 if (GET_CODE (y) == SUBREG)
10439 y = SUBREG_REG (y);
10440
10441 if (REG_P (y)
10442 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10443 : REG_OK_FOR_BASE_P (y)))
10444 {
10445 /* Needed for -fPIC */
10446 if (mode == Pmode
10447 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10448 return true;
10449
10450 if (!INT14_OK_STRICT
10451 && (strict || !(reload_in_progress || reload_completed))
10452 && mode != QImode
10453 && mode != HImode)
10454 return false;
10455
10456 if (CONSTANT_P (XEXP (x, 1)))
10457 return true;
10458 }
10459 return false;
10460 }
10461
10462 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10463 return true;
10464
10465 return false;
10466 }
10467
10468 /* Look for machine dependent ways to make the invalid address AD a
10469 valid address.
10470
10471 For the PA, transform:
10472
10473 memory(X + <large int>)
10474
10475 into:
10476
10477 if (<large int> & mask) >= (mask + 1) / 2
10478 Y = (<large int> & ~mask) + mask + 1 Round up.
10479 else
10480 Y = (<large int> & ~mask) Round down.
10481 Z = X + Y
10482 memory (Z + (<large int> - Y));
10483
10484 This makes reload inheritance and reload_cse work better since Z
10485 can be reused.
10486
10487 There may be more opportunities to improve code with this hook. */
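
/* Worked example with the 5-bit mask used for floating-point modes:
   for memory (X + 0x123), 0x123 & 0x1f == 0x3 < 0x10, so we round
   down: Y = 0x120, Z = X + 0x120, and the reference becomes
   memory (Z + 0x3), whose displacement fits the 5-bit field.  Z can
   then be inherited by reloads of neighboring addresses.  */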
10488
10489 rtx
10490 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10491 int opnum, int type,
10492 int ind_levels ATTRIBUTE_UNUSED)
10493 {
10494 long offset, newoffset, mask;
10495 rtx new_rtx, temp = NULL_RTX;
10496
10497 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10498 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10499
10500 if (optimize && GET_CODE (ad) == PLUS)
10501 temp = simplify_binary_operation (PLUS, Pmode,
10502 XEXP (ad, 0), XEXP (ad, 1));
10503
10504 new_rtx = temp ? temp : ad;
10505
10506 if (optimize
10507 && GET_CODE (new_rtx) == PLUS
10508 && GET_CODE (XEXP (new_rtx, 0)) == REG
10509 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10510 {
10511 offset = INTVAL (XEXP ((new_rtx), 1));
10512
10513 /* Choose rounding direction. Round up if we are >= halfway. */
10514 if ((offset & mask) >= ((mask + 1) / 2))
10515 newoffset = (offset & ~mask) + mask + 1;
10516 else
10517 newoffset = offset & ~mask;
10518
10519 /* Ensure that long displacements are aligned. */
10520 if (mask == 0x3fff
10521 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10522 || (TARGET_64BIT && (mode) == DImode)))
10523 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10524
10525 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10526 {
10527 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10528 GEN_INT (newoffset));
10529 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10530 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10531 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10532 opnum, (enum reload_type) type);
10533 return ad;
10534 }
10535 }
10536
10537 return NULL_RTX;
10538 }
10539
10540 /* Output address vector. */
10541
10542 void
10543 pa_output_addr_vec (rtx lab, rtx body)
10544 {
10545 int idx, vlen = XVECLEN (body, 0);
10546
10547 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10548 if (TARGET_GAS)
10549 fputs ("\t.begin_brtab\n", asm_out_file);
10550 for (idx = 0; idx < vlen; idx++)
10551 {
10552 ASM_OUTPUT_ADDR_VEC_ELT
10553 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10554 }
10555 if (TARGET_GAS)
10556 fputs ("\t.end_brtab\n", asm_out_file);
10557 }
10558
10559 /* Output address difference vector. */
10560
10561 void
10562 pa_output_addr_diff_vec (rtx lab, rtx body)
10563 {
10564 rtx base = XEXP (XEXP (body, 0), 0);
10565 int idx, vlen = XVECLEN (body, 1);
10566
10567 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10568 if (TARGET_GAS)
10569 fputs ("\t.begin_brtab\n", asm_out_file);
10570 for (idx = 0; idx < vlen; idx++)
10571 {
10572 ASM_OUTPUT_ADDR_DIFF_ELT
10573 (asm_out_file,
10574 body,
10575 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10576 CODE_LABEL_NUMBER (base));
10577 }
10578 if (TARGET_GAS)
10579 fputs ("\t.end_brtab\n", asm_out_file);
10580 }
10581
10582 #include "gt-pa.h"